| repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values) |
---|---|---|---|---|---|
wheeler-microfluidics/dmf_control_board
|
dmf_control_board_firmware/chip_test/plot.py
|
3
|
6079
|
# coding: utf-8
r'''
This module contains code to plot the capacitance of each channel on a digital
microfluidics chip. The plots are useful, for example, to identify potential
open circuit connections (e.g., broken traces).
'''
from matplotlib.colors import Normalize
from matplotlib.gridspec import GridSpec
from matplotlib.ticker import FuncFormatter
from matplotlib.transforms import offset_copy
from si_prefix import si_format
from svg_model.plot import plot_shapes_heat_map, plot_color_map_bars
import matplotlib.cm as mcm
import matplotlib.pyplot as plt
import pandas as pd
F_formatter = FuncFormatter(lambda x, pos: '%sF' % si_format(x))
m_formatter = FuncFormatter(lambda x, pos: '%sm' % si_format(x, 0))
def plot_channel_capacitance(channel_capacitance, vmax=(200e-15),
color_map=mcm.Reds_r, **kwargs):
vmax = max(2 * channel_capacitance.min(), vmax)
axis = plot_color_map_bars(channel_capacitance, color_map=color_map,
vmax=vmax, **kwargs)
axis.yaxis.set_major_formatter(F_formatter)
return axis
def plot_electrode_capacitance(df_shapes, channel_capacitance,
electrodes_by_channel, vmax=(200e-15),
color_map=mcm.Reds_r, **kwargs):
vmax = max(2 * channel_capacitance.min(), vmax)
electrode_ids = electrodes_by_channel.loc[channel_capacitance.index]
electrode_capacitance = pd.Series(
channel_capacitance.loc[electrode_ids.index].values,
index=electrode_ids.values)
df_shapes = df_shapes.copy()
# Scale millimeters to meters.
df_shapes[['x', 'y']] *= 1e-3
axis, colorbar = plot_shapes_heat_map(df_shapes, 'id',
electrode_capacitance,
value_formatter=F_formatter,
vmax=vmax, color_map=color_map,
**kwargs)
axis.xaxis.set_major_formatter(m_formatter)
# Rotate tick labels explicitly (a lazy map() would be a no-op under Python 3).
plt.setp(axis.get_xticklabels(), rotation=90)
axis.yaxis.set_major_formatter(m_formatter)
axis.set_aspect('equal')
return axis, colorbar
def plot_capacitance_summary(data, fig=None, color_map=mcm.Reds_r,
vmax=200e-15, # 200fF
reduce_func='median'):
'''
| ---------- | ------------------------- |
| | Capacitance of |
| Device | channels (index order) |
| drawing | ------------------------- |
| | Capacitance of |
| | channels (C order) |
| ---------- | ------------------------- |
'''
# Get median capacitance reading for each channel.
channel_groups = data['channel_impedances'].groupby('channel_i')
channel_capacitance = getattr(channel_groups['capacitance'], reduce_func)()
vmax = max(.5 * (channel_capacitance.median() + channel_capacitance.min()),
vmax)
grid = GridSpec(2, 8)
if fig is None:
fig = plt.figure(figsize=(25, 10))
axes = [fig.add_subplot(grid[:, :3]),
fig.add_subplot(grid[0, 3:]),
fig.add_subplot(grid[1, 3:])]
def label_electrodes(axis, df_shapes, channels_by_electrode):
df_shape_min = df_shapes.groupby('id')[['x', 'y']].min() * 1e-3
df_shape_max = df_shapes.groupby('id')[['x', 'y']].max() * 1e-3
df_shape_centers = .5 * (df_shape_max + df_shape_min)
df_shape_centers.y = df_shapes.y.max() * 1e-3 - df_shape_centers.y
light_color = '#ffffff'
dark_color = '#000000'
values = channel_capacitance
norm = Normalize(min(values), vmax, clip=True)
colors = color_map(norm(values.values).filled())
lightness = pd.Series(colors[:, :3].mean(axis=1), index=values.index)
for electrode_i, (x_i, y_i) in df_shape_centers.iterrows():
channel_i = channels_by_electrode.loc[electrode_i]
axis.text(x_i, y_i, channel_i, horizontalalignment='center',
verticalalignment='center',
color=dark_color if channel_i in lightness.index and
lightness.loc[channel_i] > 0.5 else light_color)
plot_electrode_capacitance(data['shapes'],
channel_capacitance,
data['device/electrodes_by_channel'],
axis=axes[0], vmax=vmax)
# Label channel(s) for each electrode on heat map.
label_electrodes(axes[0], data['shapes'],
data['device/channels_by_electrode'])
# Plot channel capacitances as 2 bar charts, colored according to heat map.
# -------------------------------------------------------------------------
# The x-axis of the first bar chart is ordered by channel number.
plot_color_map_bars(channel_capacitance, color_map=color_map, axis=axes[1],
vmax=vmax)
channel_capacitance_ordered = channel_capacitance.sort_values()
# The x-axis of the second bar chart is ordered by channel capacitance.
plot_color_map_bars(channel_capacitance_ordered, color_map=color_map,
axis=axes[2], vmax=vmax)
def label_bars(axis, values, fontsize=8, **kwargs):
# Offset labels by 10% of the axis height.
trans_offset = offset_copy(axis.transData, fig=axis.get_figure(),
y=0.10)
for i, value_i in zip(axis.get_xticks(), values):
axis.text(i, value_i, F_formatter(value_i),
horizontalalignment='center', verticalalignment='bottom',
rotation=90, fontsize=fontsize, transform=trans_offset)
# Annotate capacitance bars with capacitance value.
label_bars(axes[1], channel_capacitance)
label_bars(axes[2], channel_capacitance_ordered)
for ax in axes:
ax.yaxis.set_major_formatter(F_formatter)
fig.tight_layout()
return axes
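# Example usage (a minimal sketch; the HDF5 file name is hypothetical and the
# key names are inferred from the dictionary keys accessed above, not from a
# documented API):
#
#     import pandas as pd
#     keys = ['channel_impedances', 'shapes', 'device/electrodes_by_channel',
#             'device/channels_by_electrode']
#     data = {k: pd.read_hdf('chip_test_results.h5', k) for k in keys}
#     axes = plot_capacitance_summary(data)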
|
gpl-3.0
|
mlperf/training_results_v0.6
|
Google/benchmarks/gnmt/implementations/tpu-v3-512-gnmt/nmt/nmt.py
|
5
|
21486
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow NMT model implementation."""
from __future__ import print_function
import argparse
import os
import random
import sys
# import matplotlib.image as mpimg
import numpy as np
import tensorflow as tf
from tensorflow.contrib.training.python.training import evaluation
from mlp_log import mlp_log
import estimator
from utils import iterator_utils
from utils import misc_utils as utils
from utils import vocab_utils
utils.check_tensorflow_version()
FLAGS = None
def add_arguments(parser):
"""Build ArgumentParser."""
parser.register("type", "bool", lambda v: v.lower() == "true")
# network
parser.add_argument(
"--num_units", type=int, default=1024, help="Network size.")
parser.add_argument(
"--num_layers", type=int, default=4, help="Network depth.")
parser.add_argument("--num_encoder_layers", type=int, default=None,
help="Encoder depth, equal to num_layers if None.")
parser.add_argument("--num_decoder_layers", type=int, default=None,
help="Decoder depth, equal to num_layers if None.")
parser.add_argument("--num_embeddings_partitions", type=int, default=0,
help="Number of partitions for embedding vars.")
# optimizer
parser.add_argument(
"--optimizer", type=str, default="adam", help="sgd | adam")
parser.add_argument(
"--learning_rate",
type=float,
default=0.001,
help="Learning rate. Adam: 0.001 | 0.0001")
parser.add_argument(
"--warmup_steps",
type=int,
default=200,
help="How many steps we inverse-decay learning.")
parser.add_argument("--warmup_scheme", type=str, default="t2t", help="""\
How to warm up learning rates. Options include:
t2t: Tensor2Tensor's way: start with an lr 100 times smaller, then
exponentiate until the specified lr.\
""")
parser.add_argument(
"--decay_start", type=int, default=3000, help="step to start decay")
parser.add_argument(
"--decay_interval",
type=int,
default=400,
help="interval steps between 2 decays")
parser.add_argument(
"--decay_steps", type=int, default=5, help="number of decays")
parser.add_argument(
"--decay_factor", type=float, default=0.66, help="decay rate")
parser.add_argument(
"--max_train_epochs", type=int, default=8,
help="Maximum number of training epochs.")
parser.add_argument("--num_examples_per_epoch", type=int, default=3534981,
help="Number of examples in one epoch")
parser.add_argument("--label_smoothing", type=float, default=0.1,
help=("If nonzero, smooth the labels towards "
"1/num_classes."))
# initializer
parser.add_argument("--init_op", type=str, default="uniform",
help="uniform | glorot_normal | glorot_uniform")
parser.add_argument("--init_weight", type=float, default=0.1,
help=("for uniform init_op, initialize weights "
"between [-this, this]."))
# data
parser.add_argument(
"--src", type=str, default="en", help="Source suffix, e.g., en.")
parser.add_argument(
"--tgt", type=str, default="de", help="Target suffix, e.g., de.")
parser.add_argument(
"--data_dir", type=str, default="", help="Training/eval data directory.")
parser.add_argument(
"--train_prefix",
type=str,
default="train.tok.clean.bpe.32000",
help="Train prefix, expect files with src/tgt suffixes.")
parser.add_argument(
"--test_prefix",
type=str,
default="newstest2014.tok.bpe.32000.padded",
help="Test prefix, expect files with src/tgt suffixes.")
parser.add_argument(
"--use_preprocessed_data",
type="bool",
default=True,
help="Whether to use preprocessed training data.")
parser.add_argument(
"--out_dir", type=str, default=None, help="Store log/model files.")
# Vocab
parser.add_argument(
"--vocab_prefix",
type=str,
default="vocab.bpe.32000",
help="""\
Vocab prefix, expect files with src/tgt suffixes.\
""")
parser.add_argument("--check_special_token", type="bool", default=True,
help="""\
Whether to check that the special sos, eos and unk tokens exist in the
vocab files.\
""")
# Sequence lengths
parser.add_argument(
"--src_max_len",
type=int,
default=48,
help="Max length of src sequences during training.")
parser.add_argument(
"--tgt_max_len",
type=int,
default=48,
help="Max length of tgt sequences during training.")
parser.add_argument(
"--src_max_len_infer",
type=int,
default=160,
help="Max length of src sequences during inference.")
parser.add_argument(
"--tgt_max_len_infer",
type=int,
default=160,
help="""\
Max length of tgt sequences during inference. Also used to restrict the
maximum decoding length.\
""")
# Default settings work well (these rarely need to change).
parser.add_argument("--forget_bias", type=float, default=0.0,
help="Forget bias for BasicLSTMCell.")
parser.add_argument("--dropout", type=float, default=0.2,
help="Dropout rate (not keep_prob)")
parser.add_argument("--max_gradient_norm", type=float, default=5.0,
help="Clip gradients to this norm.")
parser.add_argument("--batch_size", type=int, default=512, help="Batch size.")
parser.add_argument("--steps_per_stats", type=int, default=5,
help=("How many training steps to do per stats logging."
"Save checkpoint every 10x steps_per_stats"))
parser.add_argument(
"--num_buckets",
type=int,
default=5,
help="Put data into similar-length buckets.")
parser.add_argument(
"--choose_buckets",
type=int,
default=1,
help="Choose from this number of length buckets per training step.")
# SPM
parser.add_argument("--subword_option", type=str, default="bpe",
choices=["", "bpe", "spm"],
help="""\
Set to bpe or spm to activate subword desegmentation.\
""")
# Misc
parser.add_argument(
"--num_shards", type=int,
default=8, help="Number of shards (TPU cores).")
parser.add_argument(
"--num_shards_per_host", type=int,
default=8, help="Number of shards (TPU cores) per host.")
parser.add_argument(
"--num_gpus", type=int, default=4, help="Number of gpus in each worker.")
parser.add_argument(
"--num_infeed_workers",
type=int,
default=1,
help="Number of TPU workers used for input generation.")
parser.add_argument(
"--num_tpu_workers",
type=int,
default=1,
help="Number of TPU workers; if set, uses the distributed-sync pipeline.")
parser.add_argument("--hparams_path", type=str, default=None,
help=("Path to standard hparams json file that overrides"
"hparams values from FLAGS."))
parser.add_argument(
"--random_seed",
type=int,
default=None,
help="Random seed (>0, set a specific seed).")
# Inference
parser.add_argument("--ckpt", type=str, default="",
help="Checkpoint file to load a model for inference.")
parser.add_argument(
"--infer_batch_size",
type=int,
default=512,
help="Batch size for inference mode.")
parser.add_argument(
"--examples_to_infer",
type=int,
default=3003,
help="Number of examples to infer.")
parser.add_argument("--detokenizer_file", type=str,
default="mosesdecoder/scripts/tokenizer/detokenizer.perl",
help=("""Detokenizer script file."""))
parser.add_argument("--use_borg", type=bool, default=False)
parser.add_argument(
"--target_bleu", type=float, default=24.0, help="Target accuracy.")
# Advanced inference arguments
parser.add_argument("--infer_mode", type=str, default="beam_search",
choices=["greedy", "sample", "beam_search"],
help="Which type of decoder to use during inference.")
parser.add_argument("--beam_width", type=int, default=5,
help=("""\
Beam width when using the beam search decoder. If 0, use the standard
decoder with a greedy helper.\
"""))
parser.add_argument(
"--length_penalty_weight",
type=float,
default=0.6,
help="Length penalty for beam search.")
parser.add_argument(
"--coverage_penalty_weight",
type=float,
default=0.1,
help="Coverage penalty for beam search.")
# Job info
parser.add_argument("--jobid", type=int, default=0,
help="Task id of the worker.")
# TPU
parser.add_argument("--use_tpu", type=bool, default=True)
parser.add_argument("--master", type=str, default="",
help=("Address of the master. Either --master or "
"--tpu_name must be specified."))
parser.add_argument("--tpu_name", type=str, default=None,
help=("Name of the TPU for Cluster Resolvers. Either "
"--tpu_name or --master must be specified."))
parser.add_argument("--use_dynamic_rnn", type=bool, default=False)
parser.add_argument("--use_synthetic_data", type=bool, default=False)
parser.add_argument(
"--mode",
type=str,
default="train_and_eval",
choices=["train", "train_and_eval", "infer", "preprocess"])
parser.add_argument(
"--activation_dtype",
type=str,
default="bfloat16",
choices=["float32", "bfloat16"])
parser.add_argument("--tpu_job_name", type=str, default=None)
def create_hparams(flags):
"""Create training hparams."""
return tf.contrib.training.HParams(
# Data
src=flags.src,
tgt=flags.tgt,
train_prefix=flags.data_dir + flags.train_prefix,
test_prefix=flags.data_dir + flags.test_prefix,
vocab_prefix=flags.data_dir + flags.vocab_prefix,
out_dir=flags.out_dir,
# Networks
num_units=flags.num_units,
num_encoder_layers=(flags.num_encoder_layers or flags.num_layers),
num_decoder_layers=(flags.num_decoder_layers or flags.num_layers),
dropout=flags.dropout,
num_embeddings_partitions=flags.num_embeddings_partitions,
# Train
optimizer=flags.optimizer,
max_train_epochs=flags.max_train_epochs,
num_examples_per_epoch=flags.num_examples_per_epoch,
batch_size=flags.batch_size,
num_train_steps=int(flags.num_examples_per_epoch / flags.batch_size *
flags.max_train_epochs),
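# e.g. with the default flags: 3534981 / 512 * 8 ~= 55,234 training steps.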
init_op=flags.init_op,
init_weight=flags.init_weight,
max_gradient_norm=flags.max_gradient_norm,
learning_rate=flags.learning_rate,
label_smoothing=flags.label_smoothing,
warmup_steps=flags.warmup_steps,
warmup_scheme=flags.warmup_scheme,
decay_start=flags.decay_start,
decay_interval=flags.decay_interval,
decay_steps=flags.decay_steps,
decay_factor=flags.decay_factor,
# Data constraints
num_buckets=flags.num_buckets,
choose_buckets=flags.choose_buckets,
src_max_len=flags.src_max_len,
tgt_max_len=flags.tgt_max_len,
use_preprocessed_data=flags.use_preprocessed_data,
# Inference
src_max_len_infer=flags.src_max_len_infer,
tgt_max_len_infer=flags.tgt_max_len_infer,
infer_batch_size=flags.infer_batch_size,
examples_to_infer=flags.examples_to_infer,
detokenizer_file=flags.data_dir + flags.detokenizer_file,
use_borg=flags.use_borg,
target_bleu=flags.target_bleu,
# Advanced inference arguments
infer_mode=flags.infer_mode,
beam_width=flags.beam_width,
length_penalty_weight=flags.length_penalty_weight,
coverage_penalty_weight=flags.coverage_penalty_weight,
# Vocab
sos=vocab_utils.SOS,
eos=vocab_utils.EOS,
subword_option=flags.subword_option,
check_special_token=flags.check_special_token,
# Misc
forget_bias=flags.forget_bias,
num_shards=flags.num_shards,
num_shards_per_host=flags.num_shards_per_host,
num_gpus=flags.num_gpus,
num_infeed_workers=flags.num_infeed_workers,
epoch_step=0, # record where we were within an epoch.
steps_per_stats=flags.steps_per_stats,
random_seed=flags.random_seed,
# TPU
use_tpu=flags.use_tpu,
master=flags.master,
tpu_name=flags.tpu_name,
use_dynamic_rnn=flags.use_dynamic_rnn,
use_synthetic_data=flags.use_synthetic_data,
mode=flags.mode,
activation_dtype=flags.activation_dtype,
tpu_job_name=flags.tpu_job_name)
def _add_argument(hparams, key, value, update=True):
"""Add an argument to hparams; if exists, change the value if update==True."""
if hasattr(hparams, key):
if update:
setattr(hparams, key, value)
else:
hparams.add_hparam(key, value)
def extend_hparams(hparams):
"""Add new arguments to hparams."""
# Sanity checks
if hparams.subword_option and hparams.subword_option not in ["spm", "bpe"]:
raise ValueError("subword option must be either spm, or bpe")
if hparams.infer_mode == "beam_search" and hparams.beam_width <= 0:
raise ValueError("beam_width must greater than 0 when using beam_search"
"decoder.")
# Different number of encoder / decoder layers
assert hparams.num_encoder_layers == hparams.num_decoder_layers
# The first unidirectional layer (after the bi-directional layer) in
# the GNMT encoder can't have a residual connection because its input is
# the concatenation of the fw_cell and bw_cell outputs.
num_encoder_residual_layers = hparams.num_encoder_layers - 2
num_decoder_residual_layers = num_encoder_residual_layers
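# e.g. with the default num_layers=4: 4 - 2 = 2 residual layers in both the
# encoder and the decoder.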
_add_argument(hparams, "num_encoder_residual_layers",
num_encoder_residual_layers)
_add_argument(hparams, "num_decoder_residual_layers",
num_decoder_residual_layers)
## Vocab
# Get vocab file names first
if hparams.vocab_prefix:
src_vocab_file = hparams.vocab_prefix + "." + hparams.src
tgt_vocab_file = hparams.vocab_prefix + "." + hparams.tgt
else:
raise ValueError("hparams.vocab_prefix must be provided.")
# Source vocab
src_vocab_size, src_vocab_file = vocab_utils.check_vocab(
src_vocab_file,
hparams.out_dir,
check_special_token=hparams.check_special_token,
sos=hparams.sos,
eos=hparams.eos,
unk=vocab_utils.UNK)
# Target vocab
utils.print_out(" using source vocab for target")
tgt_vocab_file = src_vocab_file
tgt_vocab_size = src_vocab_size
_add_argument(hparams, "src_vocab_size", src_vocab_size)
_add_argument(hparams, "tgt_vocab_size", tgt_vocab_size)
_add_argument(hparams, "src_vocab_file", src_vocab_file)
_add_argument(hparams, "tgt_vocab_file", tgt_vocab_file)
# Num embedding partitions
_add_argument(
hparams, "num_enc_emb_partitions", hparams.num_embeddings_partitions)
_add_argument(
hparams, "num_dec_emb_partitions", hparams.num_embeddings_partitions)
# Pretrained Embeddings
_add_argument(hparams, "src_embed_file", "")
_add_argument(hparams, "tgt_embed_file", "")
return hparams
def create_or_load_hparams(default_hparams, hparams_path):
"""Create hparams or load hparams from out_dir."""
hparams = utils.maybe_parse_standard_hparams(default_hparams, hparams_path)
hparams = extend_hparams(hparams)
# Print HParams
utils.print_hparams(hparams)
return hparams
def prepare_dataset(flags):
"""Generate the preprocessed dataset."""
src_file = "%s.%s" % (flags.data_dir + flags.train_prefix, flags.src)
tgt_file = "%s.%s" % (flags.data_dir + flags.train_prefix, flags.tgt)
vocab_file = flags.data_dir + flags.vocab_prefix
_, vocab_file = vocab_utils.check_vocab(vocab_file, flags.out_dir)
out_file = flags.out_dir + "preprocessed_dataset"
src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(vocab_file)
src_dataset = tf.data.TextLineDataset(src_file)
tgt_dataset = tf.data.TextLineDataset(tgt_file)
iterator = iterator_utils.get_iterator(
src_dataset,
tgt_dataset,
src_vocab_table,
tgt_vocab_table,
batch_size=1,
global_batch_size=1,
sos=vocab_utils.SOS,
eos=vocab_utils.EOS,
random_seed=1,
num_buckets=flags.num_buckets,
src_max_len=flags.src_max_len,
tgt_max_len=flags.tgt_max_len,
filter_oversized_sequences=True,
return_raw=True).make_initializable_iterator()
with tf.Session() as sess:
sess.run(tf.tables_initializer())
sess.run(iterator.initializer)
try:
i = 0
while True:
with open(out_file + "_%d" % i, "wb") as f:
i += 1
for _ in range(100):
for j in sess.run(iterator.get_next()):
tf.logging.info(j)
f.write(bytearray(j))
except tf.errors.OutOfRangeError:
pass
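# Note: the loop above writes the serialized tensors of 100 iterator batches
# per file, to files named "<out_dir>preprocessed_dataset_0", "_1", ...; it is
# reached via --mode=preprocess (see main() below).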
def run_main(flags, default_hparams, estimator_fn):
"""Run main."""
# Job
jobid = flags.jobid
utils.print_out("# Job id %d" % jobid)
# Random
random_seed = flags.random_seed
if random_seed is not None and random_seed > 0:
utils.print_out("# Set random seed to %d" % random_seed)
random.seed(random_seed + jobid)
np.random.seed(random_seed + jobid)
tf.set_random_seed(random_seed)
# Model output directory
out_dir = flags.out_dir
if out_dir and not tf.gfile.Exists(out_dir):
utils.print_out("# Creating output directory %s ..." % out_dir)
tf.gfile.MakeDirs(out_dir)
# Load hparams.
hparams = create_or_load_hparams(default_hparams, flags.hparams_path)
# TODO(dehao) move init time closer to model construction if necessary.
mlp_log.mlperf_print("init_start", None)
# Train or Evaluation
return estimator_fn(hparams)
def main(unused_argv):
# pylint: disable=g-long-lambda
if FLAGS.mode == "preprocess":
prepare_dataset(FLAGS)
elif FLAGS.mode == "train":
print("Running training mode.")
default_hparams = create_hparams(FLAGS)
run_main(FLAGS, default_hparams, estimator.train_fn)
elif FLAGS.mode == "train_and_eval":
print("Running training and evaluation mode.")
default_hparams = create_hparams(FLAGS)
run_main(FLAGS, default_hparams,
estimator.train_and_eval_with_low_level_api)
else:
print("Running inference mode.")
default_hparams = create_hparams(FLAGS)
current_epoch = 0
last_step = 0
# Run evaluation when there's a new checkpoint
for ckpt in evaluation.checkpoints_iterator(FLAGS.out_dir):
# Terminate eval job once target score is reached
current_step = int(os.path.basename(ckpt).split("-")[1])
if current_step <= last_step:
continue
last_step = current_step
tf.logging.info("Starting to evaluate...%s", ckpt)
try:
score = run_main(FLAGS, default_hparams, estimator.eval_fn)
current_epoch += 1
if score > FLAGS.target_bleu:
tf.logging.info(
"Evaluation finished after training step %d" % current_step)
break
# Terminate eval job when final checkpoint is reached
max_steps = default_hparams.num_train_steps
if current_step >= max_steps:
tf.logging.info(
"Evaluation finished but failed to reach target score.")
break
except tf.errors.NotFoundError:
tf.logging.info(
"Checkpoint %s no longer exists, skipping checkpoint" % ckpt)
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
nmt_parser = argparse.ArgumentParser()
add_arguments(nmt_parser)
FLAGS, unparsed = nmt_parser.parse_known_args()
mlp_log.mlperf_print("global_batch_size", FLAGS.batch_size)
mlp_log.mlperf_print("opt_learning_rate_alt_decay_func", "True")
mlp_log.mlperf_print("opt_base_learning_rate", FLAGS.learning_rate)
mlp_log.mlperf_print("opt_learning_rate_decay_interval", FLAGS.decay_interval)
mlp_log.mlperf_print("opt_learning_rate_decay_factor", FLAGS.decay_factor)
mlp_log.mlperf_print("opt_learning_rate_decay_steps", FLAGS.decay_steps)
mlp_log.mlperf_print("opt_learning_rate_remain_steps", FLAGS.decay_start)
mlp_log.mlperf_print("opt_learning_rate_alt_warmup_func", FLAGS.warmup_scheme)
mlp_log.mlperf_print("opt_learning_rate_warmup_steps", FLAGS.warmup_steps)
mlp_log.mlperf_print("max_sequence_length", FLAGS.src_max_len)
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
apache-2.0
|
detrout/debian-statsmodels
|
statsmodels/examples/ex_kernel_test_functional_li_wang.py
|
34
|
4162
|
# -*- coding: utf-8 -*-
"""Example TestFForm with Li Wang DGP1
Created on Tue Jan 08 19:03:20 2013
Author: Josef Perktold
trying to replicate some examples in
Li, Q., and Suojin Wang. 1998. "A Simple Consistent Bootstrap Test for a
Parametric Regression Function."
Journal of Econometrics 87 (1) (November): 145-165.
doi:10.1016/S0304-4076(98)00011-6.
currently DGP1
Monte Carlo with 100 replications
---------------------------------
results
598948
time 11.1642833312
[-0.72505981 0.26514944 0.45681704]
[ 0.74884796 0.22005569 0.3004892 ]
reject at [0.2, 0.1, 0.05] (row 1: normal, row 2: bootstrap)
[[ 0.55 0.24 0.01]
[ 0.29 0.16 0.06]]
bw [ 0.11492364 0.11492364]
tst.test_stat -1.40274609515
Not Significant
tst.boots_results min, max -2.03386582198 2.32562183511
lower tail bootstrap p-value 0.077694235589
asymp. normal p-value (2-sided) 0.160692566481
mean and std in Li and Wang for n=1 are -0.764 and 0.621
results look reasonable now
Power
-----
true model: quadratic, estimated model: linear
498198
time 8.4588166674
[ 0.50374364 0.3991975 0.25373434]
[ 1.21353172 0.28669981 0.25461368]
reject at [0.2, 0.1, 0.05] (row 1: normal, row 2: bootstrap)
[[ 0.66 0.78 0.82]
[ 0.46 0.61 0.74]]
bw [ 0.11492364 0.11492364]
tst.test_stat 0.505426717024
Not Significant
tst.boots_results min, max -1.67050998463 3.39835350718
lower tail bootstrap p-value 0.892230576441
upper tail bootstrap p-value 0.107769423559
asymp. normal p-value (2-sided) 0.613259157709
asymp. normal p-value (upper) 0.306629578855
"""
from __future__ import print_function
if __name__ == '__main__':
import time
import numpy as np
from scipy import stats
from statsmodels.regression.linear_model import OLS
#from statsmodels.nonparametric.api import KernelReg
import statsmodels.sandbox.nonparametric.kernel_extras as smke
seed = np.random.randint(999999)
#seed = 661176
print(seed)
np.random.seed(seed)
sig_e = 0.1 #0.5 #0.1
nobs, k_vars = 100, 1
t0 = time.time()
b_res = []
for i in range(100):
x = np.random.uniform(0, 1, size=(nobs, k_vars))
x.sort(0)
order = 2
exog = x**np.arange(1, order + 1)
beta = np.array([2, -0.2])[:order+1-1] # 1. / np.arange(1, order + 2)
y_true = np.dot(exog, beta)
y = y_true + sig_e * np.random.normal(size=nobs)
endog = y
mod_ols = OLS(endog, exog[:,:1])
#res_ols = mod_ols.fit()
#'cv_ls'[1000, 0.5]
bw_lw = [1./np.sqrt(12.) * nobs**(-0.2)]*2 #(-1. / 5.)
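# (Interpretation added here, not from the original script: 1/sqrt(12) is the
# standard deviation of a Uniform(0, 1) regressor, so this is a rule-of-thumb
# bandwidth of the form c * sigma * n**(-1/5).)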
tst = smke.TestFForm(endog, exog[:,:1], bw=bw_lw, var_type='c',
fform=lambda x,p: mod_ols.predict(p,x),
estimator=lambda y,x: OLS(y,x).fit().params,
nboot=399)
b_res.append([tst.test_stat,
stats.norm.sf(tst.test_stat),
(tst.boots_results > tst.test_stat).mean()])
t1 = time.time()
b_res = np.asarray(b_res)
print('time', (t1 - t0) / 60.)
print(b_res.mean(0))
print(b_res.std(0))
print('reject at [0.2, 0.1, 0.05] (row 1: normal, row 2: bootstrap)')
print((b_res[:,1:,None] >= [0.2, 0.1, 0.05]).mean(0))
print('bw', tst.bw)
print('tst.test_stat', tst.test_stat)
print(tst.sig)
print('tst.boots_results min, max', tst.boots_results.min(), tst.boots_results.max())
print('lower tail bootstrap p-value', (tst.boots_results < tst.test_stat).mean())
print('upper tail bootstrap p-value', (tst.boots_results >= tst.test_stat).mean())
from scipy import stats
print('asymp. normal p-value (2-sided)', stats.norm.sf(np.abs(tst.test_stat))*2)
print('asymp. normal p-value (upper)', stats.norm.sf(tst.test_stat))
res_ols = mod_ols.fit()
do_plot=True
if do_plot:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(x, y, '.')
plt.plot(x, res_ols.fittedvalues)
plt.title('OLS fit')
plt.figure()
plt.hist(tst.boots_results.ravel(), bins=20)
plt.title('bootstrap histogram of test statistic')
plt.show()
|
bsd-3-clause
|
NeuralEnsemble/elephant
|
doc/conf.py
|
2
|
11596
|
# -*- coding: utf-8 -*-
#
# Elephant documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 5 17:11:26 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
from datetime import date
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, '..')
# -- General configuration -----------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.imgmath',
'sphinx.ext.viewcode',
'sphinx.ext.mathjax',
'sphinxcontrib.bibtex',
'matplotlib.sphinxext.plot_directive',
'numpydoc',
'nbsphinx',
'sphinx_tabs.tabs',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Elephant'
authors = u'Elephant authors and contributors'
copyright = u"2014-{this_year}, {authors}".format(this_year=date.today().year,
authors=authors)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
root_dir = os.path.dirname(os.path.dirname(__file__))
with open(os.path.join(root_dir, 'elephant', 'VERSION')) as version_file:
# The full version, including alpha/beta/rc tags.
release = version_file.read().strip()
# The short X.Y version.
version = '.'.join(release.split('.')[:-1])
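# e.g. (hypothetical value) a VERSION file containing "0.10.0" gives
# release = "0.10.0" and version = "0.10".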
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
'_build',
'**.ipynb_checkpoints',
'maintainers_guide.rst', # should not be visible to users
]
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# Only execute Jupyter notebooks that have no evaluated cells
nbsphinx_execute = 'auto'
# Kernel to use for execution
nbsphinx_kernel_name = 'python3'
# Cancel compile on errors in notebooks
nbsphinx_allow_errors = False
# Required to automatically create a summary page for each function listed in
# the autosummary fields of each module.
autosummary_generate = True
# Set to False to not overwrite the custom _toctree/*.rst
autosummary_generate_overwrite = True
# -- Options for HTML output ---------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
html_theme_options = {
'font_family': 'Arial',
'page_width': '1200px', # default is 940
'sidebar_width': '280px', # default is 220
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'images/elephant_logo_sidebar.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'images/elephant_favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'elephantdoc'
# Suppresses wrong numpy doc warnings
# see here https://github.com/phn/pytpm/issues/3#issuecomment-12133978
numpydoc_show_class_members = False
# A fix for Alabaster theme for no space between a citation reference
# and citation text
# https://github.com/sphinx-doc/sphinx/issues/6705#issuecomment-536197438
html4_writer = True
# -- Options for LaTeX output --------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'elephant.tex', u'Elephant Documentation',
authors, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'elephant', u'Elephant Documentation',
[authors], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index',
'Elephant',
u'Elephant Documentation',
authors,
'Elephant',
'Elephant is a package for the analysis of neurophysiology data.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = authors
epub_publisher = authors
epub_copyright = copyright
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# configuration for intersphinx: refer to Viziphant
intersphinx_mapping = {
'viziphant': ('https://viziphant.readthedocs.io/en/latest/', None),
'numpy': ('https://numpy.org/doc/stable', None)
}
# Use more reliable mathjax source
mathjax_path = 'https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML'
# Remove the copyright notice from docstrings:
def process_docstring_remove_copyright(app, what, name, obj, options, lines):
copyright_line = None
for i, line in enumerate(lines):
if line.startswith(':copyright:'):
copyright_line = i
break
if copyright_line is not None:
while len(lines) > copyright_line:
lines.pop()
def setup(app):
app.connect('autodoc-process-docstring',
process_docstring_remove_copyright)
|
bsd-3-clause
|
lewisc/spark-tk
|
regression-tests/sparktkregtests/testcases/frames/boxcox_test.py
|
12
|
5074
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Test frame.box_cox() and frame.reverse_box_cox()"""
import unittest
from sparktkregtests.lib import sparktk_test
class BoxCoxTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""Build test frame"""
super(BoxCoxTest, self).setUp()
dataset =\
[[5.8813080107727425], [8.9771372790941797], [8.9153072947470804],
[8.1583747730768401], [0.35889585616853292]]
schema = [("y", float)]
self.frame = self.context.frame.create(dataset, schema=schema)
def test_wt_default(self):
""" Test behaviour for default params, lambda = 0 """
self.frame.box_cox("y")
actual = self.frame.to_pandas()["y_lambda_0.0"].tolist()
expected =\
[1.7717791879837133, 2.1946810429706676,
2.1877697201262163, 2.0990449791729704, -1.0247230268174008]
self.assertItemsEqual(actual, expected)
def test_lambda(self):
""" Test wt for lambda = 0.3 """
self.frame.box_cox("y", 0.3)
actual = self.frame.to_pandas()["y_lambda_0.3"].tolist()
expected =\
[2.3384668540844573, 3.1056915770236082,
3.0923547540771801, 2.9235756971904037, -0.88218677941017198]
self.assertItemsEqual(actual, expected)
def test_reverse_default(self):
""" Test reverse transform for default lambda = 0 """
self.frame.box_cox("y")
self.frame.reverse_box_cox("y_lambda_0.0",
reverse_box_cox_column_name="reverse")
actual = self.frame.to_pandas()["reverse"].tolist()
expected =\
[5.8813080107727425, 8.9771372790941815,
8.9153072947470804, 8.1583747730768401, 0.35889585616853298]
self.assertItemsEqual(actual, expected)
def test_reverse_lambda(self):
""" Test reverse transform for lambda = 0.3 """
self.frame.box_cox("y", 0.3)
self.frame.reverse_box_cox("y_lambda_0.3", 0.3,
reverse_box_cox_column_name="reverse")
actual = self.frame.to_pandas()["reverse"].tolist()
expected =\
[5.8813080107727442, 8.9771372790941797,
8.9153072947470822, 8.1583747730768419,
0.35889585616853298]
self.assertItemsEqual(actual, expected)
@unittest.skip("req not clear")
def test_lambda_negative(self):
""" Test box cox for lambda -1 """
self.frame.box_cox("y", -1)
actual = self.frame.to_pandas()["y_lambda_-1.0"].tolist()
expected =\
[0.82996979614597488, 0.88860591423406388,
0.88783336715839256, 0.87742656744575354,
-1.7863236167608822]
self.assertItemsEqual(actual, expected)
def test_existing_boxcox_column(self):
""" Test behavior for existing boxcox column """
self.frame.box_cox("y", 0.3)
with self.assertRaisesRegexp(
Exception, "duplicate column name"):
self.frame.box_cox("y", 0.3)
def test_existing_reverse_column(self):
""" Test behavior for existing reverse boxcox column """
self.frame.reverse_box_cox("y", 0.3)
with self.assertRaisesRegexp(
Exception, "duplicate column name"):
self.frame.reverse_box_cox("y", 0.3)
@unittest.skip("Req not clear")
def test_negative_col_positive_lambda(self):
"""Test behaviour for negative input column and positive lambda"""
frame = self.context.frame.create([[-1], [-2], [1]], [("y", float)])
frame.box_cox("y", 1)
actual = frame.to_pandas()["y_lambda_1.0"].tolist()
expected = [-2.0, -3.0, 0]
self.assertItemsEqual(actual, expected)
@unittest.skip("Req not clear")
def test_negative_col_fractional_lambda(self):
"""Test behaviour for negative input column and fractional lambda"""
frame = self.context.frame.create([[-1], [-2], [1]], [("y", float)])
with self.assertRaises(Exception):
frame.box_cox("y", 0.1)
@unittest.skip("Req not clear")
def test_negative_col_zero_lambda(self):
"""Test behaviour for negative input column and positive lambda"""
frame = self.context.frame.create([[-1], [-2], [1]], [("y", float)])
with self.assertRaises(Exception):
frame.box_cox("y")
if __name__ == "__main__":
unittest.main()
|
apache-2.0
|
bthirion/scikit-learn
|
sklearn/feature_selection/tests/test_base.py
|
98
|
3681
|
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
from sklearn.utils.testing import assert_raises, assert_equal
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, 'csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
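# SelectorMixin derives the public ``transform``, ``inverse_transform`` and
# ``get_support`` methods from the boolean mask returned by
# ``_get_support_mask``; the tests below exercise that contract.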
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
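# With step=2 and 10 input features, the selector keeps columns 0, 2, 4, 6, 8,
# so Xt == X[:, ::2] and Xinv is X with the odd-numbered columns zeroed out.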
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform([feature_names])
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform([feature_names_t])
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
|
bsd-3-clause
|
jkibele/OpticalRS
|
OpticalRS/RasterAT.py
|
1
|
12582
|
# -*- coding: utf-8 -*-
"""
RasterAT
========
The `RAT` object will subclass `RasterDS` and add methods for handling Raster
Attribute Tables. The idea is to read and write RATs using GDAL but to represent
and manipulate them using pandas.
"""
from RasterDS import RasterDS
from GeoDFUtils import point_sample_raster
from GroundTruthShp import GroundTruthShapefile
import geopandas as gpd
from osgeo import gdal
import pandas as pd
import numpy as np
from scipy.ndimage import measurements
f_names = ['Name', 'PixelCount', 'ClassNumber', 'Red', 'Blue', 'Green', 'Alpha']
f_use = [gdal.GFU_Name, gdal.GFU_PixelCount, gdal.GFU_MinMax, gdal.GFU_Red,
gdal.GFU_Blue, gdal.GFU_Green, gdal.GFU_Alpha]
f_type = [gdal.GFT_String, gdal.GFT_Integer, gdal.GFT_Integer, gdal.GFT_Integer,
gdal.GFT_Integer, gdal.GFT_Integer, gdal.GFT_Integer]
f_use_d = dict(zip(f_names, f_use))
f_type_d = dict(zip(f_names, f_type))
class RAT(RasterDS):
def __init__(self, rlayer, overwrite=True):
RasterDS.__init__(self, rlayer, overwrite=overwrite)
self.ratdf = self.__get_or_create_rat()
def __open_gdal_ds(self):
self._RasterDS__open_gdal_ds()
def _erase_rat(self):
"""
Are you sure you want to do this? This doesn't fully work. It leaves
behind the *.tif.aux.xml file that, apparently, contains the actual RAT.
"""
band = self.gdal_ds.GetRasterBand(1)
band.SetDefaultRAT(None)
self.gdal_ds = None
self.gdal_ds = self._RasterDS__open_gdal_ds()
@property
def unique_values(self):
return np.unique(self.band_array.compressed())
def save_rat(self, df=None):
"""
Write the RAT to the GDAL file. For now, we're just assuming a single
band.
"""
if df is None:
df = self.ratdf
else:
self.ratdf = df
if not self.overwrite:
raise ValueError("RasterAT object is not set to allow overwriting of its file.")
band = self.gdal_ds.GetRasterBand(1)
grat = df_to_gdal_rat(df)
ret = band.SetDefaultRAT(grat)
if ret == gdal.CE_None:
self.gdal_ds = None
self.gdal_ds = self._RasterDS__open_gdal_ds()
return True
else:
return False
def __create_rat(self):
"""
Create a default pandas RAT for a raster that doesn't already have one.
"""
# columns and index
cols = list(f_names)
cols.remove('ClassNumber') #This is going to be the index
df = pd.DataFrame(columns=cols, index=self.unique_values)
# PixelCount
bins = np.append(self.unique_values, self.unique_values.max() +1)
pcnt = np.histogram(self.band_array.compressed(), bins=bins)[0]
df.PixelCount = pcnt
# Colors
df[['Red','Green','Blue','Alpha']] = np.random.randint(0, 255, (len(df), 4))
df.index.name = 'ClassNumber'
return df
def __read_rat(self):
"""
Read gdal rat if there is one and return it as a pandas dataframe.
Return `None` if there is no rat.
"""
band = self.gdal_ds.GetRasterBand(1)
grat = band.GetDefaultRAT()
if grat is not None:
return gdal_rat_to_df(grat)
else:
return None
def __get_or_create_rat(self):
readrat = self.__read_rat()
if readrat is None:
return self.__create_rat()
else:
return readrat
def properties_df(self, img, func=np.mean, prefix=None, postfix='_b',
colnames=None):
"""
Sample values from `img` for each segment (a.k.a. class or class number)
in the RAT. `func` is used on the `img` pixels to produce a single value
for each segment.
"""
if isinstance(img, np.ndarray):
img = img.copy()
elif isinstance(img, RasterDS):
img = img.band_array
else:
img = RasterDS(img).band_array
img = np.atleast_3d(img)
labels = self.band_array.squeeze()
nbands = img.shape[-1]
if colnames == None:
if prefix == None:
try:
prefix = func.__name__
except AttributeError:
prefix = ''
colnames = [prefix+postfix+str(i+1) for i in range(nbands)]
ddict = {}
for bnum in range(nbands):
band = img[...,bnum]
coln = colnames[bnum]
ind = self.ratdf.index.to_series().values
ddict[coln] = band_label_properties(labels, band, ind, func)
newdf = pd.DataFrame(ddict, columns=colnames, index=self.ratdf.index)
return newdf
def column_array(self, cols, df=None, out_mask=None, default=0):
"""
Produce an image array from values in the RAT. Instead of the integer
`ClassNumber` values from the segments, the image array will contain
values from the specified column. When multiple columns (`cols`) are
passed, the result will be a multi-band image array.
Parameters
----------
cols : string or iterable of strings
The dataframe columns to pull values from. Each string needs to
correspond to a column in the RAT or, if `df` has been passed,
it must be a column in `df`.
df : pandas dataframe
The dataframe from which raster values will be pulled. If `None`
(default), the dataframe representation of the RAT will be used.
Returns
-------
numpy array
An image array of size (Rows, Columns, nBands) where nBands =
`len(cols)`.
"""
if type(cols) == str:
cols = [cols]
if type(df) == type(None):
df = self.ratdf
lblarr = self.band_array.squeeze()
noutbands = len(cols)
outshape = lblarr.shape + (noutbands,)
outarr = np.ma.zeros(outshape)
idxset = set(df.index)
for i, col in enumerate(cols):
# outarr[...,i] = np.vectorize(df[col].to_dict().get)(lblarr)
bnd = outarr[:,:,i]
for idx in idxset:
bnd[lblarr==idx] = df.loc[idx, col]
if out_mask is not None:
out_mask = np.repeat(np.atleast_3d(out_mask.astype('bool')), noutbands, axis=2)
outarr.mask = out_mask
# for i, col in enumerate(cols):
# bnd = outarr[...,i] # view, not copy so changes to bnd will
# # be reflected in outarr, ...I think.
# for cn in df.index.astype('int'):
# idx = np.index_exp[bndarr==cn]
# bnd[idx] = df.loc[cn, col]
# if np.ma.is_masked(bndarr):
# bnd.mask = bndarr.mask
# if np.ma.is_masked(bndarr):
# outmask = np.repeat(np.atleast_3d(bndarr), noutbands, axis=2)
# outarr.mask = outmask
## This worked but it was very slow.
# outarr = np.repeat(self.band_array.astype(float), len(cols), axis=2)
# for i, col in enumerate(cols):
# def class_map(classnum):
# if classnum in df.index:
# return df.loc[int(classnum), col]
# else:
# return np.nan
# vclass_map = np.vectorize(class_map)
# outarr[...,i] = vclass_map(outarr[...,i])
return outarr
def bps_habitat_df(self, bps_shp, hab_cols=None):
"""
Build a data frame with aggregated habitat values from a BPS shapefile.
If `hab_cols` is not specified, all columns of `bps_shp` that start with
a capital letter will be assumed to represent habitats.
Parameters
----------
bps_shp : string (file path), GeoDataFrame, or GroundTruthShapefile
Whichever type is passed will be used to attempt to create a
geodataframe. This needs to be a point shapefile. Typically, one
created by [Benthic Photo Survey](http://openresearchsoftware.metajnl.com/articles/10.5334/jors.104/)
hab_cols : list of strings
These are the names of the habitat columns in the BPS file. If left
as `None`, all capitalized columns with numeric values will be
assumed to be habitats.
Returns
-------
pandas DataFrame
A data frame with habitat columns and a point count column. The
point count column contains the number of BPS points in each
segment (ClassNumber). The habitat columns contain the average of
the habitat values for the BPS points in the corresponding segment.
"""
if type(bps_shp) == gpd.geodataframe.GeoDataFrame:
bpsdf = bps_shp
elif type(bps_shp) == GroundTruthShapefile:
bpsdf = bps_shp.geo_data_frame
else:
bpsdf = gpd.read_file(bps_shp)
if hab_cols is None:
# This will create a list of numeric columns with names that
# are capitalized. This works because I tend to set up my BPS
# habitat names with capitalized names. ...but it won't work
# right if you haven't set up BPS that way.
hab_cols = [ c for c in bpsdf.select_dtypes(include=[np.number]).columns if c[0].isupper() ]
# if habitats haven't been assigned, we don't want to use those rows.
bpsdf.dropna('rows', subset=hab_cols, inplace=True)
# get the segment number for each point
bpsdf = point_sample_raster(bpsdf, self, col_names=['ClassNumber'])
# group the BPS point by the segment they're in
grp = bpsdf[hab_cols + ['ClassNumber']].groupby('ClassNumber')
# count how many points are in each segment
pntcnt = grp.count().iloc[:, 0]
pntcnt.name = 'point_count'
# average the hab columns for each segment
habmeans = grp.mean()
seghabs = habmeans.join(pntcnt)
return seghabs
def band_label_properties(labels, band, ind=None, func=np.mean,
outdtype=np.float, default=0.0):
if type(ind) == type(None):
ind = np.unique(labels.compressed())
proparr = measurements.labeled_comprehension(band, labels, ind, func, outdtype, default)
return pd.Series(proparr, index=ind)
def dtype_map(typ):
if type(typ) == np.dtype:
# map numpy to GFT
if typ.kind in ['i', 'u']:
return gdal.GFT_Integer
elif typ.kind in ['f', 'c']:
return gdal.GFT_Real
else:
return gdal.GFT_String
else:
#map GFT to numpy
if typ == gdal.GFT_Integer:
return np.dtype('int32')
elif typ == gdal.GFT_Real:
return np.dtype('float32')
else:
# I'm using object instead of string so I can represent np.NaN
return np.dtype('O')
def df_to_gdal_rat(df):
df = df.copy()
if 'ClassNumber' not in df.columns:
df['ClassNumber'] = df.index
rat = gdal.RasterAttributeTable()
rat.SetRowCount(len(df))
for num, col in enumerate(df.columns):
gftype = dtype_map(df[col].dtype)
if col in f_names:
usetype = f_use_d[col]
else:
usetype = gdal.GFU_Generic
# have to call str(col) because CreateColumn can't take unicode
rat.CreateColumn(str(col), gftype, usetype)
rat.WriteArray(df[col].tolist(), num)
return rat
def gdal_rat_to_df(grat):
dfdict = {}
idx = None
for colnum in range(grat.GetColumnCount()):
colname = grat.GetNameOfCol(colnum)
coldtype = dtype_map(grat.GetTypeOfCol(colnum))
coluse = grat.GetUsageOfCol(colnum)
# print "{}: {}".format(colname, coldtype)
if coluse == gdal.GFU_MinMax:
idx = grat.ReadAsArray(colnum)
elif coldtype == np.dtype('O'):
# fix 'nan' strings
arr = grat.ReadAsArray(colnum).astype('O', copy=False)
arr[arr=='nan'] = np.NaN
dfdict[colname] = arr
else:
dfdict[colname] = grat.ReadAsArray(colnum)
# I want to order the columns in a sensible way
stdcols = list(f_names)
stdcols.remove('ClassNumber')
customcols = [c for c in dfdict.keys() if c not in stdcols]
colord = customcols
colord.extend(stdcols)
df = pd.DataFrame(dfdict, index=idx)[colord]
df.index.name = "ClassNumber"
return df
|
bsd-3-clause
|
kanak87/oldboy_rep
|
test_nolearn/read_iu.py
|
1
|
3563
|
import pickle
from PIL import Image
import cv2
from matplotlib import pyplot
__author__ = 'yongeun'
import os
import numpy as np
from pandas.io.parsers import read_csv
from sklearn.utils import shuffle
FTRAIN = './data/kaggle-facial-keypoint-detection/training.csv'
FTEST = './data/kaggle-facial-keypoint-detection/test.csv'
iuImage = cv2.imread("iu.jpeg")
iuImage = cv2.resize(iuImage, (96, 96))
iuImage = cv2.cvtColor(iuImage, cv2.COLOR_BGR2GRAY)
iuImageArray = []
for y in range(0, 96, 1):
for x in range(0, 96, 1):
iuImageArray.append( (iuImage[y][x]/255.) )
img = np.array(iuImageArray)
img = img.astype(np.float32)
pyplot.figure()
pyplot.imshow(iuImage, cmap='gray')
pyplot.show()
def load(test=False, cols=None):
"""Loads data from FTEST if *test* is True, otherwise from FTRAIN.
Pass a list of *cols* if you're only interested in a subset of the
target columns.
"""
fname = FTEST if test else FTRAIN
df = read_csv(os.path.expanduser(fname)) # load pandas dataframe
# The Image column has pixel values separated by space; convert
# the values to numpy arrays:
df['Image'] = df['Image'].apply(lambda im: np.fromstring(im, sep=' '))
if cols: # get a subset of columns
df = df[list(cols) + ['Image']]
print(df.count()) # prints the number of values for each column
df = df.dropna() # drop all rows that have missing values in them
X = np.vstack(df['Image'].values) / 255. # scale pixel values to [0, 1]
X = X.astype(np.float32)
if not test: # only FTRAIN has any target columns
y = df[df.columns[:-1]].values
y = (y - 48) / 48 # scale target coordinates to [-1, 1]
X, y = shuffle(X, y, random_state=42) # shuffle train data
y = y.astype(np.float32)
else:
y = None
return X, y
'''
X, y = load()
print("X.shape == {}; X.min == {:.3f}; X.max == {:.3f}".format(
X.shape, X.min(), X.max()))
print("y.shape == {}; y.min == {:.3f}; y.max == {:.3f}".format(
y.shape, y.min(), y.max()))
'''
from lasagne import layers
from lasagne.updates import nesterov_momentum
from nolearn.lasagne import NeuralNet
net1 = NeuralNet(
layers=[ # three layers: one hidden layer
('input', layers.InputLayer),
('hidden', layers.DenseLayer),
('output', layers.DenseLayer),
],
# layer parameters:
input_shape=(None, 9216), # 96x96 input pixels per batch
hidden_num_units=100, # number of units in hidden layer
output_nonlinearity=None, # output layer uses identity function
output_num_units=30, # 30 target values
# optimization method:
update=nesterov_momentum,
update_learning_rate=0.01,
update_momentum=0.9,
regression=True, # flag to indicate we're dealing with regression problem
max_epochs=400, # we want to train this many epochs
verbose=1,
)
'''
X, y = load()
net1.fit(X, y)
with open('net1.pickle', 'wb') as f:
pickle.dump(net1, f, -1)
'''
net1 = pickle.load( open( "net1.pickle", "rb" ) )
def plot_sample(x, y, axis):
img = x.reshape(96, 96)
axis.imshow(img, cmap='gray')
axis.scatter(y[0::2] * 48 + 48, y[1::2] * 48 + 48, marker='x', s=10)
'''
X, _ = load(test=True)
y_pred = net1.predict(X)
'''
X = np.vstack([img])
y_pred = net1.predict(X)
fig = pyplot.figure(figsize=(6, 6))
fig.subplots_adjust(
left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
for i in range(1):
ax = fig.add_subplot(4, 4, i + 1, xticks=[], yticks=[])
plot_sample(X[i], y_pred[i], ax)
pyplot.show()
|
mit
|
jhamman/xray
|
xarray/core/ops.py
|
1
|
14573
|
"""Define core operations for xarray objects.
TODO(shoyer): rewrite this module, making use of xarray.core.computation,
NumPy's __array_ufunc__ and mixin classes instead of the unintuitive "inject"
functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import operator
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from . import dtypes
from . import duck_array_ops
from .pycompat import PY3
from .nputils import array_eq, array_ne
try:
import bottleneck as bn
has_bottleneck = True
except ImportError:
# use numpy methods instead
bn = np
has_bottleneck = False
UNARY_OPS = ['neg', 'pos', 'abs', 'invert']
CMP_BINARY_OPS = ['lt', 'le', 'ge', 'gt']
NUM_BINARY_OPS = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod',
'pow', 'and', 'xor', 'or']
if not PY3:
NUM_BINARY_OPS.append('div')
# methods which pass on the numpy return value unchanged
# be careful not to list methods that we would want to wrap later
NUMPY_SAME_METHODS = ['item', 'searchsorted']
# methods which don't modify the data shape, so the result should still be
# wrapped in an Variable/DataArray
NUMPY_UNARY_METHODS = ['astype', 'argsort', 'clip', 'conj', 'conjugate']
PANDAS_UNARY_FUNCTIONS = ['isnull', 'notnull']
# methods which remove an axis
REDUCE_METHODS = ['all', 'any']
NAN_REDUCE_METHODS = ['argmax', 'argmin', 'max', 'min', 'mean', 'prod', 'sum',
'std', 'var', 'median']
NAN_CUM_METHODS = ['cumsum', 'cumprod']
BOTTLENECK_ROLLING_METHODS = {'move_sum': 'sum', 'move_mean': 'mean',
'move_std': 'std', 'move_min': 'min',
'move_max': 'max', 'move_var': 'var',
'move_argmin': 'argmin', 'move_argmax': 'argmax'}
# TODO: wrap take, dot, sort
_CUM_DOCSTRING_TEMPLATE = """\
Apply `{name}` along some dimension of {cls}.
Parameters
----------
{extra_args}
skipna : bool, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or skipna=True has not been
implemented (object, datetime64 or timedelta64).
keep_attrs : bool, optional
If True, the attributes (`attrs`) will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to `{name}`.
Returns
-------
cumvalue : {cls}
New {cls} object with `{name}` applied to its data along the
indicated dimension.
"""
_REDUCE_DOCSTRING_TEMPLATE = """\
Reduce this {cls}'s data by applying `{name}` along some dimension(s).
Parameters
----------
{extra_args}
skipna : bool, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or skipna=True has not been
implemented (object, datetime64 or timedelta64).
keep_attrs : bool, optional
If True, the attributes (`attrs`) will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to the appropriate array
function for calculating `{name}` on this object's data.
Returns
-------
reduced : {cls}
New {cls} object with `{name}` applied to its data and the
indicated dimension(s) removed.
"""
_ROLLING_REDUCE_DOCSTRING_TEMPLATE = """\
Reduce this {da_or_ds}'s data windows by applying `{name}` along its dimension.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to `{name}`.
Returns
-------
reduced : {da_or_ds}
    New {da_or_ds} object with `{name}` applied along its rolling dimension.
"""
def fillna(data, other, join="left", dataset_join="left"):
"""Fill missing values in this object with data from the other object.
Follows normal broadcasting and alignment rules.
Parameters
----------
join : {'outer', 'inner', 'left', 'right'}, optional
Method for joining the indexes of the passed objects along each
dimension
- 'outer': use the union of object indexes
- 'inner': use the intersection of object indexes
- 'left': use indexes from the first object with each dimension
- 'right': use indexes from the last object with each dimension
- 'exact': raise `ValueError` instead of aligning when indexes to be
aligned are not equal
dataset_join : {'outer', 'inner', 'left', 'right'}, optional
Method for joining variables of Dataset objects with mismatched
data variables.
- 'outer': take variables from both Dataset objects
- 'inner': take only overlapped variables
- 'left': take only variables from the first object
- 'right': take only variables from the last object
"""
from .computation import apply_ufunc
return apply_ufunc(duck_array_ops.fillna, data, other,
join=join,
dask_array="allowed",
dataset_join=dataset_join,
dataset_fill_value=np.nan,
keep_attrs=True)
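# Illustrative usage (assumes two alignable xarray objects `a` and `b`):
#   filled = fillna(a, b)                 # keep a's index, patch its NaNs from b
#   filled = fillna(a, b, join="outer")   # align on the union of indexes instead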
def where_method(self, cond, other=dtypes.NA):
"""Return elements from `self` or `other` depending on `cond`.
Parameters
----------
cond : DataArray or Dataset with boolean dtype
        Locations at which to preserve this object's values.
other : scalar, DataArray or Dataset, optional
Value to use for locations in this object where ``cond`` is False.
By default, inserts missing values.
Returns
-------
Same type as caller.
"""
from .computation import apply_ufunc
# alignment for three arguments is complicated, so don't support it yet
join = 'inner' if other is dtypes.NA else 'exact'
return apply_ufunc(duck_array_ops.where_method,
self, cond, other,
join=join,
dataset_join=join,
dask_array='allowed',
keep_attrs=True)
def _call_possibly_missing_method(arg, name, args, kwargs):
try:
method = getattr(arg, name)
except AttributeError:
duck_array_ops.fail_on_dask_array_input(arg, func_name=name)
if hasattr(arg, 'data'):
duck_array_ops.fail_on_dask_array_input(arg.data, func_name=name)
raise
else:
return method(*args, **kwargs)
def _values_method_wrapper(name):
def func(self, *args, **kwargs):
return _call_possibly_missing_method(self.data, name, args, kwargs)
func.__name__ = name
func.__doc__ = getattr(np.ndarray, name).__doc__
return func
def _method_wrapper(name):
def func(self, *args, **kwargs):
return _call_possibly_missing_method(self, name, args, kwargs)
func.__name__ = name
func.__doc__ = getattr(np.ndarray, name).__doc__
return func
def _func_slash_method_wrapper(f, name=None):
# try to wrap a method, but if not found use the function
# this is useful when patching in a function as both a DataArray and
# Dataset method
if name is None:
name = f.__name__
def func(self, *args, **kwargs):
try:
return getattr(self, name)(*args, **kwargs)
except AttributeError:
return f(self, *args, **kwargs)
func.__name__ = name
func.__doc__ = f.__doc__
return func
def rolling_count(rolling):
not_null = rolling.obj.notnull()
instance_attr_dict = {'center': rolling.center,
'min_periods': rolling.min_periods,
rolling.dim: rolling.window}
rolling_count = not_null.rolling(**instance_attr_dict).sum()
if rolling.min_periods is None:
return rolling_count
# otherwise we need to filter out points where there aren't enough periods
# but not_null is False, and so the NaNs don't flow through
# array with points where there are enough values given min_periods
enough_periods = rolling_count >= rolling.min_periods
return rolling_count.where(enough_periods)
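# Illustrative behaviour (hypothetical DataArray `da` with a 'time' dimension):
#   r = da.rolling(time=3, min_periods=2)
#   rolling_count(r)  # valid-value count per window; windows with fewer than
#                     # min_periods valid points come back as NaN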
def inject_reduce_methods(cls):
methods = ([(name, getattr(duck_array_ops, 'array_%s' % name), False)
for name in REDUCE_METHODS] +
[(name, getattr(duck_array_ops, name), True)
for name in NAN_REDUCE_METHODS] +
[('count', duck_array_ops.count, False)])
for name, f, include_skipna in methods:
numeric_only = getattr(f, 'numeric_only', False)
func = cls._reduce_method(f, include_skipna, numeric_only)
func.__name__ = name
func.__doc__ = _REDUCE_DOCSTRING_TEMPLATE.format(
name=name, cls=cls.__name__,
extra_args=cls._reduce_extra_args_docstring.format(name=name))
setattr(cls, name, func)
def inject_cum_methods(cls):
methods = ([(name, getattr(duck_array_ops, name), True)
for name in NAN_CUM_METHODS])
for name, f, include_skipna in methods:
numeric_only = getattr(f, 'numeric_only', False)
func = cls._reduce_method(f, include_skipna, numeric_only)
func.__name__ = name
func.__doc__ = _CUM_DOCSTRING_TEMPLATE.format(
name=name, cls=cls.__name__,
extra_args=cls._cum_extra_args_docstring.format(name=name))
setattr(cls, name, func)
def op_str(name):
return '__%s__' % name
def get_op(name):
return getattr(operator, op_str(name))
NON_INPLACE_OP = dict((get_op('i' + name), get_op(name))
for name in NUM_BINARY_OPS)
def inplace_to_noninplace_op(f):
return NON_INPLACE_OP[f]
def inject_binary_ops(cls, inplace=False):
for name in CMP_BINARY_OPS + NUM_BINARY_OPS:
setattr(cls, op_str(name), cls._binary_op(get_op(name)))
for name, f in [('eq', array_eq), ('ne', array_ne)]:
setattr(cls, op_str(name), cls._binary_op(f))
for name in NUM_BINARY_OPS:
# only numeric operations have in-place and reflexive variants
setattr(cls, op_str('r' + name),
cls._binary_op(get_op(name), reflexive=True))
if inplace:
setattr(cls, op_str('i' + name),
cls._inplace_binary_op(get_op('i' + name)))
def inject_all_ops_and_reduce_methods(cls, priority=50, array_only=True):
# prioritize our operations over those of numpy.ndarray (priority=1)
# and numpy.matrix (priority=10)
cls.__array_priority__ = priority
# patch in standard special operations
for name in UNARY_OPS:
setattr(cls, op_str(name), cls._unary_op(get_op(name)))
inject_binary_ops(cls, inplace=True)
# patch in numpy/pandas methods
for name in NUMPY_UNARY_METHODS:
setattr(cls, name, cls._unary_op(_method_wrapper(name)))
for name in PANDAS_UNARY_FUNCTIONS:
f = _func_slash_method_wrapper(getattr(pd, name))
setattr(cls, name, cls._unary_op(f))
f = _func_slash_method_wrapper(duck_array_ops.around, name='round')
setattr(cls, 'round', cls._unary_op(f))
if array_only:
# these methods don't return arrays of the same shape as the input, so
# don't try to patch these in for Dataset objects
for name in NUMPY_SAME_METHODS:
setattr(cls, name, _values_method_wrapper(name))
inject_reduce_methods(cls)
inject_cum_methods(cls)
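# Sketch of how a wrapper class opts in (assumes it defines the _unary_op,
# _binary_op, _inplace_binary_op and _reduce_method factories used above):
#   inject_all_ops_and_reduce_methods(MyWrappedArray, array_only=True)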
def inject_bottleneck_rolling_methods(cls):
# standard numpy reduce methods
methods = [(name, getattr(duck_array_ops, name))
for name in NAN_REDUCE_METHODS]
for name, f in methods:
func = cls._reduce_method(f)
func.__name__ = name
func.__doc__ = _ROLLING_REDUCE_DOCSTRING_TEMPLATE.format(
name=func.__name__, da_or_ds='DataArray')
setattr(cls, name, func)
# bottleneck doesn't offer rolling_count, so we construct it ourselves
func = rolling_count
func.__name__ = 'count'
func.__doc__ = _ROLLING_REDUCE_DOCSTRING_TEMPLATE.format(
name=func.__name__, da_or_ds='DataArray')
setattr(cls, 'count', func)
# bottleneck rolling methods
if has_bottleneck:
        # TODO: Bump the required version of bottleneck to 1.1 and remove all
# these version checks (see GH#1278)
bn_version = LooseVersion(bn.__version__)
bn_min_version = LooseVersion('1.0')
bn_version_1_1 = LooseVersion('1.1')
if bn_version < bn_min_version:
return
for bn_name, method_name in BOTTLENECK_ROLLING_METHODS.items():
try:
f = getattr(bn, bn_name)
func = cls._bottleneck_reduce(f)
func.__name__ = method_name
func.__doc__ = _ROLLING_REDUCE_DOCSTRING_TEMPLATE.format(
name=func.__name__, da_or_ds='DataArray')
setattr(cls, method_name, func)
except AttributeError as e:
# skip functions not in Bottleneck 1.0
if ((bn_version < bn_version_1_1) and
(bn_name not in ['move_var', 'move_argmin',
'move_argmax', 'move_rank'])):
raise e
# bottleneck rolling methods without min_count (bn.__version__ < 1.1)
f = getattr(bn, 'move_median')
if bn_version >= bn_version_1_1:
func = cls._bottleneck_reduce(f)
else:
func = cls._bottleneck_reduce_without_min_count(f)
func.__name__ = 'median'
func.__doc__ = _ROLLING_REDUCE_DOCSTRING_TEMPLATE.format(
name=func.__name__, da_or_ds='DataArray')
setattr(cls, 'median', func)
def inject_datasetrolling_methods(cls):
# standard numpy reduce methods
methods = [(name, getattr(duck_array_ops, name))
for name in NAN_REDUCE_METHODS]
for name, f in methods:
func = cls._reduce_method(f)
func.__name__ = name
func.__doc__ = _ROLLING_REDUCE_DOCSTRING_TEMPLATE.format(
name=func.__name__, da_or_ds='Dataset')
setattr(cls, name, func)
# bottleneck doesn't offer rolling_count, so we construct it ourselves
func = rolling_count
func.__name__ = 'count'
func.__doc__ = _ROLLING_REDUCE_DOCSTRING_TEMPLATE.format(
name=func.__name__, da_or_ds='Dataset')
setattr(cls, 'count', func)
|
apache-2.0
|
rmcgibbo/scipy
|
doc/source/tutorial/examples/normdiscr_plot2.py
|
84
|
1642
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
npoints = 20 # number of integer support points of the distribution minus 1
npointsh = npoints / 2
npointsf = float(npoints)
nbound = 4 #bounds for the truncated normal
normbound = (1 + 1 / npointsf) * nbound #actual bounds of truncated normal
grid = np.arange(-npointsh, npointsh+2,1) #integer grid
gridlimitsnorm = (grid - 0.5) / npointsh * nbound #bin limits for the truncnorm
gridlimits = grid - 0.5
grid = grid[:-1]
probs = np.diff(stats.truncnorm.cdf(gridlimitsnorm, -normbound, normbound))
gridint = grid
normdiscrete = stats.rv_discrete(
values=(gridint, np.round(probs, decimals=7)),
name='normdiscrete')
n_sample = 500
np.random.seed(87655678) #fix the seed for replicability
rvs = normdiscrete.rvs(size=n_sample)
rvsnd = rvs
f,l = np.histogram(rvs,bins=gridlimits)
sfreq = np.vstack([gridint,f,probs*n_sample]).T
fs = sfreq[:,1] / float(n_sample)
ft = sfreq[:,2] / float(n_sample)
fs = sfreq[:,1].cumsum() / float(n_sample)
ft = sfreq[:,2].cumsum() / float(n_sample)
nd_std = np.sqrt(normdiscrete.stats(moments='v'))
ind = gridint # the x locations for the groups
width = 0.35 # the width of the bars
plt.figure()
plt.subplot(111)
rects1 = plt.bar(ind, ft, width, color='b')
rects2 = plt.bar(ind+width, fs, width, color='r')
normline = plt.plot(ind+width/2.0, stats.norm.cdf(ind+0.5,scale=nd_std),
color='b')
plt.ylabel('cdf')
plt.title('Cumulative Frequency and CDF of normdiscrete')
plt.xticks(ind+width, ind)
plt.legend((rects1[0], rects2[0]), ('true', 'sample'))
plt.show()
|
bsd-3-clause
|
PatrickOReilly/scikit-learn
|
examples/decomposition/plot_incremental_pca.py
|
175
|
1974
|
"""
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (up to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
colors = ['navy', 'turquoise', 'darkorange']
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
plt.figure(figsize=(8, 8))
for color, i, target_name in zip(colors, [0, 1, 2], iris.target_names):
plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
color=color, lw=2, label=target_name)
if "Incremental" in title:
err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
plt.title(title + " of iris dataset\nMean absolute unsigned error "
"%.6f" % err)
else:
plt.title(title + " of iris dataset")
plt.legend(loc="best", shadow=False, scatterpoints=1)
plt.axis([-4, 4, -1.5, 1.5])
plt.show()
|
bsd-3-clause
|
mbayon/TFG-MachineLearning
|
vbig/lib/python2.7/site-packages/pandas/core/reshape/tile.py
|
3
|
13759
|
"""
Quantilization functions and related stuff
"""
from pandas.core.dtypes.missing import isnull
from pandas.core.dtypes.common import (
is_integer,
is_scalar,
is_categorical_dtype,
is_datetime64_dtype,
is_timedelta64_dtype,
_ensure_int64)
import pandas.core.algorithms as algos
import pandas.core.nanops as nanops
from pandas._libs.lib import infer_dtype
from pandas import (to_timedelta, to_datetime,
Categorical, Timestamp, Timedelta,
Series, Interval, IntervalIndex)
import numpy as np
def cut(x, bins, right=True, labels=None, retbins=False, precision=3,
include_lowest=False):
"""
Return indices of half-open bins to which each value of `x` belongs.
Parameters
----------
x : array-like
Input array to be binned. It has to be 1-dimensional.
bins : int, sequence of scalars, or IntervalIndex
If `bins` is an int, it defines the number of equal-width bins in the
range of `x`. However, in this case, the range of `x` is extended
by .1% on each side to include the min or max values of `x`. If
`bins` is a sequence it defines the bin edges allowing for
non-uniform bin width. No extension of the range of `x` is done in
this case.
right : bool, optional
Indicates whether the bins include the rightmost edge or not. If
right == True (the default), then the bins [1,2,3,4] indicate
(1,2], (2,3], (3,4].
labels : array or boolean, default None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, return only integer indicators of the
bins.
retbins : bool, optional
Whether to return the bins or not. Can be useful if bins is given
as a scalar.
precision : int, optional
The precision at which to store and display the bins labels
include_lowest : bool, optional
Whether the first interval should be left-inclusive or not.
Returns
-------
out : Categorical or Series or array of integers if labels is False
The return type (Categorical or Series) depends on the input: a Series
of type category if input is a Series else Categorical. Bins are
represented as categories when categorical data is returned.
bins : ndarray of floats
Returned only if `retbins` is True.
Notes
-----
The `cut` function can be useful for going from a continuous variable to
a categorical variable. For example, `cut` could convert ages to groups
of age ranges.
Any NA values will be NA in the result. Out of bounds values will be NA in
the resulting Categorical object
Examples
--------
>>> pd.cut(np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]), 3, retbins=True)
([(0.191, 3.367], (0.191, 3.367], (0.191, 3.367], (3.367, 6.533],
(6.533, 9.7], (0.191, 3.367]]
Categories (3, object): [(0.191, 3.367] < (3.367, 6.533] < (6.533, 9.7]],
array([ 0.1905 , 3.36666667, 6.53333333, 9.7 ]))
>>> pd.cut(np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]), 3,
labels=["good","medium","bad"])
[good, good, good, medium, bad, good]
Categories (3, object): [good < medium < bad]
>>> pd.cut(np.ones(5), 4, labels=False)
array([1, 1, 1, 1, 1], dtype=int64)
"""
# NOTE: this binning code is changed a bit from histogram for var(x) == 0
# for handling the cut for datetime and timedelta objects
x_is_series, series_index, name, x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
if not np.iterable(bins):
if is_scalar(bins) and bins < 1:
raise ValueError("`bins` should be a positive integer.")
try: # for array-like
sz = x.size
except AttributeError:
x = np.asarray(x)
sz = x.size
if sz == 0:
raise ValueError('Cannot cut empty array')
rng = (nanops.nanmin(x), nanops.nanmax(x))
mn, mx = [mi + 0.0 for mi in rng]
if mn == mx: # adjust end points before binning
mn -= .001 * abs(mn) if mn != 0 else .001
mx += .001 * abs(mx) if mx != 0 else .001
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
else: # adjust end points after binning
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
adj = (mx - mn) * 0.001 # 0.1% of the range
if right:
bins[0] -= adj
else:
bins[-1] += adj
elif isinstance(bins, IntervalIndex):
pass
else:
bins = np.asarray(bins)
bins = _convert_bin_to_numeric_type(bins, dtype)
if (np.diff(bins) < 0).any():
raise ValueError('bins must increase monotonically.')
fac, bins = _bins_to_cuts(x, bins, right=right, labels=labels,
precision=precision,
include_lowest=include_lowest,
dtype=dtype)
return _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name)
def qcut(x, q, labels=None, retbins=False, precision=3, duplicates='raise'):
"""
Quantile-based discretization function. Discretize variable into
equal-sized buckets based on rank or based on sample quantiles. For example
1000 values for 10 quantiles would produce a Categorical object indicating
quantile membership for each data point.
Parameters
----------
x : ndarray or Series
q : integer or array of quantiles
Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately
array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles
labels : array or boolean, default None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, return only integer indicators of the
bins.
retbins : bool, optional
Whether to return the (bins, labels) or not. Can be useful if bins
is given as a scalar.
precision : int, optional
The precision at which to store and display the bins labels
duplicates : {default 'raise', 'drop'}, optional
If bin edges are not unique, raise ValueError or drop non-uniques.
.. versionadded:: 0.20.0
Returns
-------
out : Categorical or Series or array of integers if labels is False
The return type (Categorical or Series) depends on the input: a Series
of type category if input is a Series else Categorical. Bins are
represented as categories when categorical data is returned.
bins : ndarray of floats
Returned only if `retbins` is True.
Notes
-----
Out of bounds values will be NA in the resulting Categorical object
Examples
--------
>>> pd.qcut(range(5), 4)
[[0, 1], [0, 1], (1, 2], (2, 3], (3, 4]]
Categories (4, object): [[0, 1] < (1, 2] < (2, 3] < (3, 4]]
>>> pd.qcut(range(5), 3, labels=["good","medium","bad"])
[good, good, medium, bad, bad]
Categories (3, object): [good < medium < bad]
>>> pd.qcut(range(5), 4, labels=False)
array([0, 0, 1, 2, 3], dtype=int64)
"""
x_is_series, series_index, name, x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
if is_integer(q):
quantiles = np.linspace(0, 1, q + 1)
else:
quantiles = q
bins = algos.quantile(x, quantiles)
fac, bins = _bins_to_cuts(x, bins, labels=labels,
precision=precision, include_lowest=True,
dtype=dtype, duplicates=duplicates)
return _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name)
def _bins_to_cuts(x, bins, right=True, labels=None,
precision=3, include_lowest=False,
dtype=None, duplicates='raise'):
if duplicates not in ['raise', 'drop']:
raise ValueError("invalid value for 'duplicates' parameter, "
"valid options are: raise, drop")
if isinstance(bins, IntervalIndex):
# we have a fast-path here
ids = bins.get_indexer(x)
result = algos.take_nd(bins, ids)
result = Categorical(result, categories=bins, ordered=True)
return result, bins
unique_bins = algos.unique(bins)
if len(unique_bins) < len(bins) and len(bins) != 2:
if duplicates == 'raise':
raise ValueError("Bin edges must be unique: {}.\nYou "
"can drop duplicate edges by setting "
"the 'duplicates' kwarg".format(repr(bins)))
else:
bins = unique_bins
side = 'left' if right else 'right'
ids = _ensure_int64(bins.searchsorted(x, side=side))
if include_lowest:
ids[x == bins[0]] = 1
na_mask = isnull(x) | (ids == len(bins)) | (ids == 0)
has_nas = na_mask.any()
if labels is not False:
if labels is None:
labels = _format_labels(bins, precision, right=right,
include_lowest=include_lowest,
dtype=dtype)
else:
if len(labels) != len(bins) - 1:
raise ValueError('Bin labels must be one fewer than '
'the number of bin edges')
if not is_categorical_dtype(labels):
labels = Categorical(labels, categories=labels, ordered=True)
np.putmask(ids, na_mask, 0)
result = algos.take_nd(labels, ids - 1)
else:
result = ids - 1
if has_nas:
result = result.astype(np.float64)
np.putmask(result, na_mask, np.nan)
return result, bins
def _trim_zeros(x):
while len(x) > 1 and x[-1] == '0':
x = x[:-1]
if len(x) > 1 and x[-1] == '.':
x = x[:-1]
return x
def _coerce_to_type(x):
"""
if the passed data is of datetime/timedelta type,
this method converts it to integer so that cut method can
handle it
"""
dtype = None
if is_timedelta64_dtype(x):
x = to_timedelta(x).view(np.int64)
dtype = np.timedelta64
elif is_datetime64_dtype(x):
x = to_datetime(x).view(np.int64)
dtype = np.datetime64
return x, dtype
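# Example (illustrative): datetime-like input is viewed as int64 nanoseconds so
# the generic binning code can handle it, while `dtype` records how to format
# the labels later, e.g.
#   _coerce_to_type(pd.to_datetime(['2017-01-01', '2017-01-02']))
#   # -> (int64 values, numpy.datetime64)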
def _convert_bin_to_numeric_type(bins, dtype):
"""
if the passed bin is of datetime/timedelta type,
this method converts it to integer
Parameters
----------
    bins : list-like of bins
dtype : dtype of data
Raises
------
    ValueError if bins are not of a dtype compatible with `dtype`
"""
bins_dtype = infer_dtype(bins)
if is_timedelta64_dtype(dtype):
if bins_dtype in ['timedelta', 'timedelta64']:
bins = to_timedelta(bins).view(np.int64)
else:
raise ValueError("bins must be of timedelta64 dtype")
elif is_datetime64_dtype(dtype):
if bins_dtype in ['datetime', 'datetime64']:
bins = to_datetime(bins).view(np.int64)
else:
raise ValueError("bins must be of datetime64 dtype")
return bins
def _format_labels(bins, precision, right=True,
include_lowest=False, dtype=None):
""" based on the dtype, return our labels """
closed = 'right' if right else 'left'
if is_datetime64_dtype(dtype):
formatter = Timestamp
adjust = lambda x: x - Timedelta('1ns')
elif is_timedelta64_dtype(dtype):
formatter = Timedelta
adjust = lambda x: x - Timedelta('1ns')
else:
precision = _infer_precision(precision, bins)
formatter = lambda x: _round_frac(x, precision)
adjust = lambda x: x - 10 ** (-precision)
breaks = [formatter(b) for b in bins]
labels = IntervalIndex.from_breaks(breaks, closed=closed)
if right and include_lowest:
# we will adjust the left hand side by precision to
# account that we are all right closed
v = adjust(labels[0].left)
i = IntervalIndex.from_intervals(
[Interval(v, labels[0].right, closed='right')])
labels = i.append(labels[1:])
return labels
def _preprocess_for_cut(x):
"""
handles preprocessing for cut where we convert passed
input to array, strip the index information and store it
    separately
"""
x_is_series = isinstance(x, Series)
series_index = None
name = None
if x_is_series:
series_index = x.index
name = x.name
x = np.asarray(x)
return x_is_series, series_index, name, x
def _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name):
"""
handles post processing for the cut method where
we combine the index information if the originally passed
datatype was a series
"""
if x_is_series:
fac = Series(fac, index=series_index, name=name)
if not retbins:
return fac
return fac, bins
def _round_frac(x, precision):
"""
Round the fractional part of the given number
"""
if not np.isfinite(x) or x == 0:
return x
else:
frac, whole = np.modf(x)
if whole == 0:
digits = -int(np.floor(np.log10(abs(frac)))) - 1 + precision
else:
digits = precision
return np.around(x, digits)
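# Worked examples (illustrative): _round_frac(12.3456, 3) -> 12.346 (whole part
# present, so round to `precision` decimals), while _round_frac(0.000123456, 3)
# keeps three significant fractional digits (digits = 6) -> 0.000123.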
def _infer_precision(base_precision, bins):
"""Infer an appropriate precision for _round_frac
"""
for precision in range(base_precision, 20):
levels = [_round_frac(b, precision) for b in bins]
if algos.unique(levels).size == bins.size:
return precision
return base_precision # default
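# Example (illustrative): for bins [0.123, 0.124] with base_precision=1, rounding
# to 1 or 2 digits collapses both edges to the same value, so 3 is the first
# precision that keeps them distinct and is returned.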
|
mit
|
neilhan/tensorflow
|
tensorflow/contrib/learn/python/learn/tests/dataframe/dataframe_test.py
|
25
|
3654
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the DataFrame class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.tests.dataframe import mocks
def setup_test_df():
"""Create a dataframe populated with some test columns."""
df = learn.DataFrame()
df["a"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor a", tf.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
df["b"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor b", tf.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out2")
df["c"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor c", tf.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
return df
class DataFrameTest(tf.test.TestCase):
"""Test of `DataFrame`."""
def test_create(self):
df = setup_test_df()
self.assertEqual(df.columns(), frozenset(["a", "b", "c"]))
def test_select_columns(self):
df = setup_test_df()
df2 = df.select_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["a", "c"]))
def test_exclude_columns(self):
df = setup_test_df()
df2 = df.exclude_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["b"]))
def test_get_item(self):
df = setup_test_df()
c1 = df["b"]
self.assertEqual(mocks.MockTensor("Mock Tensor 2", tf.int32), c1.build())
def test_del_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
del df["b"]
self.assertEqual(2, len(df))
self.assertEqual(df.columns(), frozenset(["a", "c"]))
def test_set_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn",
mocks.MockTensor("Tensor ", tf.int32))
df["quack"] = col1
self.assertEqual(4, len(df))
col2 = df["quack"]
self.assertEqual(col1, col2)
def test_set_item_column_multi(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn", [])
col2 = mocks.MockSeries("MooColumn", [])
df["quack", "moo"] = [col1, col2]
self.assertEqual(5, len(df))
col3 = df["quack"]
self.assertEqual(col1, col3)
col4 = df["moo"]
self.assertEqual(col2, col4)
def test_set_item_pandas(self):
# TODO(jamieas)
pass
def test_set_item_numpy(self):
# TODO(jamieas)
pass
def test_build(self):
df = setup_test_df()
result = df.build()
expected = {"a": mocks.MockTensor("Mock Tensor 1", tf.int32),
"b": mocks.MockTensor("Mock Tensor 2", tf.int32),
"c": mocks.MockTensor("Mock Tensor 1", tf.int32)}
self.assertEqual(expected, result)
if __name__ == "__main__":
tf.test.main()
|
apache-2.0
|
kcavagnolo/astroML
|
book_figures/chapter4/fig_bootstrap_gaussian.py
|
4
|
2843
|
r"""
Bootstrap Calculations of Error on Mean
---------------------------------------
Figure 4.3.
The bootstrap uncertainty estimates for the sample standard deviation
:math:`\sigma` (dashed line; see eq. 3.32) and :math:`\sigma_G` (solid line;
see eq. 3.36). The sample consists of N = 1000 values drawn from a Gaussian
distribution with :math:`\mu = 0` and :math:`\sigma = 1`. The bootstrap
estimates are based on 10,000 samples. The thin lines show Gaussians with
the widths determined as :math:`s / \sqrt{2(N - 1)}` (eq. 3.35) for
:math:`\sigma` and :math:`1.06 s / \sqrt{N}` (eq. 3.37) for :math:`\sigma_G`.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from scipy.stats import norm
from matplotlib import pyplot as plt
from astroML.resample import bootstrap
from astroML.stats import sigmaG
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
m = 1000 # number of points
n = 10000 # number of bootstraps
#------------------------------------------------------------
# sample values from a normal distribution
np.random.seed(123)
data = norm(0, 1).rvs(m)
#------------------------------------------------------------
# Compute bootstrap resamplings of data
mu1_bootstrap = bootstrap(data, n, np.std, kwargs=dict(axis=1, ddof=1))
mu2_bootstrap = bootstrap(data, n, sigmaG, kwargs=dict(axis=1))
#------------------------------------------------------------
# Compute the theoretical expectations for the two distributions
x = np.linspace(0.8, 1.2, 1000)
sigma1 = 1. / np.sqrt(2 * (m - 1))
pdf1 = norm(1, sigma1).pdf(x)
sigma2 = 1.06 / np.sqrt(m)
pdf2 = norm(1, sigma2).pdf(x)
#------------------------------------------------------------
# Plot the results
fig, ax = plt.subplots(figsize=(5, 3.75))
ax.hist(mu1_bootstrap, bins=50, normed=True, histtype='step',
color='blue', ls='dashed', label=r'$\sigma\ {\rm (std. dev.)}$')
ax.plot(x, pdf1, color='gray')
ax.hist(mu2_bootstrap, bins=50, normed=True, histtype='step',
color='red', label=r'$\sigma_G\ {\rm (quartile)}$')
ax.plot(x, pdf2, color='gray')
ax.set_xlim(0.82, 1.18)
ax.set_xlabel(r'$\sigma$')
ax.set_ylabel(r'$p(\sigma|x,I)$')
ax.legend()
plt.show()
|
bsd-2-clause
|
JackWalpole/splitwavepy
|
splitwavepy/measure/xconvM.py
|
1
|
7716
|
# -*- coding: utf-8 -*-
"""
Grid search for the splitting parameters that best model the waveform using the cross-convolution method.
Menke and Levin (2004)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ..core import core, io
from ..core.pair import Pair
from ..core.window import Window
from .measure import Measure
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os.path
class ConvM(Measure):
"""
Menke and Levin (1991) single layer cross convolution grid search.
args:
None = create synthetic
Pair = Measure splitting on Pair object
x, y = Measure splitting on traces x, and y.
kwargs:
name -- string = 'Untitled'
lags -- tuple = (maxlag,)
-- tuple = (maxlag,Nlags)
-- tuple = (minlag,maxlag,Nlags)
-- numpy ndarray
degs -- int = degs
-- numpy ndarray
rcvcorr = (fast,tlag) | tuple | Receiver Correction
srccorr = (fast,tlag) | tuple | Source Correction
kwargs for synthetic generation:
fast = 0. | float
tlag = 0. | float
pol = 0. | float
noise = 0.001 | float
"""
def __init__(self,*args,**kwargs):
# process input
if 'pol' not in kwargs: raise Exception('Polarisation must be specified, e.g., pol=30.')
# self.pol = kwargs['pol']
# process input
if len(args) == 1 and isinstance(args[0],Pair):
self.data = args[0]
else:
self.data = Pair(*args,**kwargs)
# Derive from Measure
Measure.__init__(self, *args, **kwargs)
# MAKE MEASUREMENT
stuff = np.asarray(self.onelayer(core.crossconvmf,**kwargs))
self.mf = stuff[:,:,0].T
maxloc = core.min_idx(self.mf)
#
# # get some measurement attributes
# # Using signal to noise ratio in 2-D inspired by 3-D treatment of:
# # Jackson, Mason, and Greenhalgh, Geophysics (1991)
# self.snrsurf = (self.lam1-self.lam2) / (2*self.lam2)
# maxloc = core.max_idx(self.snrsurf)
self.fast = self.degs[maxloc]
self.lag = self.lags[maxloc]
# self.snr = self.snrsurf[maxloc]
# # get errors
self.errsurf = self.lam2
self.dfast, self.dlag = self.get_errors(surftype='min')
# Name
self.name = 'Untitled'
if 'name' in kwargs: self.name = kwargs['name']
# def conf_95(self):
# """Value of lam2 at 95% confidence contour."""
# return core.ftest(self.lam2, self.ndf(), alpha=0.05)
#
# # auto null classification
#
# def ni(self):
# """
# development.
# measure of self-similarity in measurements at 90 degree shift in fast direction
# """
# fastprof = self.fastprofile()
# halfway = int(self.degs.shape[1]/2)
# diff = fastprof - np.roll(fastprof,halfway)
# mult = fastprof * np.roll(fastprof,halfway)
# sumdiffsq = np.sum(diff**2)
# summult = np.sum(mult)
# return summult/sumdiffsq
#
def gridsearchsynth(self, func, **kwargs):
"""
Grid search for splitting parameters applied to data using the function defined in func
rcvcorr = receiver correction parameters in tuple (fast,lag)
srccorr = source correction parameters in tuple (fast,lag)
"""
# avoid using "dots" in loops for performance
synth = core.synth
        rotate = core.rotate  # needed by the grid-search loop below
# lag = core.lag
chop = core.chop
unsplit = core.unsplit
# ensure trace1 at zero angle
copy = self.data.copy()
copy.rotateto(0)
x, y = copy.x, copy.y
# pre-apply receiver correction
if 'rcvcorr' in kwargs:
rcvphi, rcvlag = self.__rcvcorr
x, y = unsplit(x, y, rcvphi, rcvlag)
######################
# inner loop function
######################
# source correction
if 'srccorr' in kwargs:
srcphi, srclag = self.__srccorr
def srccorr(x, y, ang):
x, y = unsplit(x, y, srcphi-ang, srclag)
return x, y
else:
def srccorr(x, y, ang):
return x, y
# actual inner loop function
def getout(x, y, ang, shift):
            # generate synthetics; pol is assumed to be supplied via kwargs and
            # delta to be the sample interval stored on the Pair
            mx, my = synth(split=(ang, shift), pol=kwargs.get('pol', 0.),
                           noise=0, delta=self.data.delta)
mx, my = chop(mx, my, window=self.data.window)
x, y = srccorr(x, y, ang)
x, y = chop(x, y, window=self.data.window)
return func(x, y)
# Do the grid search
prerot = [ (rotate(x, y, ang), ang) for ang in self.__degs ]
out = [ [ getout(data[0], data[1], ang, shift) for shift in self.__slags ]
for (data,ang) in prerot ]
return out
# Plotting
def plot(self,**kwargs):
# setup figure and subplots
fig = plt.figure(figsize=(12,6))
gs = gridspec.GridSpec(2, 3,
width_ratios=[1,1,2]
)
ax0 = plt.subplot(gs[0,0])
ax1 = plt.subplot(gs[0,1])
ax2 = plt.subplot(gs[1,0])
ax3 = plt.subplot(gs[1,1])
ax4 = plt.subplot(gs[:,2])
# data to plot
d1 = self.data.chop()
d1f = self.srcpoldata().chop()
d2 = self.data_corr().chop()
d2s = self.srcpoldata_corr().chop()
# flip polarity of slow wave in panel one if opposite to fast
# d1f.y = d1f.y * np.sign(np.tan(self.srcpol()-self.fast))
# get axis scaling
lim = np.abs(d2s.data()).max() * 1.1
ylim = [-lim,lim]
# original
d1f._ptr(ax0,ylim=ylim,**kwargs)
d1._ppm(ax1,lims=ylim,**kwargs)
# corrected
d2s._ptr(ax2,ylim=ylim,**kwargs)
d2._ppm(ax3,lims=ylim,**kwargs)
# error surface
if 'vals' not in kwargs:
# kwargs['vals'] = (self.lam1 - self.lam2) / self.lam2
# kwargs['title'] = r'$(\lambda_1 - \lambda_2) / \lambda_2$'
kwargs['vals'] = self.lam1 / self.lam2
kwargs['title'] = r'Misfit'
# add marker and info box by default
if 'marker' not in kwargs: kwargs['marker'] = True
if 'info' not in kwargs: kwargs['info'] = True
if 'conf95' not in kwargs: kwargs['conf95'] = True
self._psurf(ax4,**kwargs)
# title
if self.name != 'Untitled':
plt.suptitle(self.name)
# neaten
plt.tight_layout()
plt.show()
# def plot_profiles(self,**kwargs):
# # Error analysis
# fig,ax = plt.subplots(2)
# ax0 = plt.subplot(121)
# ax1 = plt.subplot(122)
#
# ax0.plot(self.degs[0,:],self.fastprofile())
# ax0.axvline(self.fast)
# ax0.axvline(self.fast-2*self.dfast,alpha=0.5)
# ax0.axvline(self.fast+2*self.dfast,alpha=0.5)
# ax0.set_title('fast direction')
#
# ax1.plot(self.lags[:,0],self.lagprofile())
# ax1.axvline(self.lag)
# ax1.axvline(self.lag-2*self.dlag,alpha=0.5)
# ax1.axvline(self.lag+2*self.dlag,alpha=0.5)
# ax1.set_title('lag direction')
#
# plt.show()
|
mit
|
bhargav/scikit-learn
|
benchmarks/bench_plot_randomized_svd.py
|
38
|
17557
|
"""
Benchmarks on the power iterations phase in randomized SVD.
We test on various synthetic and real datasets the effect of increasing
the number of power iterations in terms of quality of approximation
and running time. A number greater than 0 should help with noisy matrices,
which are characterized by a slow spectral decay.
We test several policies for normalizing the power iterations. Normalization
is crucial to avoid numerical issues.
The quality of the approximation is measured by the spectral norm discrepancy
between the original input matrix and the reconstructed one (by multiplying
the randomized_svd's outputs). The spectral norm is always equivalent to the
largest singular value of a matrix. (3) justifies this choice. However, one can
notice in these experiments that Frobenius and spectral norms behave
very similarly in a qualitative sense. Therefore, we suggest running these
benchmarks with `enable_spectral_norm = False`, as Frobenius' is MUCH faster to
compute.
The benchmarks follow.
(a) plot: time vs norm, varying number of power iterations
data: many datasets
goal: compare normalization policies and study how the number of power
iterations affect time and norm
(b) plot: n_iter vs norm, varying rank of data and number of components for
randomized_SVD
data: low-rank matrices on which we control the rank
goal: study whether the rank of the matrix and the number of components
extracted by randomized SVD affect "the optimal" number of power iterations
(c) plot: time vs norm, varying datasets
data: many datasets
goal: compare default configurations
We compare the following algorithms:
- randomized_svd(..., power_iteration_normalizer='none')
- randomized_svd(..., power_iteration_normalizer='LU')
- randomized_svd(..., power_iteration_normalizer='QR')
- randomized_svd(..., power_iteration_normalizer='auto')
- fbpca.pca() from https://github.com/facebook/fbpca (if installed)
Conclusion
----------
- n_iter=2 appears to be a good default value
- power_iteration_normalizer='none' is OK if n_iter is small, otherwise LU
gives similar errors to QR but is cheaper. That's what 'auto' implements.
References
----------
(1) Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
(2) A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
(3) An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
# Author: Giorgio Patrini
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import gc
import pickle
from time import time
from collections import defaultdict
import os.path
from sklearn.utils import gen_batches
from sklearn.utils.validation import check_random_state
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import (make_low_rank_matrix,
make_sparse_uncorrelated)
from sklearn.datasets import (fetch_lfw_people,
fetch_mldata,
fetch_20newsgroups_vectorized,
fetch_olivetti_faces,
fetch_rcv1)
try:
import fbpca
fbpca_available = True
except ImportError:
fbpca_available = False
# If this is enabled, tests are much slower and will crash with the large data
enable_spectral_norm = False
# TODO: compute approximate spectral norms with the power method as in
# Estimating the largest eigenvalues by the power and Lanczos methods with
# a random start, Jacek Kuczynski and Henryk Wozniakowski, SIAM Journal on
# Matrix Analysis and Applications, 13 (4): 1094-1122, 1992.
# This approximation is a very fast estimate of the spectral norm, but depends
# on starting random vectors.
# Determine when to switch to batch computation for matrix norms,
# in case the reconstructed (dense) matrix is too large
MAX_MEMORY = np.int(2e9)
# The following datasets can be downloaded manually from:
# CIFAR 10: http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
# SVHN: http://ufldl.stanford.edu/housenumbers/train_32x32.mat
CIFAR_FOLDER = "./cifar-10-batches-py/"
SVHN_FOLDER = "./SVHN/"
datasets = ['low rank matrix', 'lfw_people', 'olivetti_faces', '20newsgroups',
'MNIST original', 'CIFAR', 'a1a', 'SVHN', 'uncorrelated matrix']
big_sparse_datasets = ['big sparse matrix', 'rcv1']
def unpickle(file_name):
with open(file_name, 'rb') as fo:
return pickle.load(fo, encoding='latin1')["data"]
def handle_missing_dataset(file_folder):
if not os.path.isdir(file_folder):
print("%s file folder not found. Test skipped." % file_folder)
return 0
def get_data(dataset_name):
print("Getting dataset: %s" % dataset_name)
if dataset_name == 'lfw_people':
X = fetch_lfw_people().data
elif dataset_name == '20newsgroups':
X = fetch_20newsgroups_vectorized().data[:, :100000]
elif dataset_name == 'olivetti_faces':
X = fetch_olivetti_faces().data
elif dataset_name == 'rcv1':
X = fetch_rcv1().data
elif dataset_name == 'CIFAR':
        if handle_missing_dataset(CIFAR_FOLDER) == 0:
return
X1 = [unpickle("%sdata_batch_%d" % (CIFAR_FOLDER, i + 1))
for i in range(5)]
X = np.vstack(X1)
del X1
elif dataset_name == 'SVHN':
if handle_missing_dataset(SVHN_FOLDER) == 0:
return
X1 = sp.io.loadmat("%strain_32x32.mat" % SVHN_FOLDER)['X']
X2 = [X1[:, :, :, i].reshape(32 * 32 * 3) for i in range(X1.shape[3])]
X = np.vstack(X2)
del X1
del X2
elif dataset_name == 'low rank matrix':
X = make_low_rank_matrix(n_samples=500, n_features=np.int(1e4),
effective_rank=100, tail_strength=.5,
random_state=random_state)
elif dataset_name == 'uncorrelated matrix':
X, _ = make_sparse_uncorrelated(n_samples=500, n_features=10000,
random_state=random_state)
elif dataset_name == 'big sparse matrix':
sparsity = np.int(1e6)
size = np.int(1e6)
small_size = np.int(1e4)
data = np.random.normal(0, 1, np.int(sparsity/10))
data = np.repeat(data, 10)
row = np.random.uniform(0, small_size, sparsity)
col = np.random.uniform(0, small_size, sparsity)
X = sp.sparse.csr_matrix((data, (row, col)), shape=(size, small_size))
del data
del row
del col
else:
X = fetch_mldata(dataset_name).data
return X
def plot_time_vs_s(time, norm, point_labels, title):
plt.figure()
colors = ['g', 'b', 'y']
for i, l in enumerate(sorted(norm.keys())):
        if l != "fbpca":
plt.plot(time[l], norm[l], label=l, marker='o', c=colors.pop())
else:
plt.plot(time[l], norm[l], label=l, marker='^', c='red')
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, -20),
textcoords='offset points', ha='right', va='bottom')
plt.legend(loc="upper right")
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("running time [s]")
def scatter_time_vs_s(time, norm, point_labels, title):
plt.figure()
size = 100
for i, l in enumerate(sorted(norm.keys())):
        if l != "fbpca":
plt.scatter(time[l], norm[l], label=l, marker='o', c='b', s=size)
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, -80),
textcoords='offset points', ha='right',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"),
va='bottom', size=11, rotation=90)
else:
plt.scatter(time[l], norm[l], label=l, marker='^', c='red', s=size)
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, 30),
textcoords='offset points', ha='right',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"),
va='bottom', size=11, rotation=90)
plt.legend(loc="best")
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("running time [s]")
def plot_power_iter_vs_s(power_iter, s, title):
plt.figure()
for l in sorted(s.keys()):
plt.plot(power_iter, s[l], label=l, marker='o')
plt.legend(loc="lower right", prop={'size': 10})
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("n_iter")
def svd_timing(X, n_comps, n_iter, n_oversamples,
power_iteration_normalizer='auto', method=None):
"""
Measure time for decomposition
"""
print("... running SVD ...")
    if method != 'fbpca':
gc.collect()
t0 = time()
U, mu, V = randomized_svd(X, n_comps, n_oversamples, n_iter,
power_iteration_normalizer,
random_state=random_state, transpose=False)
call_time = time() - t0
else:
gc.collect()
t0 = time()
# There is a different convention for l here
U, mu, V = fbpca.pca(X, n_comps, raw=True, n_iter=n_iter,
l=n_oversamples+n_comps)
call_time = time() - t0
return U, mu, V, call_time
def norm_diff(A, norm=2, msg=True):
"""
Compute the norm diff with the original matrix, when randomized
SVD is called with *params.
norm: 2 => spectral; 'fro' => Frobenius
"""
if msg:
print("... computing %s norm ..." % norm)
if norm == 2:
# s = sp.linalg.norm(A, ord=2) # slow
value = sp.sparse.linalg.svds(A, k=1, return_singular_vectors=False)
else:
if sp.sparse.issparse(A):
value = sp.sparse.linalg.norm(A, ord=norm)
else:
value = sp.linalg.norm(A, ord=norm)
return value
def scalable_frobenius_norm_discrepancy(X, U, s, V):
# if the input is not too big, just call scipy
if X.shape[0] * X.shape[1] < MAX_MEMORY:
A = X - U.dot(np.diag(s).dot(V))
return norm_diff(A, norm='fro')
print("... computing fro norm by batches...")
batch_size = 1000
Vhat = np.diag(s).dot(V)
    cum_sq_norm = .0
    for batch in gen_batches(X.shape[0], batch_size):
        M = X[batch, :] - U[batch, :].dot(Vhat)
        # Frobenius norms of row blocks combine through their squares
        cum_sq_norm += norm_diff(M, norm='fro', msg=False) ** 2
    return np.sqrt(cum_sq_norm)
def bench_a(X, dataset_name, power_iter, n_oversamples, n_comps):
all_time = defaultdict(list)
if enable_spectral_norm:
all_spectral = defaultdict(list)
X_spectral_norm = norm_diff(X, norm=2, msg=False)
all_frobenius = defaultdict(list)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
for pi in power_iter:
for pm in ['none', 'LU', 'QR']:
print("n_iter = %d on sklearn - %s" % (pi, pm))
U, s, V, time = svd_timing(X, n_comps, n_iter=pi,
power_iteration_normalizer=pm,
n_oversamples=n_oversamples)
label = "sklearn - %s" % pm
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if fbpca_available:
print("n_iter = %d on fbca" % (pi))
U, s, V, time = svd_timing(X, n_comps, n_iter=pi,
power_iteration_normalizer=pm,
n_oversamples=n_oversamples,
method='fbpca')
label = "fbpca"
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
title = "%s: spectral norm diff vs running time" % (dataset_name)
plot_time_vs_s(all_time, all_spectral, power_iter, title)
title = "%s: Frobenius norm diff vs running time" % (dataset_name)
plot_time_vs_s(all_time, all_frobenius, power_iter, title)
def bench_b(power_list):
n_samples, n_features = 1000, 10000
data_params = {'n_samples': n_samples, 'n_features': n_features,
'tail_strength': .7, 'random_state': random_state}
dataset_name = "low rank matrix %d x %d" % (n_samples, n_features)
ranks = [10, 50, 100]
if enable_spectral_norm:
all_spectral = defaultdict(list)
all_frobenius = defaultdict(list)
for rank in ranks:
X = make_low_rank_matrix(effective_rank=rank, **data_params)
if enable_spectral_norm:
X_spectral_norm = norm_diff(X, norm=2, msg=False)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
for n_comp in [np.int(rank/2), rank, rank*2]:
label = "rank=%d, n_comp=%d" % (rank, n_comp)
print(label)
for pi in power_list:
U, s, V, _ = svd_timing(X, n_comp, n_iter=pi, n_oversamples=2,
power_iteration_normalizer='LU')
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
title = "%s: spectral norm diff vs n power iteration" % (dataset_name)
        plot_power_iter_vs_s(power_list, all_spectral, title)
title = "%s: frobenius norm diff vs n power iteration" % (dataset_name)
    plot_power_iter_vs_s(power_list, all_frobenius, title)
def bench_c(datasets, n_comps):
all_time = defaultdict(list)
if enable_spectral_norm:
all_spectral = defaultdict(list)
all_frobenius = defaultdict(list)
for dataset_name in datasets:
X = get_data(dataset_name)
if X is None:
continue
if enable_spectral_norm:
X_spectral_norm = norm_diff(X, norm=2, msg=False)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
n_comps = np.minimum(n_comps, np.min(X.shape))
label = "sklearn"
print("%s %d x %d - %s" %
(dataset_name, X.shape[0], X.shape[1], label))
U, s, V, time = svd_timing(X, n_comps, n_iter=2, n_oversamples=10,
method=label)
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if fbpca_available:
label = "fbpca"
print("%s %d x %d - %s" %
(dataset_name, X.shape[0], X.shape[1], label))
U, s, V, time = svd_timing(X, n_comps, n_iter=2, n_oversamples=2,
method=label)
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if len(all_time) == 0:
raise ValueError("No tests ran. Aborting.")
if enable_spectral_norm:
title = "normalized spectral norm diff vs running time"
scatter_time_vs_s(all_time, all_spectral, datasets, title)
title = "normalized Frobenius norm diff vs running time"
scatter_time_vs_s(all_time, all_frobenius, datasets, title)
if __name__ == '__main__':
random_state = check_random_state(1234)
power_iter = np.linspace(0, 6, 7, dtype=int)
n_comps = 50
for dataset_name in datasets:
X = get_data(dataset_name)
if X is None:
continue
print(" >>>>>> Benching sklearn and fbpca on %s %d x %d" %
(dataset_name, X.shape[0], X.shape[1]))
bench_a(X, dataset_name, power_iter, n_oversamples=2,
n_comps=np.minimum(n_comps, np.min(X.shape)))
print(" >>>>>> Benching on simulated low rank matrix with variable rank")
bench_b(power_iter)
print(" >>>>>> Benching sklearn and fbpca default configurations")
bench_c(datasets + big_sparse_datasets, n_comps)
plt.show()
|
bsd-3-clause
|
dandanvidi/capacity-usage
|
scripts/MOMA.py
|
3
|
6272
|
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 13 11:19:09 2016
@author: dan
"""
import pandas as pd
import pulp
import os
from cobra.io.sbml import create_cobra_model_from_sbml_file
import matplotlib
from cobra.flux_analysis.parsimonious import optimize_minimal_flux
import matplotlib.pyplot as plt
from cobra.manipulation.modify import convert_to_irreversible
matplotlib.rcParams['text.usetex'] = False
MEAS_FLUX_L = 'measured fluxes from Gerosa et al.'
MEAS_STDEV_L = 'standard deviation'
PRED_FLUX_L = 'projected fluxes'
RESID_L = 'residual'
gc = pd.DataFrame.from_csv("../data/carbon_sources.csv")
MFA = pd.DataFrame.from_csv('../data/MFA_Gerosa_et_al_2015.csv', header=0, index_col=0)
MFA_std = pd.DataFrame.from_csv('../data/MFA_Gerosa_et_al_2015_deviations.csv', header=0, index_col=0)
#%%
if __name__ == '__main__':
mmol_gCDW_h = {}
for condition in gc.index:
mmol_gCDW_h[condition] = {}
cs = gc["media_key"][condition]
gr = gc["growth rate [h-1]"][condition]
gr_stdev = gc["growth rate stdev [h-1]"][condition]
model = create_cobra_model_from_sbml_file('../data/iJO1366.xml')
model.reactions.get_by_id('EX_glc_e').lower_bound = 0
model.reactions.get_by_id('EX_' + cs + '_e').lower_bound = -1000 # redefine sole carbon source uptake reaction in mmol/gr/h
biomass = model.reactions.get_by_id("Ec_biomass_iJO1366_core_53p95M")
biomass.upper_bound = gr
biomass.lower_bound = gr
all_reactions = map(str, model.reactions)
all_metabolites = map(str, model.metabolites)
m = model.to_array_based_model()
bounds_df = pd.DataFrame(index=all_reactions,columns=['lower_bound','upper_bound'])
bounds_df['lower_bound'] = m.lower_bounds
bounds_df['upper_bound'] = m.upper_bounds
#%%
mfa = MFA[condition]
mfa_std = MFA_std[condition]
df = pd.DataFrame(index=mfa.index, columns=['mean','stdev'])
df['mean'] = mfa
df['stdev'] = mfa_std
#%%
flux_means = df.iloc[:, 0]
flux_stderr = df.iloc[:, 1]
fluxes_df = pd.DataFrame(index=all_reactions)
fluxes_df.loc[flux_means.index, MEAS_FLUX_L] = flux_means
fluxes_df.loc[flux_means.index, MEAS_STDEV_L] = flux_stderr
#%%
pulp_solver = pulp.CPLEX(msg=0)
lp = pulp.LpProblem("FLUX_L1", pulp.LpMinimize)
measured_reactions = list(flux_means.index)
v_pred = pulp.LpVariable.dicts('v_pred', all_reactions)
v_meas = pulp.LpVariable.dicts('v_meas', measured_reactions)
v_resid = pulp.LpVariable.dicts('residual', measured_reactions)
# add flux bounds
for i in all_reactions:
lp += (v_pred[i] >= bounds_df.loc[i, 'lower_bound']), 'lower_bound_%s' % i
lp += (v_pred[i] <= bounds_df.loc[i, 'upper_bound']), 'upper_bound_%s' % i
# add constraint for each measured reaction i:
# |v_meas[i] - flux_means[i]| <= flux_stderr[i]
# v_resid[i] >= |v_pred[i] - v_meas[i]|
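        # For example, with flux_means[i] = 5 and flux_stderr[i] = 1, v_meas[i]
        # is free to settle anywhere in [4, 6], while the two "abs_diff"
        # inequalities below force v_resid[i] >= |v_pred[i] - v_meas[i]|;
        # minimizing the sum of residuals then drives each v_resid[i] down to
        # exactly that absolute gap.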
for i in measured_reactions:
lp += (v_meas[i] <= flux_means[i] + flux_stderr[i]), 'measured_upper_%s' % i
lp += (v_meas[i] >= flux_means[i] - flux_stderr[i]), 'measured_lower_%s' % i
lp += (v_pred[i] - v_resid[i] <= v_meas[i]), 'abs_diff_upper_%s' % i
lp += (-v_pred[i] - v_resid[i] <= -v_meas[i]), 'abs_diff_lower_%s' % i
# also set the objective to be minimizing sum_i abs_diff[i]
objective = pulp.lpSum(v_resid.values())
lp.setObjective(objective)
# add stoichiometric constraints for all internal metabolites: S_int * v = 0
for i,j in enumerate(m.S):
row = [l * v_pred[all_reactions[k]] for k,l in zip(j.rows[0],j.data[0])]
lp += (pulp.lpSum(row) == 0), 'mass_balance_%s' % all_metabolites[i]
lp.solve()
lp.writeLP("flux_mapping.lp")
fluxes_df.loc[all_reactions, PRED_FLUX_L] = \
map(lambda i: pulp.value(v_pred[i]), all_reactions)
fluxes_df.loc[measured_reactions, RESID_L] = \
map(lambda i: pulp.value(v_resid[i]), measured_reactions)
        fluxes_df /= pulp.value(v_pred['Ec_biomass_iJO1366_core_53p95M']) # normalize all fluxes to the biomass flux (i.e. set it to 1)
#%%
fig, axs = plt.subplots(1, 2, figsize=(14,6))
fig.subplots_adjust(wspace=0.5)
axs[0].plot([-50, 50], [-50, 50], 'k', alpha=0.3, linewidth=0.5)
fluxes_df.plot(kind='scatter', x=MEAS_FLUX_L, y=PRED_FLUX_L,
xerr=MEAS_STDEV_L, ax=axs[0], linewidth=0, s=10,
color=(0.7,0.2,0.5))
axs[0].set_title(condition)
# for i in measured_reactions:
# xy = fluxes_df.loc[i, [MEAS_FLUX_L, PRED_FLUX_L]]
# axs[0].annotate(i, xy, xytext=(10,-5), textcoords='offset points',
# family='sans-serif', fontsize=10, color='darkslategrey')
fluxes_df.loc[~pd.isnull(fluxes_df[RESID_L]), RESID_L].plot(kind='barh',
ax=axs[1], color=(0.7,0.2,0.5))
axs[1].set_xlabel('residual [mmol/gCDW/h]')
fig.savefig('flux_projection.pdf')
fluxes_df.to_pickle('measured_fluxes.pkl')
fluxes_df.to_csv('measured_fluxes.csv')
#%%
# biomass = model.reactions.get_by_id("Ec_biomass_iJO1366_core_53p95M")
# biomass.upper_bound = 0.65
for rid in measured_reactions:
if fluxes_df["projected fluxes"][rid] < 0 :
continue
r = model.reactions.get_by_id(rid)
r.lower_bound = fluxes_df["projected fluxes"][rid]
r.upper_bound = fluxes_df["projected fluxes"][rid]
convert_to_irreversible(model)
pFBA = optimize_minimal_flux(model, already_irreversible=True)
reactions = map(str, model.reactions)
for r in reactions:
mmol_gCDW_h[condition][r] = pFBA.x_dict[r]
mmol_gCDW_h = pd.DataFrame.from_dict(mmol_gCDW_h)
mmol_gCDW_h.to_csv("../data/mmol_gCDW_h.csv")
|
mit
|
nagyistoce/kaggle-galaxies
|
predict_augmented_npy_8433n_maxout2048_pysex.py
|
7
|
9590
|
"""
Load an analysis file and redo the predictions on the validation set / test set,
this time with augmented data and averaging. Store them as numpy files.
"""
import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
BATCH_SIZE = 32 # 16
NUM_INPUT_FEATURES = 3
CHUNK_SIZE = 8000 # 10000 # this should be a multiple of the batch size
# ANALYSIS_PATH = "analysis/try_convnet_cc_multirot_3x69r45_untied_bias.pkl"
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_8433n_maxout2048_pysex.pkl"
DO_VALID = True # disable this to not bother with the validation set evaluation
DO_TEST = True # disable this to not generate predictions on the testset
target_filename = os.path.basename(ANALYSIS_PATH).replace(".pkl", ".npy.gz")
target_path_valid = os.path.join("predictions/final/augmented/valid", target_filename)
target_path_test = os.path.join("predictions/final/augmented/test", target_filename)
print "Loading model data etc."
analysis = np.load(ANALYSIS_PATH)
input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)]
num_input_representations = len(ds_transforms)
# split training data into training + a small validation set
num_train = load_data.num_train
num_valid = num_train // 10 # integer division
num_train -= num_valid
num_test = load_data.num_test
valid_ids = load_data.train_ids[num_train:]
train_ids = load_data.train_ids[:num_train]
test_ids = load_data.test_ids
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train+num_valid)
test_indices = np.arange(num_test)
y_valid = np.load("data/solutions_train.npy")[num_train:]
print "Build model"
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=8, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=4, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
j3 = layers.MultiRotMergeLayer(l3s, num_views=4) # 2) # merge convolutional parts
# l4 = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5)
l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
# l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity)
l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
# l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
idx = T.lscalar('idx')
givens = {
l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens)
print "Load model parameters"
layers.set_param_values(l6, analysis['param_values'])
print "Create generators"
# set here which transforms to use to make predictions
augmentation_transforms = []
for zoom in [1 / 1.2, 1.0, 1.2]:
for angle in np.linspace(0, 360, 10, endpoint=False):
augmentation_transforms.append(ra.build_augmentation_transform(rotation=angle, zoom=zoom))
augmentation_transforms.append(ra.build_augmentation_transform(rotation=(angle + 180), zoom=zoom, shear=180)) # flipped
print " %d augmentation transforms." % len(augmentation_transforms)
augmented_data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms, processor_class=ra.LoadAndProcessFixedPysexCenteringRescaling)
valid_gen = load_data.buffered_gen_mp(augmented_data_gen_valid, buffer_size=1)
augmented_data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms, processor_class=ra.LoadAndProcessFixedPysexCenteringRescaling)
test_gen = load_data.buffered_gen_mp(augmented_data_gen_test, buffer_size=1)
approx_num_chunks_valid = int(np.ceil(num_valid * len(augmentation_transforms) / float(CHUNK_SIZE)))
approx_num_chunks_test = int(np.ceil(num_test * len(augmentation_transforms) / float(CHUNK_SIZE)))
print "Approximately %d chunks for the validation set" % approx_num_chunks_valid
print "Approximately %d chunks for the test set" % approx_num_chunks_test
if DO_VALID:
print
print "VALIDATION SET"
print "Compute predictions"
predictions_list = []
start_time = time.time()
for e, (chunk_data, chunk_length) in enumerate(valid_gen):
print "Chunk %d" % (e + 1)
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
print " load data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE)))
        # make predictions, don't forget to cut off the zeros at the end
predictions_chunk_list = []
for b in xrange(num_batches_chunk):
if b % 1000 == 0:
print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_chunk_list.append(predictions)
predictions_chunk = np.vstack(predictions_chunk_list)
predictions_chunk = predictions_chunk[:chunk_length] # cut off zeros / padding
print " compute average over transforms"
predictions_chunk_avg = predictions_chunk.reshape(-1, len(augmentation_transforms), 37).mean(1)
predictions_list.append(predictions_chunk_avg)
time_since_start = time.time() - start_time
print " %s since start" % load_data.hms(time_since_start)
all_predictions = np.vstack(predictions_list)
print "Write predictions to %s" % target_path_valid
load_data.save_gz(target_path_valid, all_predictions)
print "Evaluate"
rmse_valid = analysis['losses_valid'][-1]
rmse_augmented = np.sqrt(np.mean((y_valid - all_predictions)**2))
print " MSE (last iteration):\t%.6f" % rmse_valid
print " MSE (augmented):\t%.6f" % rmse_augmented
if DO_TEST:
print
print "TEST SET"
print "Compute predictions"
predictions_list = []
start_time = time.time()
for e, (chunk_data, chunk_length) in enumerate(test_gen):
print "Chunk %d" % (e + 1)
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
print " load data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE)))
        # make predictions, don't forget to cut off the zeros at the end
predictions_chunk_list = []
for b in xrange(num_batches_chunk):
if b % 1000 == 0:
print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_chunk_list.append(predictions)
predictions_chunk = np.vstack(predictions_chunk_list)
predictions_chunk = predictions_chunk[:chunk_length] # cut off zeros / padding
print " compute average over transforms"
predictions_chunk_avg = predictions_chunk.reshape(-1, len(augmentation_transforms), 37).mean(1)
predictions_list.append(predictions_chunk_avg)
time_since_start = time.time() - start_time
print " %s since start" % load_data.hms(time_since_start)
all_predictions = np.vstack(predictions_list)
print "Write predictions to %s" % target_path_test
load_data.save_gz(target_path_test, all_predictions)
print "Done!"
|
bsd-3-clause
|
akshaykamath/ReviewPredictionYelp
|
PosNegWords.py
|
1
|
5494
|
__author__ = 'Pratish'
import codecs
import re
from collections import Counter
from nltk.tokenize import word_tokenize
import sys
from nltk.corpus import stopwords
import matplotlib.pyplot as plt
from os import path
from wordcloud import WordCloud
reload(sys)
sys.setdefaultencoding('utf-8')
inFile = codecs.open("C:\Users\Pratish\Documents\Assignments\Social Media Minning\Final Project\Sentences.txt","r","utf-8",errors='ignore')
dictFile = codecs.open("C:\Users\Pratish\Documents\Assignments\Social Media Minning\Final Project\Output Files\SentiWords_1.0.txt","r","UTF-8")
wordsDict = Counter()
countWords = Counter()
dictcount = 0
# Creating dictionary from wordnet
while (1):
readLine = dictFile.readline()
if (not readLine ): break
readLine = re.sub(r"[\n]","",readLine)
dictcount += 1
#print readLine
score = readLine.split("\t")
# print("Length "+str(len(score)))
# for i in score:
# print(" Word " + i )
if (len(score) == 2 and any(score[1]) ):
wordsDict[score[0].split("#")[0]] += float(score[1])
countWords[score[0].split("#")[0]] += 1.0
else:
print ("Some problem in line " + str(dictcount))
break
# #Take average for repeated words
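# e.g. a lemma whose SentiWords entries score 0.2 and 0.4 ends up with an
# averaged wordsDict value of 0.3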
for keys,value in wordsDict.items():
wordsDict[keys] = value / countWords[keys]
negative_words = ["no","not","none","noone","Nobody","Nothing","Neither","Nowhere","Never","Hardly","Scarcely","Barely" ]
def calculateWeigth(x):
val1 = wordsDict[x]
# if ( val1 > 0 ): val1 += 1
# else: val1 = val1 * -1
return val1
count = 0
sentence_score = 0
sentence_length = 0
sentences_list =[]
hasNeg = 0
neg_count = []
words_score_pos = Counter()
words_score_neg = Counter()
posWordFreq = Counter()
negWordFreq = Counter()
filter_sent = []
while (1) :
reviewText = inFile.readline().lower()
reviewText = re.sub(r"[?|$|.'!]","",reviewText)
# print reviewText
if (not reviewText): break
for x in negative_words:
# print x
if(reviewText.find(x+" ") != -1):
hasNeg += 1
neg_count.append(x)
reviewTextList = word_tokenize(reviewText)
sentences_list.append(reviewTextList)
sentence_length = len(reviewTextList)
# print("Sentence length" + str(sentence_length))
# print(reviewTextList)
#sent_val = nltk.pos_tag(reviewTextList)
for index,x in enumerate(reviewTextList):
#print x
if (len(x) > 2):
value = calculateWeigth(x)
if (value > 0 ) :
words_score_pos [x] = value
posWordFreq[x] += 1
elif (value < 0):
words_score_neg[x] = value * -1
negWordFreq[x] += 1
sentence_score += value
if (sentence_length != 0):
print("ScoreW/O Div : " + str(sentence_score)),
sentence_score = sentence_score/sentence_length
print (" Score Div : " +str(sentence_length))
if ((count % 1000) == 0): print(count)
if (count == 5000): break
count += 1
#Most positive & negative words
outFile3 = codecs.open("C:\Users\Pratish\Documents\Assignments\Social Media Minning\Final Project\Output Files\MostPostiveWords.txt","w", "UTF-8")
counter = 0
for keys,value in sorted(words_score_pos.iteritems(), key=lambda (k,v): (v,k),reverse=True):
outFile3.write(keys + ";" + str(value)+"\n")
print(keys +';'+ str(value*10))
counter += 1
if(counter > 300): break
outFile4 = codecs.open("C:\Users\Pratish\Documents\Assignments\Social Media Minning\Final Project\Output Files\MostNegativeWords.txt","w", "UTF-8")
print("-") * 50
counter = 0
for keys,value in sorted(words_score_neg.iteritems(), key=lambda (k,v): (v,k),reverse=True):
outFile4.write(keys + ";"+ str(value)+"\n")
print(keys +':'+ str(value))
counter += 1
if(counter > 300): break
#Most positive and negative occurring words
def checkStopWord(word):
stopword = stopwords.words('english')
if (word in stopword) : return True
return False
mostPosOccWordList = []
outFile1 = codecs.open("C:\Users\Pratish\Documents\Assignments\Social Media Minning\Final Project\Output Files\MostOccPostiveWords.txt","w", "UTF-8")
counter = 0
for keys,value in sorted(posWordFreq.iteritems(), key=lambda (k,v): (v,k),reverse=True):
print(str(keys) + " " + str(value) )
if (not checkStopWord(keys)):
outFile1.write(keys+";"+ str(value)+"\n")
mostPosOccWordList.append([str(keys),int(value)])
counter += 1
if(counter > 300): break
print("-") * 50
outFile2 = codecs.open("C:\Users\Pratish\Documents\Assignments\Social Media Minning\Final Project\Output Files\MostOccNegativeWords.txt","w", "UTF-8")
mostNegOccWordList = []
counter = 0
for keys,value in sorted(negWordFreq.iteritems(), key=lambda (k,v): (v,k),reverse=True):
print(str(keys) + " " + str(value))
if (not checkStopWord(keys)):
outFile2.write(keys + ";"+ str(value)+"\n")
mostNegOccWordList.append([str(keys),int(value)])
counter += 1
if(counter > 300): break
# wcMostOccPosWords = WordCloud(max_font_size = 40, relative_scaling =.5, background_color='white').generate_from_frequencies(mostPosOccWordList)
# fig = plt.figure()
# fig.patch.set_facecolor('blue')
# fig.patch.set_alpha(0.7)
# plt.imshow(wcMostOccPosWords)
# plt.axis("off")
#
# fig.savefig('temp.png', facecolor=fig.get_facecolor(), transparent=True,edgecolor='none')
#
# plt.show()
inFile.close()
dictFile.close()
|
mit
|
HolgerPeters/scikit-learn
|
sklearn/kernel_ridge.py
|
48
|
6731
|
"""Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, laplacian, polynomial, exponential chi2
and sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
dual_coef_ : array, shape = [n_samples] or [n_samples, n_targets]
Representation of weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
if sample_weight is not None and not isinstance(sample_weight, float):
sample_weight = check_array(sample_weight, ensure_2d=False)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
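        # The dual coefficients are the closed-form kernel ridge solution of
        # (K + alpha * I) dual_coef_ = y; sample weights, when given, are
        # folded into K and y by the solver.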
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
|
bsd-3-clause
|
4c656554/ukgds2nx
|
ukgds2nx.py
|
1
|
10090
|
# Copyright (c) 2015 Lee J. Thomas <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#------------------------------------------------------------------------------
""" Defines a parser for extracting network data stored according to the United
Kingdom Generic Distribution System (UKGDS) format in an Excel spreadsheet.
Converts the data to NetworkX MultiGraph format and saves as a .gpickle file.
@see: Foote C., Djapic P,. Ault G., Mutale J., Burt G., Strbac G., "United
Kingdom Generic Distribution System (UKGDS) Software Tools", The Centre for
Distributed Generation and Sustainable Electrical Energy, February 2006
"""
#------------------------------------------------------------------------------
# Imports:
#------------------------------------------------------------------------------
# Operating system routines.
import os
# Numpy lib for convenient data storage/access
import numpy as np
# Pandas lib for convenient data storage/access
import pandas as pd
# Matplotlib lib for plotting
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib.mlab import griddata
import networkx as nx
#------------------------------------------------------------------------------
# "ukgds2nx" function:
#------------------------------------------------------------------------------
def ukgds2nx(infile, outfile):
""" Extracts UKGDS data from an Excel spreadsheet and writes to gpickle file
"""
# Check that the path to the inputfile is valid.
if not os.path.isfile(infile):
raise NameError, "%s is not a valid filename" % infile
# System sheet ---------------------------------------------------------------
try:
sys_data=pd.read_excel(infile,'System',skiprows=28)
except:
print 'No system tab'
# Bus sheet ---------------------------------------------------------------
try:
bus_data=pd.read_excel(infile,'Buses',skiprows=28,parse_cols=range(1,12))
except:
print 'No Buses tab'
# Loads sheet ---------------------------------------------------------------
try:
load_data=pd.read_excel(infile,'Loads',skiprows=28,parse_cols=range(1,7))
except:
print 'No Loads tab'
# Generators sheet ---------------------------------------------------------------
try:
gen_data=pd.read_excel(infile,'Generators',skiprows=28,parse_cols=range(1,17))
except:
print 'No Generators tab'
# Transformers sheet ---------------------------------------------------------------
try:
tx_data=pd.read_excel(infile,'Transformers',skiprows=28,parse_cols=range(1,23))
except:
print 'No Transformers tab'
# IndGenerators sheet ---------------------------------------------------------------
try:
indgen_data=pd.read_excel(infile,'IndGenerators',skiprows=28,parse_cols=range(1,23))
except:
print 'No IndGenerators tab'
# Shunts sheet ---------------------------------------------------------------
try:
shunts_data=pd.read_excel(infile,'Shunts',skiprows=28,parse_cols=range(1,6))
except:
print 'No Shunts tab'
# Branches sheet ---------------------------------------------------------------
try:
branch_data=pd.read_excel(infile,'Branches',skiprows=28,parse_cols=range(1,15))
except:
print 'No Branches tab'
# Taps sheet ---------------------------------------------------------------
try:
taps_data=pd.read_excel(infile,'Taps',skiprows=28,parse_cols=range(1,23))
except:
print 'No Taps tab'
#--------------------------------------------------------------------------
# Create networkx graph
#--------------------------------------------------------------------------
G = nx.MultiGraph()
dupedgeind=0
ind=0
for index,bus in bus_data.iterrows():
G.add_node(int(bus['BNU']),attr_dict=dict(bus))
ind+=1
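    # Branches and transformers may connect the same pair of buses more than
    # once; dupedgeind is used as the MultiGraph edge key so that such parallel
    # edges are kept distinct rather than overwritten.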
for index,b in branch_data.iterrows():
if G.has_edge(int(b['CFB']),int(b['CTB'])):
dupedgeind=dupedgeind+1
G.add_edge(int(b['CFB']),int(b['CTB']),key=dupedgeind,len=b['CLE'],attr_dict=dict(b))
else:
G.add_edge(int(b['CFB']),int(b['CTB']),key=dupedgeind,len=b['CLE'],attr_dict=dict(b))
dupedgeind=0
dupedgeind=0
for index,t in tx_data.iterrows():
if G.has_edge(int(t['TFB']),int(t['TTB'])):
dupedgeind=dupedgeind+1
G.add_edge(int(t['TFB']),int(t['TTB']),key=dupedgeind,len=0.01,attr_dict=dict(t))
else:
G.add_edge(int(t['TFB']),int(t['TTB']),key=dupedgeind,len=0.01,attr_dict=dict(t))
dupedgeind=0
ind=0
for index,load in load_data.iterrows():
G.add_node(int(load['LBN']),attr_dict=dict(load))
ind+=1
ind=0
for index,gen in gen_data.iterrows():
G.add_node(int(gen['GBN']),attr_dict=dict(gen))
ind+=1
G.add_node(int(sys_data.Value[sys_data.Symbol=='SSB'].values[0]),isSlack=True,BaseMVA=sys_data.Value[sys_data.Symbol=='SMB'].values[0],desc=sys_data.Value[sys_data.Symbol=='STD'].values[0])
ind=0
#--------------------------------------------------------------------------
# Write the text to the gpickle file:
#--------------------------------------------------------------------------
nx.write_gpickle(G,outfile+'.gpickle')
def plotmap(G,fileref='ukgds2nxFigure',graphprogram='neato',tag=''):
""" Generates node positions and plots UKGDS NetworkX Graph. No indication for parallel edges (lines) between nodes. Saves as a .png
"""
voltages = np.unique([G.node[x]['BBV'] for x,y in G.edges()])
colours = ['b','b','k','#808000','g','r','r','r','y']
thickness = [0.5*x for x in [1,1,1,1,1,1,1,1,1]]
alpha = [0.5*x for x in [1,1,1,1,1,1,1,1,1]]
colourmaps = [plt.cm.Blues,plt.cm.Blues,plt.cm.Greys,plt.cm.OrRd,plt.cm.summer,plt.cm.autumn,plt.cm.autumn,plt.cm.autumn,plt.cm.autumn]
pos=nx.graphviz_layout(G,prog=graphprogram) #This generates the node positions
for p,q in pos.iteritems():
G.add_node(p,pos=q)
notxedges = [(x,y,d) for x,y,d in G.edges(data=True) if 'CLE' in d]
labels=dict(((x,y),str(d['CLE'])+'km') for x,y,d in G.edges(data=True) if 'CLE' in d)
#--------------------------------------------------------------------------
# Plot for 11kV nodes
#--------------------------------------------------------------------------
nodes = [x for x,d in G.nodes(data=True) if d['BBV']==11]
#print nodes
SG = G.subgraph(nodes)
poslabels={}
for sgn in SG.nodes():
x,y = pos[sgn]
poslabels[sgn] = (x,y+100)
nodelabels = dict((x,x) for x,y in SG.nodes(data=True))
#print nx.get_node_attributes(SG,'pos')
#pos=[dict(n,pp) for n,pp in zip(SG.nodes(),[(p[0],p[1]+1) for x,p in nx.get_node_attributes(SG,'pos')])]
##print pos
#nx.draw_networkx_labels(SG,pos=poslabels,labels=nodelabels,font_size=8,alpha=0.6)
nx.draw_networkx_edges(SG,nx.get_node_attributes(SG,'pos'),edgelist=SG.edges(),edge_color='r',style='-',width=3,with_labels=False,alpha=0.4)
nx.draw(SG,nx.get_node_attributes(SG,'pos'),node_size=10,with_labels=False,node_color='r',alpha=0.4)
#--------------------------------------------------------------------------
# Plot for 33kV nodes
#--------------------------------------------------------------------------
nodes = [x for x,d in G.nodes(data=True) if d['BBV']==33]
#print nodes
SG = G.subgraph(nodes)
poslabels={}
for sgn in SG.nodes():
x,y = pos[sgn]
poslabels[sgn] = (x,y-100)
nodelabels = dict((x,x) for x,y in SG.nodes(data=True))
#print nx.get_node_attributes(SG,'pos')
nx.draw_networkx_labels(SG,pos=poslabels,labels=nodelabels,font_size=8,alpha=0.8)
nx.draw(SG,nx.get_node_attributes(SG,'pos'),node_size=20,with_labels=False,node_color='g',edge_color='g',style='-',width=3,alpha=0.4)
#--------------------------------------------------------------------------
# Plot for 132kV nodes
#--------------------------------------------------------------------------
nodes = [x for x,d in G.nodes(data=True) if d['BBV']==132]
#print nodes
SG = G.subgraph(nodes)
poslabels={}
for sgn in SG.nodes():
x,y = pos[sgn]
poslabels[sgn] = (x,y+100)
nodelabels = dict((x,x) for x,y in SG.nodes(data=True))
#print nx.get_node_attributes(SG,'pos')
nx.draw_networkx_labels(SG,pos=poslabels,labels=nodelabels,font_size=10,alpha=0.8)
nx.draw_networkx_edges(SG,nx.get_node_attributes(SG,'pos'),edgelist=SG.edges(),edge_color='k',style='-',width=3,with_labels=False,alpha=0.4)
nx.draw(SG,nx.get_node_attributes(SG,'pos'),node_size=30,with_labels=False,node_color='k',alpha=0.4)
#nx.draw_networkx_edge_labels(G,nx.get_node_attributes(G,'pos'),edge_labels=labels,rotate=False,font_size=7)
    plt.savefig(fileref+str(graphprogram)+str(tag)+'.png',dpi=300)
|
mit
|
wathen/PhD
|
MHD/FEniCS/DGLaplacian/DGLaplacian.py
|
1
|
2590
|
from dolfin import *
import ipdb
import numpy as np
import matplotlib.pylab as plt
m =9
err = np.zeros((m-1,1))
N = np.zeros((m-1,1))
errh1 = np.zeros((m-1,1))
nn = 2
for xx in xrange(1,m):
# Create mesh and define function space
n = 2**xx
N[xx-1] = n
mesh = UnitSquareMesh(n,n)
tic()
V = FunctionSpace(mesh, "DG",2 )
print 'time to create function spaces',toc(),'\n\n'
# Define test and trial functions
v = TestFunction(V)
u = TrialFunction(V)
def boundary(x, on_boundary):
return on_boundary
u0 = Expression('x[0]*x[1]')
# # p0 = ('0')
# bcs = DirichletBC(V,u0, boundary)
# Define normal component, mesh size and right-hand side
n = FacetNormal(mesh)
h = CellSize(mesh)
h_avg = (h('+') + h('-'))/2
f = Expression('-2*(x[0]*x[0]-x[0]) - 2*(x[1]*x[1]-x[1])')
# Define parameters
alpha = 10.0
gamma = 10.0
# Define variational problem
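    # The bilinear form below is the classic symmetric interior penalty (SIPG)
    # discretisation of -Laplace(u): broken gradient term, consistency and
    # symmetry terms on interior facets, a jump penalty scaled by alpha/h_avg,
    # and Nitsche-style weak enforcement of the Dirichlet data u0 on the
    # boundary with penalty gamma/h.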
tic()
a = dot(grad(v), grad(u))*dx \
- dot(avg(grad(v)), jump(u, n))*dS \
- dot(jump(v, n), avg(grad(u)))*dS \
+ alpha/h_avg*dot(jump(v, n), jump(u, n))*dS \
- dot(v*n, grad(u))*ds \
- dot(grad(v), u*n)*ds \
+ gamma/h*v*u*ds
L = v*f*dx + gamma/h*u0*v*ds - inner(grad(v),n)*u0*ds
AA,bb = assemble_system(a,L)
    print 'time to create linear system',toc(),'\n\n'
# Compute solution
u = Function(V)
tic()
set_log_level(PROGRESS)
solver = KrylovSolver("cg","hypre_amg")
solver.parameters["relative_tolerance"] = 1e-6
solver.parameters["absolute_tolerance"] = 1e-6
solver.solve(AA,u.vector(),bb)
set_log_level(PROGRESS)
print 'time to solve linear system', toc(),'\n\n'
# solve(a == L,u,bcs)
ue = Expression('x[0]*x[1]*(x[1]-1)*(x[0]-1) + x[0]*x[1]')
# ue = Expression('x[0]*x[1]*x[2]*(x[1]-1)*(x[2]-1)*(x[0]-1)')
erru = ue- Function(V,u)
err[xx-1]=errornorm(ue,Function(V,u),norm_type="L2", degree_rise=3,mesh=mesh)
errh1[xx-1]=errornorm(ue,Function(V,u),norm_type="H1", degree_rise=3,mesh=mesh)
print 'L2',err[xx-1]
print 'H1',errh1[xx-1]
# print sqrt(assemble(dolfin.dot(grad(erru),grad(erru))*dx))
# Plot solution
# plot(u, interactive=True)
plt.loglog(N,err)
plt.title('Error plot for DG1 elements - L2 convergence = %f' % np.log2(np.average((err[0:m-2]/err[1:m-1]))))
plt.xlabel('N')
plt.ylabel('L2 error')
plt.figure()
plt.loglog(N,errh1)
plt.title('Error plot for DG1 elements - H1 convergence = %f' % np.log2(np.average((errh1[0:m-2]/errh1[1:m-1]))))
plt.xlabel('N')
plt.ylabel('H1 error')
plt.show()
|
mit
|
Natetempid/nearfield
|
GUI_Layout_1/GUI_Layout_1/subframe_keithley_control.py
|
1
|
15116
|
import Tkinter as tk
import tkMessageBox
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
from matplotlib import pyplot as plt
import matplotlib.animation as animation
import datetime
import time
import ttk
import threading
#from frame_fluke8808a_control import fluke8808a_control_frame
import Queue as q
import numpy as np
class keithley_control_subframe(tk.Frame):
def __init__(self, master, controller, root, keithley ):
tk.Frame.__init__(self, master)
self.master = master
self.root = root
self.controller = controller
self.keithley = keithley
self.keithleyframe = None
self.outputon = False
self.rampthread = threading.Thread()
self.rampevent = threading.Event()
self.rampevent.set()
self.rampthread_active = False
#self.grid_rowconfigure(0,weight = 1)
#self.grid_columnconfigure(0, weight = 1)
self.config(borderwidth = 5, relief = tk.GROOVE)
self.headingstr = tk.StringVar()
self.headingstr.set('Keithley')
self.headinglbl = tk.Label(self, textvariable = self.headingstr, font = ('tkDefaultFont', 18))
self.headinglbl.grid(row = 0, column = 0, sticky = 'new')
self.maxminlblframe = tk.LabelFrame(self, text = 'Max/Min Values')
self.maxminlblframe.grid(row = 1, column = 0, sticky = 'new')
self.maxminlblframe.grid_columnconfigure(0, weight = 1)
self.maxminsubframe = tk.Frame(self.maxminlblframe)
self.maxminsubframe.grid(row = 0, column = 0, sticky = 'nsew')
self.maxminsubframe.grid_columnconfigure(0, weight = 1)
self.maxminsubframe.grid_columnconfigure(1, weight = 1)
self.maxlbl = tk.Label(self.maxminsubframe, text = 'Max Limit (V)')
self.maxlbl.grid(row = 0, column = 0, sticky ='nsew')
self.minlbl = tk.Label(self.maxminsubframe, text = 'Min Limit (V)')
self.minlbl.grid(row = 0, column = 1, sticky ='nsew')
self.maxstr = tk.StringVar()
self.maxstr.set(str(self.keithley.maxvoltage))
self.maxentry = tk.Entry(self.maxminsubframe, textvariable = self.maxstr)
self.maxentry.grid(row = 1, column = 0, sticky = 'nsew')
self.minstr = tk.StringVar()
self.minstr.set(str(self.keithley.minvoltage))
self.minentry = tk.Entry(self.maxminsubframe, textvariable = self.minstr)
self.minentry.grid(row = 1, column = 1, sticky = 'nsew')
self.setlimitsbtn = ttk.Button(self.maxminlblframe, text = 'Update Keithley Limits', command = lambda: self.updatekeithleylimits() )
self.setlimitsbtn.grid(row = 1, column = 0, sticky = 'nsew' )
self.outputonoffframe = tk.Frame(self, borderwidth = 5, relief = tk.GROOVE)
self.outputonoffframe.grid(row = 2, column = 0, sticky = 'ew')
self.outputonoffframe.grid_columnconfigure(1, weight = 1)
self.onoffindicator_canvas = tk.Canvas(self.outputonoffframe, width = 25, height = 25)
self.onoffindicator_canvas.grid(row = 0, column = 0, sticky = 'ns')
self.onoffindicator = self.onoffindicator_canvas.create_oval(5,5,20,20, fill = 'gray38')
        self.outputonoffbtn = ttk.Button(self.outputonoffframe, text = 'Output On/Off', command = lambda: self.outputonoffclick() )
self.outputonoffbtn.grid(row = 0, column = 1, sticky = 'nsew')
self.lastreadinglblframe = tk.LabelFrame(self, text = 'Last Reading')
self.lastreadinglblframe.grid(row = 3, column = 0, sticky = 'new')
self.lastreadinglblframe.grid_rowconfigure(0, weight = 1)
self.lastreadinglblframe.grid_columnconfigure(0, weight = 1)
self.lastreadingstr = tk.StringVar()
self.lastreadingstr.set('0')
self.lastreadinglbl = tk.Label(self.lastreadinglblframe, textvariable = self.lastreadingstr)
self.lastreadinglbl.grid(row = 0, column = 0, sticky = 'nsew')
self.voltagesteplblframe = tk.LabelFrame(self, text = 'Voltage Step (V)')
self.voltagesteplblframe.grid(row = 4, column = 0, sticky = 'new')
self.voltagesteplblframe.grid_rowconfigure(0, weight = 1)
self.voltagesteplblframe.grid_columnconfigure(0, weight = 1)
self.voltagestepstr = tk.StringVar()
self.voltagestepstr.set('1')
self.voltagesteplbl = tk.Entry(self.voltagesteplblframe, textvariable = self.voltagestepstr )
self.voltagesteplbl.grid(row = 0, column = 0, sticky = 'nsew')
self.updownframe = tk.Frame(self)
self.updownframe.grid(row = 5, column = 0, sticky = 'new')
self.updownframe.grid_columnconfigure(0, weight = 1)
upimage = tk.PhotoImage(file = 'up.gif')
downimage = tk.PhotoImage(file = 'down.gif')
self.upbtn = tk.Button(self.updownframe, image = upimage, bg = 'white', command = lambda: self.incrementVoltage())
self.upbtn.grid(row = 0, column = 0, sticky = 'ew')
self.downbtn = tk.Button(self.updownframe, image = downimage, bg = 'white', command = lambda: self.decrementVoltage())
self.downbtn.grid(row = 1, column = 0, sticky = 'ew')
self.upbtn.image = upimage
self.downbtn.image = downimage
self.bindingcheckvar = tk.IntVar()
self.bindingcheckvar.set(0)
self.bindingcheckbtn = tk.Checkbutton(self.updownframe, text = 'Bind Arrow Keys', variable = self.bindingcheckvar, command = lambda: self.bindarrowkeys())
self.bindingcheckbtn.grid(row = 2, column = 0, sticky = 'ew')
#abort and turn off buttons
self.offbtnsframe = tk.Frame(self)
self.offbtnsframe.grid(row = 6, column = 0, sticky = 'new')
self.offbtnsframe.grid_rowconfigure(0, weight = 1)
self.offbtnsframe.grid_columnconfigure(0, weight = 1)
#self.offbtnsframe.grid_columnconfigure(1, weight = 1)
# self.offbtn = tk.Button(self.offbtnsframe, text = 'Off', bg = 'blue', fg = 'white', command = lambda: self.turnoff(), font = ('tkDefaultFont', 14))
# self.offbtn.grid(row = 0, column = 0, sticky = 'new')
self.abortbtn = tk.Button(self.offbtnsframe, text = 'Abort', bg = 'red', command = lambda: self.abort(), font = ('tkDefaultFont', 14) )
self.abortbtn.grid(row = 0, column = 0, sticky = 'new')
self.autorampframe = tk.Frame(self, borderwidth = 3, relief = tk.GROOVE)
self.autorampframe.grid(row = 7, column = 0, sticky = 'new')
self.autorampframe.grid_rowconfigure(1, weight = 1)
self.autorampframe.grid_columnconfigure(0, weight = 1)
self.autorampcheckvar = tk.IntVar()
self.autorampcheckvar.set(0)
self.autorampcheckbox = tk.Checkbutton(self.autorampframe, text = 'Auto Ramp', variable = self.autorampcheckvar, command = lambda: self.setautoramp() )
self.autorampcheckbox.grid(row = 0, column = 0, sticky = 'new')
self.autorampsubframe = tk.Frame(self.autorampframe)
self.autorampsubframe.grid(row = 1, column = 0, sticky = 'new')
self.autorampsubframe.grid_columnconfigure(0, weight = 1)
self.ramptimelbl = tk.Label(self.autorampsubframe, text = 'Ramp Time Step (s)')
self.ramptimelbl.grid(row = 0, column = 0, sticky = 'new')
self.ramptimestr = tk.StringVar()
self.ramptimestr.set('10')
self.ramptimeentry = tk.Entry(self.autorampsubframe, textvariable = self.ramptimestr)
self.ramptimeentry.grid(row = 1, column = 0, sticky = 'new')
self.rampupbtn = ttk.Button(self.autorampsubframe, text = 'Ramp up', command = lambda: self.rampup())
self.rampupbtn.grid(row = 2, column = 0, sticky = 'new')
self.rampdownbtn = ttk.Button(self.autorampsubframe, text = 'Ramp down', command = lambda: self.rampdown())
self.rampdownbtn.grid(row = 3, column = 0, sticky = 'new')
self.rampstatelbl = tk.Label(self.autorampsubframe, text = 'Ramping Idle')
self.rampstatelbl.grid(row = 4, column = 0, sticky = 'new')
self.rampstopbtn = tk.Button(self.autorampsubframe, text = 'Stop Ramp', command = lambda: self.stopramp())
self.rampstopbtn.config(background = 'red', foreground = 'black')
self.rampstopbtn.grid(row = 5, column = 0, sticky = 'new')
self.disableframe(self.autorampsubframe)
def updatekeithleylimits(self):
print self.keithley.maxvoltage, self.keithley.minvoltage
self.keithley.maxvoltage = float(self.maxstr.get())
self.keithley.minvoltage = float(self.minstr.get())
print self.keithley.maxvoltage, self.keithley.minvoltage
def disableframe(self, frame):
for child in frame.winfo_children():
child.configure(state='disable')
def enableframe(self, frame):
for child in frame.winfo_children():
child.configure(state='normal')
def setautoramp(self):
if self.autorampcheckvar.get():
self.enableframe(self.autorampsubframe)
else:
self.disableframe(self.autorampsubframe)
def stopramp(self):
self.rampevent.set()
def rampup(self):
self.rampthread = threading.Thread(target = self.__rampup)
self.rampthread.start()
def __rampup(self):
#reinitialize the ramp event and the rampactive bool
self.rampevent.clear()
self.rampthread_active = True
#disable the rampdown button
self.rampdownbtn.config(state = tk.DISABLED)
self.rampstatelbl.config(text = 'Ramping Up...')
while (not self.rampevent.is_set()):
#get the time step
timestep = float(self.ramptimestr.get())
self.rampevent.wait(timestep)
self.incrementVoltage() #comment out while testing
if float(self.lastreadingstr.get()) >= self.keithley.maxvoltage:
self.rampevent.set()
#Reset all parameters back to normal state
self.rampdownbtn.config(state = tk.NORMAL)
self.rampstatelbl.config(text = 'Ramping Idle')
self.rampthread_active = False
def rampdown(self):
self.rampthread = threading.Thread(target = self.__rampdown)
self.rampthread.start()
def __rampdown(self):
#reinitialize the ramp event and the rampactive bool
self.rampevent.clear()
self.rampthread_active = True
#disable rampup btn
self.rampupbtn.config(state = tk.DISABLED)
self.rampstatelbl.config(text = 'Ramping Down...')
while (not self.rampevent.is_set()):
#get the time step
timestep = float(self.ramptimestr.get())
self.rampevent.wait(timestep)
self.decrementVoltage() #comment out while testing
if float(self.lastreadingstr.get()) <= self.keithley.minvoltage:
self.rampevent.set()
#Reset all parameters back to normal state
self.rampupbtn.config(state = tk.NORMAL)
self.rampstatelbl.config(text = 'Ramping Idle')
self.rampthread_active = False
def outputonoffclick(self):
#get previous state
if self.keithley.outputon: #then turn off
self.keithley.disableOutput()
self.onoffindicator_canvas.itemconfig(self.onoffindicator, fill = "gray38")
else:#turn on output
self.keithley.enableOutput()
self.onoffindicator_canvas.itemconfig(self.onoffindicator, fill = "RoyalBlue2")
def bindarrowkeys(self):
if self.bindingcheckvar.get() == 1:
self.root.bind('<Up>', self.uparrow)
self.root.bind('<Down>', self.downarrow)
else:
self.root.unbind('<Up>')
self.root.unbind('<Down>')
def uparrow(self, event):
self.incrementVoltage()
def downarrow(self, event):
self.decrementVoltage()
def incrementVoltage(self):
self.upbtn.config(state = tk.DISABLED)
self.downbtn.config(state = tk.DISABLED)
if self.keithleyframe == None:
self.setKeithleyFrame()
        #set Keithley deltaV to whatever the current delta V is
self.keithley.deltaV = float( self.voltagestepstr.get() )
#determine if keithley thread is running and data are being measured
plotwasrunning = self.keithleyframe.plot_running #determine if plot was running before changing any frame settings
print plotwasrunning
if self.keithley.thread_active:
#stop measurement and the plot
self.keithleyframe.measure_click()
while self.keithley.thread_active:
time.sleep(0.002)
#increment voltage
data = self.keithley.incrementVoltage()
#restart measurement
if plotwasrunning: #then the plot was also running, and need to restart the plot
self.keithleyframe.measure_and_plot_click()
else: #then only the measurement was running and the plot wasn't
self.keithleyframe.measure_click()
else:
data = self.keithley.incrementVoltage()
parseddata = self.keithley.parseData(data)
self.lastreadingstr.set( str(parseddata[1]))
self.upbtn.config(state = tk.NORMAL)
self.downbtn.config(state = tk.NORMAL)
def decrementVoltage(self):
self.upbtn.config(state = tk.DISABLED)
self.downbtn.config(state = tk.DISABLED)
if self.keithleyframe == None:
self.setKeithleyFrame()
        #set Keithley deltaV to whatever the current delta V is
self.keithley.deltaV = float( self.voltagestepstr.get() )
#determine if keithley thread is running and data are being measured
plotwasrunning = self.keithleyframe.plot_running #determine if plot was running before changing any frame settings
if self.keithley.thread_active: #thread active problem
#stop measurement
self.keithleyframe.measure_click()
while self.keithley.thread_active:
time.sleep(0.002)
            #decrement voltage
data = self.keithley.decrementVoltage()
#restart measurement
if plotwasrunning: #then the plot was also running, and need to restart the plot
self.keithleyframe.measure_and_plot_click()
else: #then only the measurement was running and the plot wasn't
self.keithleyframe.measure_click()
else:
data = self.keithley.decrementVoltage()
parseddata = self.keithley.parseData(data)
self.lastreadingstr.set( str(parseddata[1]))
self.upbtn.config(state = tk.NORMAL)
self.downbtn.config(state = tk.NORMAL)
def setKeithleyFrame(self):
self.keithleyframe = self.getKeithleyFrame()
def getKeithleyFrame(self):
framekeys = self.controller.frames.keys()
for key in framekeys:
if 'keithley_measure' in key.__name__:
return self.controller.frames[key]
def abort(self):
self.keithley.abort()
#def turnoff(self):
|
gpl-3.0
|
linyufly/NaVisualizer
|
visualizer.py
|
1
|
1417
|
import sys
import csv
import numpy as np
from datetime import datetime
from matplotlib import pyplot as plt
from matplotlib.dates import DateFormatter
from matplotlib.dates import MonthLocator, DayLocator
def main():
print 'File: {0}'.format(str(sys.argv[1]))
with open(sys.argv[1], 'r') as csv_file:
table = [row for row in csv.reader(csv_file, delimiter = ',', quotechar = '"')]
header = table[0]
table = [[datetime.strptime(row[0], '%Y/%m/%d')] +
[float(element) for (idx, element) in enumerate(row) if idx >= 1]
for (idx, row) in enumerate(table) if idx >= 2]
print 'Header: {0}'.format(header)
np_table = np.array(table)
date_list = np_table[:, 0]
close_list = np_table[:, 1]
open_list = np_table[:, 3]
high_list = np_table[:, 4]
low_list = np_table[:, 5]
fig, close_plot = plt.subplots()
close_plot.plot(date_list, low_list, 'b', date_list, high_list, 'r')
close_plot.set_ylim(0)
# format the ticks
close_plot.xaxis.set_major_locator(MonthLocator())
close_plot.xaxis.set_major_formatter(DateFormatter('%Y-%m'))
close_plot.xaxis.set_minor_locator(DayLocator())
# format the coordinate message box
def PriceFormatter(x): return '$%.2f' % x
close_plot.format_xdata = DateFormatter('%Y-%m-%d')
close_plot.format_ydata = PriceFormatter
close_plot.grid(True)
# for x-axis
fig.autofmt_xdate()
plt.show()
if __name__ == '__main__':
main()
|
bsd-3-clause
|
myt00seven/svrg
|
bk/svrg_no_BN/interpolate.py
|
1
|
1890
|
import sys, os
import numpy as np
import matplotlib.pyplot as plt
import theano
import theano.tensor as T
import lasagne
from load_dataset import *
from neuralnet import build
import seaborn
def interpolate(x1, x2, nums=10):
d = (x2 - x1) / float(nums)
return np.array([x1 + i * d for i in range(nums + 1)])
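# For instance, interpolate(np.zeros(2), np.ones(2), nums=2) yields the three
# points [0, 0], [0.5, 0.5] and [1, 1]: nums + 1 rows including both endpoints.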
def main():
X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()
input_var = T.matrix('inputs')
target_var = T.matrix('targets')
model = 'adam_reg'
with np.load('models/model_%s.npz' % model) as f:
param_values = [f['arr_%d' % j] for j in range(len(f.files))]
encoder = lasagne.layers.DenseLayer(
lasagne.layers.InputLayer(shape=(None, 784), input_var=input_var),
num_units=300,
nonlinearity=lasagne.nonlinearities.sigmoid
)
lasagne.layers.set_all_param_values(encoder, param_values[:2])
decoder = lasagne.layers.DenseLayer(
lasagne.layers.InputLayer(shape=(None, 300), input_var=input_var),
num_units=784,
nonlinearity=lasagne.nonlinearities.sigmoid
)
lasagne.layers.set_all_param_values(decoder, param_values[2:])
n_images = 10
n_rows = 10
for row in range(n_rows):
n1 = np.random.randint(0, X_train.shape[0])
n2 = np.random.randint(0, X_train.shape[0])
enc1 = lasagne.layers.get_output(encoder, X_train[n1]).eval()
enc2 = lasagne.layers.get_output(encoder, X_train[n2]).eval()
images = lasagne.layers.get_output(decoder, interpolate(enc1, enc2, n_images)).eval()
for i in range(n_images):
plt.subplot(n_rows, n_images, row * n_images + i + 1)
plt.axis('off')
plt.imshow(images[i].reshape(28, 28), cmap='Greys')
plt.show()
main()
|
mit
|
massmutual/scikit-learn
|
sklearn/utils/validation.py
|
9
|
25126
|
"""Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from ..utils.fixes import signature
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
class DataConversionWarning(UserWarning):
"""A warning on implicit data conversions happening in the code"""
pass
warnings.simplefilter("always", DataConversionWarning)
class NonBLASDotWarning(UserWarning):
"""A warning on implicit dispatch to numpy.dot"""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
"""
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
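    # For example, np.array([0., np.nan]).sum() is nan, so the cheap check
    # already flags it, while a large float32 array can overflow to inf in the
    # sum even though every element is finite -- hence the additional
    # np.isfinite(X).all() test before raising.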
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: "
"%s" % str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
    non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
    dtype : string, type or None (default=None)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if (isinstance(accept_sparse, (list, tuple))
and spmatrix.format not in accept_sparse):
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
    By default, the input is converted to an at least 2d numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if estimator is not None:
if isinstance(estimator, six.string_types):
estimator_name = estimator
else:
estimator_name = estimator.__class__.__name__
else:
estimator_name = "Estimator"
context = " by %s" % estimator_name if estimator is not None else ""
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
array = np.array(array, dtype=dtype, order=order, copy=copy)
if ensure_2d:
if array.ndim == 1:
if ensure_min_samples >= 2:
raise ValueError("%s expects at least 2 samples provided "
"in a 2 dimensional array-like input"
% estimator_name)
warnings.warn(
"Passing 1d arrays as data is deprecated in 0.17 and will"
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample.",
DeprecationWarning)
array = np.atleast_2d(array)
# To ensure that array flags are maintained
array = np.array(array, dtype=dtype, order=order, copy=copy)
        # make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. %s expected <= 2."
% (array.ndim, estimator_name))
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required%s."
% (n_samples, shape_repr, ensure_min_samples,
context))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required%s."
% (n_features, shape_repr, ensure_min_features,
context))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s%s."
% (dtype_orig, array.dtype, context))
warnings.warn(msg, DataConversionWarning)
return array
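# Illustrative usage sketch (added for exposition; not part of the original
# module). Shows check_array coercing a nested list to a 2-D float64 array and
# rejecting NaN unless force_all_finite=False. The helper name is hypothetical.
def _demo_check_array():
    import numpy as np
    X = check_array([[1.0, 2.0], [3.0, np.nan]], force_all_finite=False)
    assert X.shape == (2, 2) and X.dtype == np.float64
    try:
        check_array([[1.0, 2.0], [3.0, np.nan]])  # force_all_finite defaults to True
    except ValueError:
        pass  # NaN is rejected by default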
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False,
multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y. For multi-label y,
set multi_output=True to allow 2d and sparse y.
If the dtype of X is object, attempt converting to float,
raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
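# Illustrative usage sketch (added for exposition; not part of the original
# module). check_X_y validates X as 2-D and ravels a column-vector y, emitting
# a DataConversionWarning about the shape change. The helper name is
# hypothetical.
def _demo_check_X_y():
    import numpy as np
    X = np.arange(6.0).reshape(3, 2)
    y = np.array([[0.0], [1.0], [0.0]])  # column vector on purpose
    Xc, yc = check_X_y(X, y)
    assert Xc.shape == (3, 2) and yc.shape == (3,)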
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
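# Illustrative usage sketch (added for exposition; not part of the original
# module). All three accepted seed forms yield a RandomState; an already
# constructed instance is returned as-is. The helper name is hypothetical.
def _demo_check_random_state():
    import numpy as np
    assert isinstance(check_random_state(None), np.random.RandomState)
    assert isinstance(check_random_state(0), np.random.RandomState)
    rs = np.random.RandomState(42)
    assert check_random_state(rs) is rs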
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in signature(estimator.fit).parameters
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
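# Illustrative usage sketch (added for exposition; not part of the original
# module). A slightly asymmetric matrix is symmetrized by averaging with its
# transpose (with a warning); raise_exception=True turns that into an error.
# The helper name is hypothetical.
def _demo_check_symmetric():
    import numpy as np
    A = np.array([[0.0, 1.0], [1.0 + 1e-6, 0.0]])
    S = check_symmetric(A, tol=1e-10)  # warns, returns (A + A.T) / 2
    assert np.allclose(S, S.T)
    try:
        check_symmetric(A, tol=1e-10, raise_exception=True)
    except ValueError:
        pass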
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
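# Illustrative usage sketch (added for exposition; not part of the original
# module). A minimal estimator-like object: check_is_fitted raises
# NotFittedError until the trailing-underscore attribute appears after fit().
# The class and helper names are hypothetical.
def _demo_check_is_fitted():
    class _ToyEstimator(object):
        def fit(self, X, y=None):
            self.coef_ = 0.0
            return self
    est = _ToyEstimator()
    try:
        check_is_fitted(est, 'coef_')
    except NotFittedError:
        pass  # not fitted yet
    est.fit(None)
    check_is_fitted(est, 'coef_')  # passes silently once coef_ exists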
def check_non_negative(X, whom):
"""
Check if there is any negative value in an array.
Parameters
----------
X : array-like or sparse matrix
Input data.
whom : string
Who passed X to this function.
"""
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
|
bsd-3-clause
|
jakobworldpeace/scikit-learn
|
examples/exercises/plot_cv_digits.py
|
135
|
1223
|
"""
=============================================
Cross-validation on Digits Dataset Exercise
=============================================
A tutorial exercise using Cross-validation with an SVM on the Digits dataset.
This exercise is used in the :ref:`cv_generators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn import datasets, svm
digits = datasets.load_digits()
X = digits.data
y = digits.target
svc = svm.SVC(kernel='linear')
C_s = np.logspace(-10, 0, 10)
scores = list()
scores_std = list()
for C in C_s:
svc.C = C
this_scores = cross_val_score(svc, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
# Do the plotting
import matplotlib.pyplot as plt
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.semilogx(C_s, scores)
plt.semilogx(C_s, np.array(scores) + np.array(scores_std), 'b--')
plt.semilogx(C_s, np.array(scores) - np.array(scores_std), 'b--')
locs, labels = plt.yticks()
plt.yticks(locs, list(map(lambda x: "%g" % x, locs)))
plt.ylabel('CV score')
plt.xlabel('Parameter C')
plt.ylim(0, 1.1)
plt.show()
|
bsd-3-clause
|
harshaneelhg/scikit-learn
|
sklearn/__check_build/__init__.py
|
345
|
1671
|
""" Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
|
bsd-3-clause
|
jdanbrown/pydatalab
|
google/datalab/stackdriver/monitoring/_group.py
|
5
|
2932
|
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""Groups for the Google Monitoring API."""
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import object
import collections
import fnmatch
import pandas
import google.datalab
from . import _utils
class Groups(object):
"""Represents a list of Stackdriver groups for a project."""
_DISPLAY_HEADERS = ('Group ID', 'Group name', 'Parent ID', 'Parent name',
'Is cluster', 'Filter')
def __init__(self, context=None):
"""Initializes the Groups for a Stackdriver project.
Args:
context: An optional Context object to use instead of the global default.
"""
self._context = context or google.datalab.Context.default()
self._client = _utils.make_client(self._context)
self._group_dict = None
def list(self, pattern='*'):
"""Returns a list of groups that match the filters.
Args:
pattern: An optional pattern to filter the groups based on their display
name. This can include Unix shell-style wildcards. E.g.
``"Production*"``.
Returns:
A list of Group objects that match the filters.
"""
if self._group_dict is None:
self._group_dict = collections.OrderedDict(
(group.id, group) for group in self._client.list_groups())
return [group for group in self._group_dict.values()
if fnmatch.fnmatch(group.display_name, pattern)]
def as_dataframe(self, pattern='*', max_rows=None):
"""Creates a pandas dataframe from the groups that match the filters.
Args:
pattern: An optional pattern to further filter the groups. This can
include Unix shell-style wildcards. E.g. ``"Production *"``,
``"*-backend"``.
max_rows: The maximum number of groups to return. If None, return all.
Returns:
A pandas dataframe containing matching groups.
"""
data = []
for i, group in enumerate(self.list(pattern)):
if max_rows is not None and i >= max_rows:
break
parent = self._group_dict.get(group.parent_id)
parent_display_name = '' if parent is None else parent.display_name
data.append([
group.id, group.display_name, group.parent_id,
parent_display_name, group.is_cluster, group.filter])
return pandas.DataFrame(data, columns=self._DISPLAY_HEADERS)
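# Illustrative usage sketch (added for exposition; not part of the original
# module). Requires valid Google Cloud credentials; the 'Production*' pattern
# and the helper name are assumptions made for the example.
def _demo_groups():
    groups = Groups()
    production = groups.list(pattern='Production*')
    summary = groups.as_dataframe(pattern='Production*', max_rows=10)
    return production, summary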
|
apache-2.0
|
madjelan/scikit-learn
|
benchmarks/bench_sample_without_replacement.py
|
397
|
8008
|
"""
Benchmarks for sampling integers without replacement.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
delta = (datetime.now() - t_start)
# stop time
time = compute_time(t_start, delta)
return time
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
    # Set custom pool based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(np.int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('Time (s)')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
|
bsd-3-clause
|
jim-pansn/graph-tool
|
src/graph_tool/all.py
|
1
|
2079
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# graph_tool -- a general graph manipulation python module
#
# Copyright (C) 2006-2015 Tiago de Paula Peixoto <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Utility module which includes all the sub-modules in graph_tool
"""
from __future__ import division, absolute_import, print_function
import warnings
from graph_tool import *
import graph_tool
from graph_tool.correlations import *
import graph_tool.correlations
from graph_tool.centrality import *
import graph_tool.centrality
try:
from graph_tool.draw import *
import graph_tool.draw
except ImportError as e:
# Proceed despite errors with cairo, matplotlib, etc.
msg = "Error importing draw module, proceeding nevertheless: " + str(e)
warnings.warn(msg, RuntimeWarning)
pass
from graph_tool.stats import *
import graph_tool.stats
from graph_tool.generation import *
import graph_tool.generation
from graph_tool.stats import *
import graph_tool.stats
from graph_tool.clustering import *
import graph_tool.clustering
from graph_tool.community import *
import graph_tool.community
from graph_tool.topology import *
import graph_tool.topology
from graph_tool.flow import *
import graph_tool.flow
from graph_tool.spectral import *
import graph_tool.spectral
from graph_tool.search import *
import graph_tool.search
from graph_tool.util import *
import graph_tool.util
import graph_tool.collection
import graph_tool.collection as collection
|
gpl-3.0
|
mjgrav2001/scikit-learn
|
sklearn/utils/fixes.py
|
133
|
12882
|
"""Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import inspect
import warnings
import sys
import functools
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
out[:] = x
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out.reshape(np.shape(x))
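# Illustrative sketch (added for exposition; not part of the original module).
# Whichever branch above provided expit, it should match the identity
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2 used by the pure-numpy fallback.
# The helper name is hypothetical.
def _demo_expit():
    import numpy as np
    x = np.linspace(-5, 5, 11)
    assert np.allclose(expit(x), 1.0 / (1.0 + np.exp(-x)))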
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in inspect.getargspec(np.copy)[0]:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument
def astype(array, dtype, copy=True):
if not copy and array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
try:
with warnings.catch_warnings(record=True):
# Don't raise the numpy deprecation warnings that appear in
# 1.9, but avoid Python bug due to simplefilter('ignore')
warnings.simplefilter('always')
sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
# in scipy < 0.14.0, sparse matrix min/max doesn't accept an `axis` argument
# the following code is taken from the scipy 0.14 codebase
def _minor_reduce(X, ufunc):
major_index = np.flatnonzero(np.diff(X.indptr))
if X.data.size == 0 and major_index.size == 0:
# Numpy < 1.8.0 don't handle empty arrays in reduceat
value = np.zeros_like(X.data)
else:
value = ufunc.reduceat(X.data, X.indptr[major_index])
return major_index, value
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
def _sparse_min_or_max(X, axis, min_or_max):
if axis is None:
if 0 in X.shape:
raise ValueError("zero-size array to reduction operation")
zero = X.dtype.type(0)
if X.nnz == 0:
return zero
m = min_or_max.reduce(X.data.ravel())
if X.nnz != np.product(X.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return _min_or_max_axis(X, axis, min_or_max)
else:
raise ValueError("invalid axis, use 0 for rows, or 1 for columns")
def sparse_min_max(X, axis):
return (_sparse_min_or_max(X, axis, np.minimum),
_sparse_min_or_max(X, axis, np.maximum))
else:
def sparse_min_max(X, axis):
return (X.min(axis=axis).toarray().ravel(),
X.max(axis=axis).toarray().ravel())
try:
from numpy import argpartition
except ImportError:
# numpy.argpartition was introduced in v 1.8.0
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return np.argsort(a, axis=axis, order=order)
try:
from itertools import combinations_with_replacement
except ImportError:
# Backport of itertools.combinations_with_replacement for Python 2.6,
# from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
# Python Software Foundation (https://docs.python.org/3/license.html)
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
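# Illustrative sketch (added for exposition; not part of the original module).
# Regardless of which branch bound combinations_with_replacement above, it
# yields AA AB AC BB BC CC for ('ABC', 2). The helper name is hypothetical.
def _demo_combinations_with_replacement():
    combos = list(combinations_with_replacement('ABC', 2))
    assert combos == [('A', 'A'), ('A', 'B'), ('A', 'C'),
                      ('B', 'B'), ('B', 'C'), ('C', 'C')]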
try:
from numpy import isclose
except ImportError:
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within
a tolerance.
This function was added to numpy v1.7.0, and the version you are
running has been backported from numpy v1.8.1. See its documentation
for more details.
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Since we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
cond[np.isnan(x) & np.isnan(y)] = True
return cond
if np_version < (1, 7):
# Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
def frombuffer_empty(buf, dtype):
if len(buf) == 0:
return np.empty(0, dtype=dtype)
else:
return np.frombuffer(buf, dtype=dtype)
else:
frombuffer_empty = np.frombuffer
if np_version < (1, 8):
def in1d(ar1, ar2, assume_unique=False, invert=False):
# Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
else:
from numpy import in1d
if sp_version < (0, 15):
# Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
from scipy.sparse.linalg import lsqr as sparse_lsqr
if sys.version_info < (2, 7, 0):
# partial cannot be pickled in Python 2.6
# http://bugs.python.org/issue1398
class partial(object):
def __init__(self, func, *args, **keywords):
functools.update_wrapper(self, func)
self.func = func
self.args = args
self.keywords = keywords
def __call__(self, *args, **keywords):
args = self.args + args
kwargs = self.keywords.copy()
kwargs.update(keywords)
return self.func(*args, **kwargs)
else:
from functools import partial
if np_version < (1, 6, 2):
# Allow bincount to accept empty arrays
# https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
def bincount(x, weights=None, minlength=None):
if len(x) > 0:
return np.bincount(x, weights, minlength)
else:
if minlength is None:
minlength = 0
minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
return np.zeros(minlength, dtype=np.intp)
else:
from numpy import bincount
if 'exist_ok' in inspect.getargspec(os.makedirs).args:
makedirs = os.makedirs
else:
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works
like mkdir, except that any intermediate path segment (not just the
rightmost) will be created if it does not exist. If the target
directory already exists, raise an OSError if exist_ok is False.
Otherwise no exception is raised. This is recursive.
"""
try:
os.makedirs(name, mode=mode)
except OSError as e:
if (not exist_ok or e.errno != errno.EEXIST
or not os.path.isdir(name)):
raise
|
bsd-3-clause
|
jaidevd/scikit-learn
|
benchmarks/bench_sparsify.py
|
323
|
3372
|
"""
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse.csr import csr_matrix
import numpy as np
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.metrics import r2_score
np.random.seed(42)
def sparsity_ratio(X):
return np.count_nonzero(X) / float(n_samples * n_features)
n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
X[inds[int(n_features / 1.2):]] = 0 # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[n_features // 2:]] = 0  # sparsify coef
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=n_samples)
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
print("test data sparsity: %f" % sparsity_ratio(X_test))
###############################################################################
clf = SGDRegressor(penalty='l1', alpha=.2, fit_intercept=True, n_iter=2000)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
for _ in range(300):
clf.predict(X_test)
def benchmark_sparse_predict():
X_test_sparse = csr_matrix(X_test)
for _ in range(300):
clf.predict(X_test_sparse)
def score(y_test, y_pred, case):
r2 = r2_score(y_test, y_pred)
print("r^2 on test data (%s) : %f" % (case, r2))
score(y_test, clf.predict(X_test), 'dense model')
benchmark_dense_predict()
clf.sparsify()
score(y_test, clf.predict(X_test), 'sparse model')
benchmark_sparse_predict()
|
bsd-3-clause
|
influxdb/influxdb-python
|
influxdb/_dataframe_client.py
|
1
|
17548
|
# -*- coding: utf-8 -*-
"""DataFrame client for InfluxDB."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import math
from collections import defaultdict
import pandas as pd
import numpy as np
from .client import InfluxDBClient
from .line_protocol import _escape_tag
def _pandas_time_unit(time_precision):
unit = time_precision
if time_precision == 'm':
unit = 'ms'
elif time_precision == 'u':
unit = 'us'
elif time_precision == 'n':
unit = 'ns'
assert unit in ('s', 'ms', 'us', 'ns')
return unit
def _escape_pandas_series(s):
return s.apply(lambda v: _escape_tag(v))
class DataFrameClient(InfluxDBClient):
"""DataFrameClient instantiates InfluxDBClient to connect to the backend.
The ``DataFrameClient`` object holds information necessary to connect
to InfluxDB. Requests can be made to InfluxDB directly through the client.
The client reads and writes from pandas DataFrames.
"""
EPOCH = pd.Timestamp('1970-01-01 00:00:00.000+00:00')
def write_points(self,
dataframe,
measurement,
tags=None,
tag_columns=None,
field_columns=None,
time_precision=None,
database=None,
retention_policy=None,
batch_size=None,
protocol='line',
numeric_precision=None):
"""Write to multiple time series names.
:param dataframe: data points in a DataFrame
:param measurement: name of measurement
:param tags: dictionary of tags, with string key-values
:param time_precision: [Optional, default None] Either 's', 'ms', 'u'
or 'n'.
:param batch_size: [Optional] Value to write the points in batches
instead of all at one time. Useful for when doing data dumps from
one database to another or when doing a massive write operation
:type batch_size: int
:param protocol: Protocol for writing data. Either 'line' or 'json'.
:param numeric_precision: Precision for floating point values.
Either None, 'full' or some int, where int is the desired decimal
precision. 'full' preserves full precision for int and float
datatypes. Defaults to None, which preserves 14-15 significant
figures for float and all significant figures for int datatypes.
"""
if tag_columns is None:
tag_columns = []
if field_columns is None:
field_columns = []
if batch_size:
number_batches = int(math.ceil(len(dataframe) / float(batch_size)))
for batch in range(number_batches):
start_index = batch * batch_size
end_index = (batch + 1) * batch_size
if protocol == 'line':
points = self._convert_dataframe_to_lines(
dataframe.iloc[start_index:end_index].copy(),
measurement=measurement,
global_tags=tags,
time_precision=time_precision,
tag_columns=tag_columns,
field_columns=field_columns,
numeric_precision=numeric_precision)
else:
points = self._convert_dataframe_to_json(
dataframe.iloc[start_index:end_index].copy(),
measurement=measurement,
tags=tags,
time_precision=time_precision,
tag_columns=tag_columns,
field_columns=field_columns)
super(DataFrameClient, self).write_points(
points,
time_precision,
database,
retention_policy,
protocol=protocol)
return True
if protocol == 'line':
points = self._convert_dataframe_to_lines(
dataframe,
measurement=measurement,
global_tags=tags,
tag_columns=tag_columns,
field_columns=field_columns,
time_precision=time_precision,
numeric_precision=numeric_precision)
else:
points = self._convert_dataframe_to_json(
dataframe,
measurement=measurement,
tags=tags,
time_precision=time_precision,
tag_columns=tag_columns,
field_columns=field_columns)
super(DataFrameClient, self).write_points(
points,
time_precision,
database,
retention_policy,
protocol=protocol)
return True
def query(self,
query,
params=None,
epoch=None,
expected_response_code=200,
database=None,
raise_errors=True,
chunked=False,
chunk_size=0,
method="GET",
dropna=True):
"""
Query data into a DataFrame.
:param query: the actual query string
:param params: additional parameters for the request, defaults to {}
:param epoch: response timestamps to be in epoch format either 'h',
            'm', 's', 'ms', 'u', or 'ns', defaults to `None` which is
RFC3339 UTC format with nanosecond precision
:param expected_response_code: the expected status code of response,
defaults to 200
:param database: database to query, defaults to None
:param raise_errors: Whether or not to raise exceptions when InfluxDB
returns errors, defaults to True
:param chunked: Enable to use chunked responses from InfluxDB.
With ``chunked`` enabled, one ResultSet is returned per chunk
containing all results within that chunk
:param chunk_size: Size of each chunk to tell InfluxDB to use.
:param dropna: drop columns where all values are missing
:returns: the queried data
:rtype: :class:`~.ResultSet`
"""
query_args = dict(params=params,
epoch=epoch,
expected_response_code=expected_response_code,
raise_errors=raise_errors,
chunked=chunked,
database=database,
method=method,
chunk_size=chunk_size)
results = super(DataFrameClient, self).query(query, **query_args)
if query.strip().upper().startswith("SELECT"):
if len(results) > 0:
return self._to_dataframe(results, dropna)
else:
return {}
else:
return results
def _to_dataframe(self, rs, dropna=True):
result = defaultdict(list)
if isinstance(rs, list):
return map(self._to_dataframe, rs)
for key, data in rs.items():
name, tags = key
if tags is None:
key = name
else:
key = (name, tuple(sorted(tags.items())))
df = pd.DataFrame(data)
df.time = pd.to_datetime(df.time)
df.set_index('time', inplace=True)
df.index = df.index.tz_localize('UTC')
df.index.name = None
result[key].append(df)
for key, data in result.items():
df = pd.concat(data).sort_index()
if dropna:
df.dropna(how='all', axis=1, inplace=True)
result[key] = df
return result
@staticmethod
def _convert_dataframe_to_json(dataframe,
measurement,
tags=None,
tag_columns=None,
field_columns=None,
time_precision=None):
if not isinstance(dataframe, pd.DataFrame):
raise TypeError('Must be DataFrame, but type was: {0}.'
.format(type(dataframe)))
if not (isinstance(dataframe.index, pd.PeriodIndex) or
isinstance(dataframe.index, pd.DatetimeIndex)):
raise TypeError('Must be DataFrame with DatetimeIndex or '
'PeriodIndex.')
# Make sure tags and tag columns are correctly typed
tag_columns = tag_columns if tag_columns is not None else []
field_columns = field_columns if field_columns is not None else []
tags = tags if tags is not None else {}
# Assume field columns are all columns not included in tag columns
if not field_columns:
field_columns = list(
set(dataframe.columns).difference(set(tag_columns)))
dataframe.index = pd.to_datetime(dataframe.index)
if dataframe.index.tzinfo is None:
dataframe.index = dataframe.index.tz_localize('UTC')
# Convert column to strings
dataframe.columns = dataframe.columns.astype('str')
# Convert dtype for json serialization
dataframe = dataframe.astype('object')
precision_factor = {
"n": 1,
"u": 1e3,
"ms": 1e6,
"s": 1e9,
"m": 1e9 * 60,
"h": 1e9 * 3600,
}.get(time_precision, 1)
points = [
{'measurement': measurement,
'tags': dict(list(tag.items()) + list(tags.items())),
'fields': rec,
'time': np.int64(ts.value / precision_factor)}
for ts, tag, rec in zip(dataframe.index,
dataframe[tag_columns].to_dict('record'),
dataframe[field_columns].to_dict('record'))
]
return points
def _convert_dataframe_to_lines(self,
dataframe,
measurement,
field_columns=None,
tag_columns=None,
global_tags=None,
time_precision=None,
numeric_precision=None):
dataframe = dataframe.dropna(how='all').copy()
if len(dataframe) == 0:
return []
if not isinstance(dataframe, pd.DataFrame):
raise TypeError('Must be DataFrame, but type was: {0}.'
.format(type(dataframe)))
if not (isinstance(dataframe.index, pd.PeriodIndex) or
isinstance(dataframe.index, pd.DatetimeIndex)):
raise TypeError('Must be DataFrame with DatetimeIndex or '
'PeriodIndex.')
dataframe = dataframe.rename(
columns={item: _escape_tag(item) for item in dataframe.columns})
# Create a Series of columns for easier indexing
column_series = pd.Series(dataframe.columns)
if field_columns is None:
field_columns = []
if tag_columns is None:
tag_columns = []
if global_tags is None:
global_tags = {}
# Make sure field_columns and tag_columns are lists
field_columns = list(field_columns) if list(field_columns) else []
tag_columns = list(tag_columns) if list(tag_columns) else []
# If field columns but no tag columns, assume rest of columns are tags
if field_columns and (not tag_columns):
tag_columns = list(column_series[~column_series.isin(
field_columns)])
# If no field columns, assume non-tag columns are fields
if not field_columns:
field_columns = list(column_series[~column_series.isin(
tag_columns)])
precision_factor = {
"n": 1,
"u": 1e3,
"ms": 1e6,
"s": 1e9,
"m": 1e9 * 60,
"h": 1e9 * 3600,
}.get(time_precision, 1)
# Make array of timestamp ints
if isinstance(dataframe.index, pd.PeriodIndex):
time = ((dataframe.index.to_timestamp().values.astype(np.int64) /
precision_factor).astype(np.int64).astype(str))
else:
time = ((pd.to_datetime(dataframe.index).values.astype(np.int64) /
precision_factor).astype(np.int64).astype(str))
# If tag columns exist, make an array of formatted tag keys and values
if tag_columns:
# Make global_tags as tag_columns
if global_tags:
for tag in global_tags:
dataframe[tag] = global_tags[tag]
tag_columns.append(tag)
tag_df = dataframe[tag_columns]
tag_df = tag_df.fillna('') # replace NA with empty string
tag_df = tag_df.sort_index(axis=1)
tag_df = self._stringify_dataframe(
tag_df, numeric_precision, datatype='tag')
            # join prepended tags, leaving None values out
tags = tag_df.apply(
lambda s: [',' + s.name + '=' + v if v else '' for v in s])
tags = tags.sum(axis=1)
del tag_df
elif global_tags:
tag_string = ''.join(
[",{}={}".format(k, _escape_tag(v)) if v else ''
for k, v in sorted(global_tags.items())]
)
tags = pd.Series(tag_string, index=dataframe.index)
else:
tags = ''
# Make an array of formatted field keys and values
field_df = dataframe[field_columns]
# Keep the positions where Null values are found
mask_null = field_df.isnull().values
field_df = self._stringify_dataframe(field_df,
numeric_precision,
datatype='field')
field_df = (field_df.columns.values + '=').tolist() + field_df
field_df[field_df.columns[1:]] = ',' + field_df[
field_df.columns[1:]]
field_df = field_df.where(~mask_null, '') # drop Null entries
fields = field_df.sum(axis=1)
del field_df
# Generate line protocol string
measurement = _escape_tag(measurement)
points = (measurement + tags + ' ' + fields + ' ' + time).tolist()
return points
@staticmethod
def _stringify_dataframe(dframe, numeric_precision, datatype='field'):
# Prevent modification of input dataframe
dframe = dframe.copy()
# Find int and string columns for field-type data
int_columns = dframe.select_dtypes(include=['integer']).columns
string_columns = dframe.select_dtypes(include=['object']).columns
# Convert dframe to string
if numeric_precision is None:
# If no precision specified, convert directly to string (fast)
dframe = dframe.astype(str)
elif numeric_precision == 'full':
# If full precision, use repr to get full float precision
float_columns = (dframe.select_dtypes(
include=['floating']).columns)
nonfloat_columns = dframe.columns[~dframe.columns.isin(
float_columns)]
dframe[float_columns] = dframe[float_columns].applymap(repr)
dframe[nonfloat_columns] = (dframe[nonfloat_columns].astype(str))
elif isinstance(numeric_precision, int):
# If precision is specified, round to appropriate precision
float_columns = (dframe.select_dtypes(
include=['floating']).columns)
nonfloat_columns = dframe.columns[~dframe.columns.isin(
float_columns)]
dframe[float_columns] = (dframe[float_columns].round(
numeric_precision))
# If desired precision is > 10 decimal places, need to use repr
if numeric_precision > 10:
dframe[float_columns] = (dframe[float_columns].applymap(repr))
dframe[nonfloat_columns] = (dframe[nonfloat_columns]
.astype(str))
else:
dframe = dframe.astype(str)
else:
raise ValueError('Invalid numeric precision.')
if datatype == 'field':
# If dealing with fields, format ints and strings correctly
dframe[int_columns] += 'i'
dframe[string_columns] = '"' + dframe[string_columns] + '"'
elif datatype == 'tag':
dframe = dframe.apply(_escape_pandas_series)
dframe.columns = dframe.columns.astype(str)
return dframe
def _datetime_to_epoch(self, datetime, time_precision='s'):
seconds = (datetime - self.EPOCH).total_seconds()
if time_precision == 'h':
return seconds / 3600
elif time_precision == 'm':
return seconds / 60
elif time_precision == 's':
return seconds
elif time_precision == 'ms':
return seconds * 1e3
elif time_precision == 'u':
return seconds * 1e6
elif time_precision == 'n':
return seconds * 1e9
|
mit
|
bhilburn/gnuradio
|
gr-filter/examples/reconstruction.py
|
49
|
5015
|
#!/usr/bin/env python
#
# Copyright 2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, digital
from gnuradio import filter
from gnuradio import blocks
import sys
try:
from gnuradio import channels
except ImportError:
print "Error: Program requires gr-channels."
sys.exit(1)
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
fftlen = 8192
def main():
N = 10000
fs = 2000.0
Ts = 1.0/fs
t = scipy.arange(0, N*Ts, Ts)
# When playing with the number of channels, be careful about the filter
# specs and the channel map of the synthesizer set below.
nchans = 10
# Build the filter(s)
bw = 1000
tb = 400
proto_taps = filter.firdes.low_pass_2(1, nchans*fs,
bw, tb, 80,
filter.firdes.WIN_BLACKMAN_hARRIS)
print "Filter length: ", len(proto_taps)
# Create a modulated signal
npwr = 0.01
data = scipy.random.randint(0, 256, N)
rrc_taps = filter.firdes.root_raised_cosine(1, 2, 1, 0.35, 41)
src = blocks.vector_source_b(data.astype(scipy.uint8).tolist(), False)
mod = digital.bpsk_mod(samples_per_symbol=2)
chan = channels.channel_model(npwr)
rrc = filter.fft_filter_ccc(1, rrc_taps)
# Split it up into pieces
channelizer = filter.pfb.channelizer_ccf(nchans, proto_taps, 2)
# Put the pieces back together again
syn_taps = [nchans*t for t in proto_taps]
synthesizer = filter.pfb_synthesizer_ccf(nchans, syn_taps, True)
src_snk = blocks.vector_sink_c()
snk = blocks.vector_sink_c()
# Remap the location of the channels
    # Can be done in synth or channelizer (watch out for rotations in
# the channelizer)
synthesizer.set_channel_map([ 0, 1, 2, 3, 4,
15, 16, 17, 18, 19])
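    # Note (interpretation of the example, not authoritative): because the
    # synthesizer was built with the twox flag set, it has 2*nchans (20)
    # inputs, so the 10 channelizer outputs are mapped onto inputs 0-4 and
    # 15-19, i.e. the lower and upper halves of the doubled output spectrum.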
tb = gr.top_block()
tb.connect(src, mod, chan, rrc, channelizer)
tb.connect(rrc, src_snk)
vsnk = []
for i in xrange(nchans):
tb.connect((channelizer,i), (synthesizer, i))
vsnk.append(blocks.vector_sink_c())
tb.connect((channelizer,i), vsnk[i])
tb.connect(synthesizer, snk)
tb.run()
sin = scipy.array(src_snk.data()[1000:])
sout = scipy.array(snk.data()[1000:])
# Plot original signal
fs_in = nchans*fs
f1 = pylab.figure(1, figsize=(16,12), facecolor='w')
s11 = f1.add_subplot(2,2,1)
s11.psd(sin, NFFT=fftlen, Fs=fs_in)
s11.set_title("PSD of Original Signal")
s11.set_ylim([-200, -20])
s12 = f1.add_subplot(2,2,2)
s12.plot(sin.real[1000:1500], "o-b")
s12.plot(sin.imag[1000:1500], "o-r")
s12.set_title("Original Signal in Time")
start = 1
skip = 2
s13 = f1.add_subplot(2,2,3)
s13.plot(sin.real[start::skip], sin.imag[start::skip], "o")
s13.set_title("Constellation")
s13.set_xlim([-2, 2])
s13.set_ylim([-2, 2])
# Plot channels
nrows = int(scipy.sqrt(nchans))
ncols = int(scipy.ceil(float(nchans)/float(nrows)))
f2 = pylab.figure(2, figsize=(16,12), facecolor='w')
for n in xrange(nchans):
s = f2.add_subplot(nrows, ncols, n+1)
s.psd(vsnk[n].data(), NFFT=fftlen, Fs=fs_in)
s.set_title("Channel {0}".format(n))
s.set_ylim([-200, -20])
# Plot reconstructed signal
fs_out = 2*nchans*fs
f3 = pylab.figure(3, figsize=(16,12), facecolor='w')
s31 = f3.add_subplot(2,2,1)
s31.psd(sout, NFFT=fftlen, Fs=fs_out)
s31.set_title("PSD of Reconstructed Signal")
s31.set_ylim([-200, -20])
s32 = f3.add_subplot(2,2,2)
s32.plot(sout.real[1000:1500], "o-b")
s32.plot(sout.imag[1000:1500], "o-r")
s32.set_title("Reconstructed Signal in Time")
start = 0
skip = 4
s33 = f3.add_subplot(2,2,3)
s33.plot(sout.real[start::skip], sout.imag[start::skip], "o")
s33.set_title("Constellation")
s33.set_xlim([-2, 2])
s33.set_ylim([-2, 2])
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
|
gpl-3.0
|
architecture-building-systems/CEAforArcGIS
|
cea/technologies/thermal_network/simplified_thermal_network.py
|
2
|
29336
|
import math
import time
import geopandas as gpd
import numpy as np
import pandas as pd
import wntr
import cea.config
import cea.inputlocator
import cea.technologies.substation as substation
from cea.constants import P_WATER_KGPERM3, FT_WATER_TO_PA, FT_TO_M, M_WATER_TO_PA, HEAT_CAPACITY_OF_WATER_JPERKGK, SHAPEFILE_TOLERANCE
from cea.optimization.constants import PUMP_ETA
from cea.optimization.preprocessing.preprocessing_main import get_building_names_with_load
from cea.technologies.thermal_network.thermal_network_loss import calc_temperature_out_per_pipe
from cea.resources import geothermal
from cea.technologies.constants import NETWORK_DEPTH
from cea.utilities.epwreader import epw_reader
__author__ = "Jimeno A. Fonseca"
__copyright__ = "Copyright 2019, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Jimeno A. Fonseca"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "[email protected]"
__status__ = "Production"
def calculate_ground_temperature(locator):
"""
calculate ground temperatures.
:param locator:
:return: list of ground temperatures, one for each hour of the year
:rtype: list[np.float64]
"""
weather_file = locator.get_weather_file()
T_ambient_C = epw_reader(weather_file)['drybulb_C']
network_depth_m = NETWORK_DEPTH # [m]
T_ground_K = geothermal.calc_ground_temperature(locator, T_ambient_C.values, network_depth_m)
return T_ground_K
def extract_network_from_shapefile(edge_shapefile_df, node_shapefile_df):
"""
Extracts network data into DataFrames for pipes and nodes in the network
:param edge_shapefile_df: DataFrame containing all data imported from the edge shapefile
:param node_shapefile_df: DataFrame containing all data imported from the node shapefile
:type edge_shapefile_df: DataFrame
:type node_shapefile_df: DataFrame
:return node_df: DataFrame containing all nodes and their corresponding coordinates
:return edge_df: list of edges and their corresponding lengths and start and end nodes
:rtype node_df: DataFrame
:rtype edge_df: DataFrame
"""
# create node dictionary with plant and consumer nodes
node_dict = {}
node_shapefile_df.set_index("Name", inplace=True)
node_shapefile_df = node_shapefile_df.astype('object')
node_shapefile_df['coordinates'] = node_shapefile_df['geometry'].apply(lambda x: x.coords[0])
# sort node_df by index number
node_sorted_index = node_shapefile_df.index.to_series().str.split('NODE', expand=True)[1].apply(int).sort_values(
ascending=True)
node_shapefile_df = node_shapefile_df.reindex(index=node_sorted_index.index)
for node, row in node_shapefile_df.iterrows():
coord_node = row['geometry'].coords[0]
coord_node_round = (round(coord_node[0], SHAPEFILE_TOLERANCE), round(coord_node[1], SHAPEFILE_TOLERANCE))
node_dict[coord_node_round] = node
# create edge dictionary with pipe lengths and start and end nodes
# complete node dictionary with missing nodes (i.e., joints)
edge_shapefile_df.set_index("Name", inplace=True)
edge_shapefile_df = edge_shapefile_df.astype('object')
edge_shapefile_df['coordinates'] = edge_shapefile_df['geometry'].apply(lambda x: x.coords[0])
# sort edge_df by index number
edge_sorted_index = edge_shapefile_df.index.to_series().str.split('PIPE', expand=True)[1].apply(int).sort_values(
ascending=True)
edge_shapefile_df = edge_shapefile_df.reindex(index=edge_sorted_index.index)
# assign edge properties
edge_shapefile_df['start node'] = ''
edge_shapefile_df['end node'] = ''
for pipe, row in edge_shapefile_df.iterrows():
# get the length of the pipe and add to dataframe
edge_coords = row['geometry'].coords
edge_shapefile_df.loc[pipe, 'length_m'] = row['geometry'].length
start_node = (round(edge_coords[0][0], SHAPEFILE_TOLERANCE), round(edge_coords[0][1], SHAPEFILE_TOLERANCE))
end_node = (round(edge_coords[1][0], SHAPEFILE_TOLERANCE), round(edge_coords[1][1], SHAPEFILE_TOLERANCE))
if start_node in node_dict.keys():
edge_shapefile_df.loc[pipe, 'start node'] = node_dict[start_node]
else:
print(f"The start node of {pipe} has no match in node_dict, check precision of the coordinates.")
if end_node in node_dict.keys():
edge_shapefile_df.loc[pipe, 'end node'] = node_dict[end_node]
else:
print(f"The end node of {pipe} has no match in node_dict, check precision of the coordinates.")
return node_shapefile_df, edge_shapefile_df
def get_thermal_network_from_shapefile(locator, network_type, network_name):
"""
This function reads the existing node and pipe network from a shapefile and produces an edge-node incidence matrix
(as defined by Oppelt et al., 2016) as well as the edge properties (length, start node, and end node) and node
coordinates.
"""
# import shapefiles containing the network's edges and nodes
network_edges_df = gpd.read_file(locator.get_network_layout_edges_shapefile(network_type, network_name))
network_nodes_df = gpd.read_file(locator.get_network_layout_nodes_shapefile(network_type, network_name))
# check duplicated NODE/PIPE IDs
duplicated_nodes = network_nodes_df[network_nodes_df.Name.duplicated(keep=False)]
duplicated_edges = network_edges_df[network_edges_df.Name.duplicated(keep=False)]
if duplicated_nodes.size > 0:
raise ValueError('There are duplicated NODE IDs:', duplicated_nodes)
if duplicated_edges.size > 0:
raise ValueError('There are duplicated PIPE IDs:', duplicated_nodes)
# get node and pipe information
node_df, edge_df = extract_network_from_shapefile(network_edges_df, network_nodes_df)
return edge_df, node_df
def calc_max_diameter(volume_flow_m3s, pipe_catalog, velocity_ms, peak_load_percentage):
volume_flow_m3s_corrected_to_design = volume_flow_m3s * peak_load_percentage / 100
diameter_m = math.sqrt((volume_flow_m3s_corrected_to_design / velocity_ms) * (4 / math.pi))
selection_of_catalog = pipe_catalog.loc[(pipe_catalog['D_int_m'] - diameter_m).abs().argsort()[:1]]
D_int_m = selection_of_catalog['D_int_m'].values[0]
Pipe_DN = selection_of_catalog['Pipe_DN'].values[0]
D_ext_m = selection_of_catalog['D_ext_m'].values[0]
D_ins_m = selection_of_catalog['D_ins_m'].values[0]
return Pipe_DN, D_ext_m, D_int_m, D_ins_m
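# Sizing logic used above: for a circular pipe carrying the design flow
# Q [m3/s] at velocity v [m/s], the cross-section is A = Q / v and
# A = pi * D**2 / 4, hence D = sqrt(4 * Q / (pi * v)); the catalogue entry
# with the closest internal diameter is then selected.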
def calc_head_loss_m(diameter_m, max_volume_flow_rates_m3s, coefficient_friction, length_m):
hf_L = (10.67 / (coefficient_friction ** 1.85)) * (max_volume_flow_rates_m3s ** 1.852) / (diameter_m ** 4.8704)
head_loss_m = hf_L * length_m
return head_loss_m
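# calc_head_loss_m evaluates the SI form of the Hazen-Williams formula,
# hf / L = 10.67 * Q**1.852 / (C**1.852 * D**4.8704), with Q in m3/s, D in m
# and C the Hazen-Williams roughness coefficient (the exponent on C is rounded
# to 1.85 here); multiplying by the pipe length gives the head loss in metres.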
def calc_linear_thermal_loss_coefficient(diameter_ext_m, diameter_int_m, diameter_insulation_m):
    r_out_m = diameter_ext_m / 2
    r_in_m = diameter_int_m / 2
    r_s_m = diameter_insulation_m / 2
    k_pipe_WpermK = 58.7  # steel pipe
    k_ins_WpermK = 0.059  # calcium silicate insulation
resistance_mKperW = ((math.log(r_out_m / r_in_m) / k_pipe_WpermK) + (math.log(r_s_m / r_out_m) / k_ins_WpermK))
K_WperKm = 2 * math.pi / resistance_mKperW
return K_WperKm
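# calc_linear_thermal_loss_coefficient returns the per-metre heat loss
# coefficient K = 2 * pi / R' of an insulated pipe, where
# R' = ln(r_out / r_in) / k_pipe + ln(r_ins / r_out) / k_ins is the series
# conduction resistance of the steel wall and the insulation layer per unit
# length (convective film resistances are neglected).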
def calc_thermal_loss_per_pipe(T_in_K, m_kgpers, T_ground_K, k_kWperK):
T_out_K = calc_temperature_out_per_pipe(T_in_K, m_kgpers, k_kWperK, T_ground_K)
DT = T_in_K - T_out_K
Q_loss_kWh = DT * m_kgpers * HEAT_CAPACITY_OF_WATER_JPERKGK / 1000
return Q_loss_kWh
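# Units note for calc_thermal_loss_per_pipe: DT [K] * m [kg/s] * cp [J/(kg.K)]
# / 1000 is a heat-loss rate in kW; with the hourly timestep used below this
# is numerically equal to the energy lost per hour in kWh, which is how the
# result is labelled.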
def thermal_network_simplified(locator, config, network_name):
# local variables
network_type = config.thermal_network.network_type
min_head_substation_kPa = config.thermal_network.min_head_substation
thermal_transfer_unit_design_head_m = min_head_substation_kPa * 1000 / M_WATER_TO_PA
coefficient_friction_hazen_williams = config.thermal_network.hw_friction_coefficient
velocity_ms = config.thermal_network.peak_load_velocity
fraction_equivalent_length = config.thermal_network.equivalent_length_factor
peak_load_percentage = config.thermal_network.peak_load_percentage
# GET INFORMATION ABOUT THE NETWORK
edge_df, node_df = get_thermal_network_from_shapefile(locator, network_type, network_name)
# GET INFORMATION ABOUT THE DEMAND OF BUILDINGS AND CONNECT TO THE NODE INFO
# calculate substations for all buildings
# local variables
total_demand = pd.read_csv(locator.get_total_demand())
volume_flow_m3pers_building = pd.DataFrame()
T_sup_K_building = pd.DataFrame()
T_re_K_building = pd.DataFrame()
Q_demand_kWh_building = pd.DataFrame()
if network_type == "DH":
buildings_name_with_heating = get_building_names_with_load(total_demand, load_name='QH_sys_MWhyr')
buildings_name_with_space_heating = get_building_names_with_load(total_demand, load_name='Qhs_sys_MWhyr')
DHN_barcode = "0"
if (buildings_name_with_heating != [] and buildings_name_with_space_heating != []):
building_names = [building for building in buildings_name_with_heating if building in
node_df.Building.values]
substation.substation_main_heating(locator, total_demand, building_names, DHN_barcode=DHN_barcode)
else:
            raise Exception('No buildings with heating loads were found; cannot lay out the district heating network')
for building_name in building_names:
substation_results = pd.read_csv(
locator.get_optimization_substations_results_file(building_name, "DH", DHN_barcode))
volume_flow_m3pers_building[building_name] = substation_results["mdot_DH_result_kgpers"] / P_WATER_KGPERM3
T_sup_K_building[building_name] = substation_results["T_supply_DH_result_K"]
T_re_K_building[building_name] = np.where(substation_results["T_return_DH_result_K"] >273.15,
substation_results["T_return_DH_result_K"], np.nan)
Q_demand_kWh_building[building_name] = (substation_results["Q_heating_W"] + substation_results[
"Q_dhw_W"]) / 1000
if network_type == "DC":
buildings_name_with_cooling = get_building_names_with_load(total_demand, load_name='QC_sys_MWhyr')
DCN_barcode = "0"
if buildings_name_with_cooling != []:
building_names = [building for building in buildings_name_with_cooling if building in
node_df.Building.values]
substation.substation_main_cooling(locator, total_demand, building_names, DCN_barcode=DCN_barcode)
else:
            raise Exception('No buildings with cooling loads were found; cannot lay out the district cooling network')
for building_name in building_names:
substation_results = pd.read_csv(
locator.get_optimization_substations_results_file(building_name, "DC", DCN_barcode))
volume_flow_m3pers_building[building_name] = substation_results[
"mdot_space_cooling_data_center_and_refrigeration_result_kgpers"] / P_WATER_KGPERM3
T_sup_K_building[building_name] = substation_results[
"T_supply_DC_space_cooling_data_center_and_refrigeration_result_K"]
T_re_K_building[building_name] = substation_results[
"T_return_DC_space_cooling_data_center_and_refrigeration_result_K"]
Q_demand_kWh_building[building_name] = substation_results[
"Q_space_cooling_data_center_and_refrigeration_W"] / 1000
import cea.utilities
with cea.utilities.pushd(locator.get_thermal_network_folder()):
# Create a water network model
wn = wntr.network.WaterNetworkModel()
# add loads
building_base_demand_m3s = {}
for building in volume_flow_m3pers_building.keys():
building_base_demand_m3s[building] = volume_flow_m3pers_building[building].max()
pattern_demand = (volume_flow_m3pers_building[building].values / building_base_demand_m3s[building]).tolist()
wn.add_pattern(building, pattern_demand)
# add nodes
consumer_nodes = []
building_nodes_pairs = {}
building_nodes_pairs_inversed = {}
for node in node_df.iterrows():
if node[1]["Type"] == "CONSUMER":
demand_pattern = node[1]['Building']
base_demand_m3s = building_base_demand_m3s[demand_pattern]
consumer_nodes.append(node[0])
building_nodes_pairs[node[0]] = demand_pattern
building_nodes_pairs_inversed[demand_pattern] = node[0]
wn.add_junction(node[0],
base_demand=base_demand_m3s,
demand_pattern=demand_pattern,
elevation=thermal_transfer_unit_design_head_m,
coordinates=node[1]["coordinates"])
elif node[1]["Type"] == "PLANT":
base_head = int(thermal_transfer_unit_design_head_m*1.2)
start_node = node[0]
name_node_plant = start_node
wn.add_reservoir(start_node,
base_head=base_head,
coordinates=node[1]["coordinates"])
else:
wn.add_junction(node[0],
elevation=0,
coordinates=node[1]["coordinates"])
# add pipes
for edge in edge_df.iterrows():
length_m = edge[1]["length_m"]
edge_name = edge[0]
wn.add_pipe(edge_name, edge[1]["start node"],
edge[1]["end node"],
length=length_m * (1 + fraction_equivalent_length),
roughness=coefficient_friction_hazen_williams,
minor_loss=0.0,
status='OPEN')
# add options
wn.options.time.duration = 8759 * 3600 # this indicates epanet to do one year simulation
wn.options.time.hydraulic_timestep = 60 * 60
wn.options.time.pattern_timestep = 60 * 60
wn.options.solver.accuracy = 0.01
wn.options.solver.trials = 100
# 1st ITERATION GET MASS FLOWS AND CALCULATE DIAMETER
sim = wntr.sim.EpanetSimulator(wn)
results = sim.run_sim()
max_volume_flow_rates_m3s = results.link['flowrate'].abs().max()
pipe_names = max_volume_flow_rates_m3s.index.values
pipe_catalog = pd.read_excel(locator.get_database_distribution_systems(), sheet_name='THERMAL_GRID')
Pipe_DN, D_ext_m, D_int_m, D_ins_m = zip(
*[calc_max_diameter(flow, pipe_catalog, velocity_ms=velocity_ms, peak_load_percentage=peak_load_percentage) for
flow in max_volume_flow_rates_m3s])
pipe_dn = pd.Series(Pipe_DN, pipe_names)
diameter_int_m = pd.Series(D_int_m, pipe_names)
diameter_ext_m = pd.Series(D_ext_m, pipe_names)
diameter_ins_m = pd.Series(D_ins_m, pipe_names)
# 2nd ITERATION GET PRESSURE POINTS AND MASSFLOWS FOR SIZING PUMPING NEEDS - this could be for all the year
# modify diameter and run simulations
edge_df['Pipe_DN'] = pipe_dn
edge_df['D_int_m'] = D_int_m
for edge in edge_df.iterrows():
edge_name = edge[0]
pipe = wn.get_link(edge_name)
pipe.diameter = diameter_int_m[edge_name]
sim = wntr.sim.EpanetSimulator(wn)
results = sim.run_sim()
# 3rd ITERATION GET FINAL UTILIZATION OF THE GRID (SUPPLY SIDE)
# get accumulated head loss per hour
unitary_head_ftperkft = results.link['headloss'].abs()
unitary_head_mperm = unitary_head_ftperkft * FT_TO_M / (FT_TO_M * 1000)
head_loss_m = unitary_head_mperm.copy()
for column in head_loss_m.columns.values:
length_m = edge_df.loc[column]['length_m']
head_loss_m[column] = head_loss_m[column] * length_m
reservoir_head_loss_m = head_loss_m.sum(axis=1) + thermal_transfer_unit_design_head_m*1.2 # fixme: only one thermal_transfer_unit_design_head_m from one substation?
# apply this pattern to the reservoir and get results
base_head = reservoir_head_loss_m.max()
pattern_head_m = (reservoir_head_loss_m.values / base_head).tolist()
wn.add_pattern('reservoir', pattern_head_m)
reservoir = wn.get_node(name_node_plant)
reservoir.head_timeseries.base_value = int(base_head)
reservoir.head_timeseries._pattern = 'reservoir'
sim = wntr.sim.EpanetSimulator(wn)
results = sim.run_sim()
# POSTPROCESSING
# $ POSTPROCESSING - PRESSURE/HEAD LOSSES PER PIPE PER HOUR OF THE YEAR
# at the pipes
unitary_head_loss_supply_network_ftperkft = results.link['headloss'].abs()
linear_pressure_loss_Paperm = unitary_head_loss_supply_network_ftperkft * FT_WATER_TO_PA / (FT_TO_M * 1000)
head_loss_supply_network_Pa = linear_pressure_loss_Paperm.copy()
for column in head_loss_supply_network_Pa.columns.values:
length_m = edge_df.loc[column]['length_m']
head_loss_supply_network_Pa[column] = head_loss_supply_network_Pa[column] * length_m
        head_loss_return_network_Pa = head_loss_supply_network_Pa.copy()  # return network assumed identical to supply
# at the substations
head_loss_substations_ft = results.node['head'][consumer_nodes].abs()
head_loss_substations_Pa = head_loss_substations_ft * FT_WATER_TO_PA
#POSTPORCESSING MASSFLOW RATES
# MASS_FLOW_RATE (EDGES)
flow_rate_supply_m3s = results.link['flowrate'].abs()
massflow_supply_kgs = flow_rate_supply_m3s * P_WATER_KGPERM3
# $ POSTPROCESSING - PRESSURE LOSSES ACCUMULATED PER HOUR OF THE YEAR (TIMES 2 to account for return)
accumulated_head_loss_supply_Pa = head_loss_supply_network_Pa.sum(axis=1)
accumulated_head_loss_return_Pa = head_loss_return_network_Pa.sum(axis=1)
accumulated_head_loss_substations_Pa = head_loss_substations_Pa.sum(axis=1)
accumulated_head_loss_total_Pa = accumulated_head_loss_supply_Pa + accumulated_head_loss_return_Pa + accumulated_head_loss_substations_Pa
# $ POSTPROCESSING - THERMAL LOSSES PER PIPE PER HOUR OF THE YEAR (SUPPLY)
# calculate the thermal characteristics of the grid
temperature_of_the_ground_K = calculate_ground_temperature(locator)
        thermal_coefficient_WperKm = pd.Series(
            np.vectorize(calc_linear_thermal_loss_coefficient)(diameter_ext_m, diameter_int_m, diameter_ins_m), pipe_names)
average_temperature_supply_K = T_sup_K_building.mean(axis=1)
thermal_losses_supply_kWh = results.link['headloss'].copy()
thermal_losses_supply_kWh.reset_index(inplace=True, drop=True)
thermal_losses_supply_Wperm = thermal_losses_supply_kWh.copy()
for pipe in pipe_names:
length_m = edge_df.loc[pipe]['length_m']
massflow_kgs = massflow_supply_kgs[pipe]
            k_WperKm_pipe = thermal_coefficient_WperKm[pipe]
k_kWperK = k_WperKm_pipe * length_m / 1000
thermal_losses_supply_kWh[pipe] = np.vectorize(calc_thermal_loss_per_pipe)(average_temperature_supply_K.values,
massflow_kgs.values,
temperature_of_the_ground_K,
k_kWperK,
)
thermal_losses_supply_Wperm[pipe] = (thermal_losses_supply_kWh[pipe] / length_m) * 1000
# return pipes
average_temperature_return_K = T_re_K_building.mean(axis=1)
thermal_losses_return_kWh = results.link['headloss'].copy()
thermal_losses_return_kWh.reset_index(inplace=True, drop=True)
for pipe in pipe_names:
length_m = edge_df.loc[pipe]['length_m']
massflow_kgs = massflow_supply_kgs[pipe]
            k_WperKm_pipe = thermal_coefficient_WperKm[pipe]
k_kWperK = k_WperKm_pipe * length_m / 1000
thermal_losses_return_kWh[pipe] = np.vectorize(calc_thermal_loss_per_pipe)(average_temperature_return_K.values,
massflow_kgs.values,
temperature_of_the_ground_K,
k_kWperK,
)
# WRITE TO DISK
# LINEAR PRESSURE LOSSES (EDGES)
linear_pressure_loss_Paperm.to_csv(locator.get_network_linear_pressure_drop_edges(network_type, network_name),
index=False)
# MASS_FLOW_RATE (EDGES)
flow_rate_supply_m3s = results.link['flowrate'].abs()
massflow_supply_kgs = flow_rate_supply_m3s * P_WATER_KGPERM3
massflow_supply_kgs.to_csv(locator.get_thermal_network_layout_massflow_edges_file(network_type, network_name),
index=False)
# VELOCITY (EDGES)
velocity_edges_ms = results.link['velocity'].abs()
velocity_edges_ms.to_csv(locator.get_thermal_network_velocity_edges_file(network_type, network_name),
index=False)
# PRESSURE LOSSES (NODES)
pressure_at_nodes_ft = results.node['pressure'].abs()
pressure_at_nodes_Pa = pressure_at_nodes_ft * FT_TO_M * M_WATER_TO_PA
pressure_at_nodes_Pa.to_csv(locator.get_network_pressure_at_nodes(network_type, network_name), index=False)
# MASS_FLOW_RATE (NODES)
# $ POSTPROCESSING - MASSFLOWRATES PER NODE PER HOUR OF THE YEAR
flow_rate_supply_nodes_m3s = results.node['demand'].abs()
massflow_supply_nodes_kgs = flow_rate_supply_nodes_m3s * P_WATER_KGPERM3
massflow_supply_nodes_kgs.to_csv(locator.get_thermal_network_layout_massflow_nodes_file(network_type, network_name),
index=False)
# thermal demand per building (no losses in the network or substations)
Q_demand_Wh_building = Q_demand_kWh_building * 1000
Q_demand_Wh_building.to_csv(locator.get_thermal_demand_csv_file(network_type, network_name), index=False)
# pressure losses total
# $ POSTPROCESSING - PUMPING NEEDS PER HOUR OF THE YEAR (TIMES 2 to account for return)
flow_rate_substations_m3s = results.node['demand'][consumer_nodes].abs()
head_loss_supply_kWperm = (linear_pressure_loss_Paperm * (flow_rate_supply_m3s * 3600)) / (3.6E6 * PUMP_ETA)
head_loss_return_kWperm = head_loss_supply_kWperm.copy()
pressure_loss_supply_edge_kW = (head_loss_supply_network_Pa * (flow_rate_supply_m3s * 3600)) / (3.6E6 * PUMP_ETA)
head_loss_return_kW = pressure_loss_supply_edge_kW.copy()
head_loss_substations_kW = (head_loss_substations_Pa * (flow_rate_substations_m3s * 3600)) / (3.6E6 * PUMP_ETA)
accumulated_head_loss_supply_kW = pressure_loss_supply_edge_kW.sum(axis=1)
accumulated_head_loss_return_kW = head_loss_return_kW.sum(axis=1)
accumulated_head_loss_substations_kW = head_loss_substations_kW.sum(axis=1)
accumulated_head_loss_total_kW = accumulated_head_loss_supply_kW + \
accumulated_head_loss_return_kW + \
accumulated_head_loss_substations_kW
head_loss_system_Pa = pd.DataFrame({"pressure_loss_supply_Pa": accumulated_head_loss_supply_Pa,
"pressure_loss_return_Pa": accumulated_head_loss_return_Pa,
"pressure_loss_substations_Pa": accumulated_head_loss_substations_Pa,
"pressure_loss_total_Pa": accumulated_head_loss_total_Pa})
head_loss_system_Pa.to_csv(locator.get_network_total_pressure_drop_file(network_type, network_name),
index=False)
# $ POSTPROCESSING - PLANT HEAT REQUIREMENT
plant_load_kWh = thermal_losses_supply_kWh.sum(axis=1) * 2 + Q_demand_kWh_building.sum(
axis=1) - accumulated_head_loss_total_kW.values
plant_load_kWh.to_csv(locator.get_thermal_network_plant_heat_requirement_file(network_type, network_name),
header=['thermal_load_kW'], index=False)
# pressure losses per piping system
pressure_loss_supply_edge_kW.to_csv(
locator.get_thermal_network_pressure_losses_edges_file(network_type, network_name), index=False)
# pressure losses per substation
head_loss_substations_kW = head_loss_substations_kW.rename(columns=building_nodes_pairs)
head_loss_substations_kW.to_csv(locator.get_thermal_network_substation_ploss_file(network_type, network_name),
index=False)
# pumping needs losses total
pumping_energy_system_kWh = pd.DataFrame({"pressure_loss_supply_kW": accumulated_head_loss_supply_kW,
"pressure_loss_return_kW": accumulated_head_loss_return_kW,
"pressure_loss_substations_kW": accumulated_head_loss_substations_kW,
"pressure_loss_total_kW": accumulated_head_loss_total_kW})
pumping_energy_system_kWh.to_csv(
locator.get_network_energy_pumping_requirements_file(network_type, network_name), index=False)
# pumping needs losses total
temperatures_plant_C = pd.DataFrame({"temperature_supply_K": average_temperature_supply_K,
"temperature_return_K": average_temperature_return_K})
temperatures_plant_C.to_csv(locator.get_network_temperature_plant(network_type, network_name), index=False)
# thermal losses
thermal_losses_supply_kWh.to_csv(locator.get_network_thermal_loss_edges_file(network_type, network_name),
index=False)
thermal_losses_supply_Wperm.to_csv(locator.get_network_linear_thermal_loss_edges_file(network_type, network_name),
index=False)
# thermal losses total
accumulated_thermal_losses_supply_kWh = thermal_losses_supply_kWh.sum(axis=1)
accumulated_thermal_losses_return_kWh = thermal_losses_return_kWh.sum(axis=1)
accumulated_thermal_loss_total_kWh = accumulated_thermal_losses_supply_kWh + accumulated_thermal_losses_return_kWh
thermal_losses_total_kWh = pd.DataFrame({"thermal_loss_supply_kW": accumulated_thermal_losses_supply_kWh,
"thermal_loss_return_kW": accumulated_thermal_losses_return_kWh,
"thermal_loss_total_kW": accumulated_thermal_loss_total_kWh})
thermal_losses_total_kWh.to_csv(locator.get_network_total_thermal_loss_file(network_type, network_name),
index=False)
# return average temperature of supply at the substations
T_sup_K_nodes = T_sup_K_building.rename(columns=building_nodes_pairs_inversed)
average_year = T_sup_K_nodes.mean(axis=1)
for node in node_df.index.values:
T_sup_K_nodes[node] = average_year
T_sup_K_nodes.to_csv(locator.get_network_temperature_supply_nodes_file(network_type, network_name),
index=False)
# return average temperature of return at the substations
T_return_K_nodes = T_re_K_building.rename(columns=building_nodes_pairs_inversed)
average_year = T_return_K_nodes.mean(axis=1)
for node in node_df.index.values:
T_return_K_nodes[node] = average_year
T_return_K_nodes.to_csv(locator.get_network_temperature_return_nodes_file(network_type, network_name),
index=False)
# summary of edges used for the calculation
fields_edges = ['length_m', 'Pipe_DN', 'Type_mat', 'D_int_m']
edge_df[fields_edges].to_csv(locator.get_thermal_network_edge_list_file(network_type, network_name))
fields_nodes = ['Type', 'Building']
node_df[fields_nodes].to_csv(locator.get_thermal_network_node_types_csv_file(network_type, network_name))
# correct diameter of network and save to the shapefile
from cea.utilities.dbf import dataframe_to_dbf, dbf_to_dataframe
fields = ['length_m', 'Pipe_DN', 'Type_mat']
edge_df = edge_df[fields]
edge_df['name'] = edge_df.index.values
network_edges_df = dbf_to_dataframe(
locator.get_network_layout_edges_shapefile(network_type, network_name).split('.shp')[0] + '.dbf')
network_edges_df = network_edges_df.merge(edge_df, left_on='Name', right_on='name', suffixes=('_x', ''))
network_edges_df = network_edges_df.drop(['Pipe_DN_x', 'Type_mat_x', 'name', 'length_m_x'], axis=1)
dataframe_to_dbf(network_edges_df,
locator.get_network_layout_edges_shapefile(network_type, network_name).split('.shp')[0] + '.dbf')
def main(config):
"""
run the whole network summary routine
"""
start = time.time()
locator = cea.inputlocator.InputLocator(scenario=config.scenario)
network_names = config.thermal_network.network_names
if len(network_names) == 0:
network_names = ['']
for network_name in network_names:
thermal_network_simplified(locator, config, network_name)
print("done.")
print(f"total time: {time.time() - start}")
if __name__ == '__main__':
main(cea.config.Configuration())
|
mit
|
ethen8181/machine-learning
|
model_selection/partial_dependence/partial_dependence.py
|
1
|
11957
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from math import ceil
from joblib import Parallel, delayed
from matplotlib.gridspec import GridSpec
__all__ = ['PartialDependenceExplainer']
class PartialDependenceExplainer:
"""
Partial Dependence explanation [1]_.
- Supports scikit-learn like classification and regression classifiers.
- Works for both numerical and categorical columns.
Parameters
----------
estimator : sklearn-like classifier
Model that was fitted on the data.
n_grid_points : int, default 50
Number of grid points used in replacement
for the original numeric data. Only used
if the targeted column is numeric. For categorical
column, the number of grid points will always be
the distinct number of categories in that column.
Smaller number of grid points serves as an
approximation for the total number of unique
points and will result in faster computation
batch_size : int, default = 'auto'
        Compute partial dependence prediction batch by batch to save
memory usage, the default batch size will be
ceil(number of rows in the data / the number of grid points used)
n_jobs : int, default 1
Number of jobs to run in parallel, if the model already fits
extremely fast on the data, then specify 1 so that there's no
overhead of spawning different processes to do the computation
verbose : int, default 1
The verbosity level: if non zero, progress messages are printed.
Above 50, the output is sent to stdout. The frequency of the messages increases
        with the verbosity level. If it is more than 10, all iterations are reported.
pre_dispatch : int or str, default '2*n_jobs'
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. Possible inputs:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Attributes
----------
feature_name_ : str
The input feature_name to the .fit unmodified, will
be used in subsequent method.
feature_type_ : str
The input feature_type to the .fit unmodified, will
be used in subsequent method.
feature_grid_ : 1d ndarray
Unique grid points that were used to generate the
partial dependence result.
    results_ : list of DataFrame
Partial dependence result. If it's a classification
estimator then each index of the list is the result
for each class. On the other hand, if it's a regression
estimator, it will be a list with 1 element.
References
----------
.. [1] `Python partial dependence plot toolbox
<https://github.com/SauceCat/PDPbox>`_
"""
    def __init__(self, estimator, n_grid_points = 50, batch_size = 'auto',
                 n_jobs = 1, verbose = 1, pre_dispatch = '2*n_jobs'):
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.estimator = estimator
        self.batch_size = batch_size
        self.pre_dispatch = pre_dispatch
        self.n_grid_points = n_grid_points
def fit(self, data, feature_name, feature_type):
"""
Obtain the partial dependence result.
Parameters
----------
data : DataFrame, shape [n_samples, n_features]
Input data to the estimator/model.
feature_name : str
Feature's name in the data what we wish to explain.
feature_type : str, {'num', 'cat'}
Specify whether feature_name is a numerical or
categorical column.
Returns
-------
self
"""
# check whether it's a classification or regression model
estimator = self.estimator
try:
n_classes = estimator.classes_.size
is_classifier = True
predict = estimator.predict_proba
except AttributeError:
# for regression problem, still set the
# number of classes to 1 to initialize
# the loop later downstream
n_classes = 1
is_classifier = False
predict = estimator.predict
target = data[feature_name]
unique_target = np.unique(target)
n_unique = unique_target.size
if feature_type == 'num':
if self.n_grid_points >= n_unique:
feature_grid = unique_target
else:
# when the number of required grid points is smaller than the number of
# unique values, we choose the percentile points to make sure the grid points
# span widely across the whole value range
percentile = np.percentile(target, np.linspace(0, 100, self.n_grid_points))
feature_grid = np.unique(percentile)
feature_cols = feature_grid
else:
feature_grid = unique_target
feature_cols = np.asarray(['{}_{}'.format(feature_name, category)
for category in unique_target])
        # compute prediction batch by batch to save memory usage
        n_rows = data.shape[0]
        if self.batch_size == 'auto':
            batch_size = ceil(n_rows / feature_grid.size)
        else:
            batch_size = self.batch_size
parallel = Parallel(
n_jobs = self.n_jobs, verbose = self.verbose, pre_dispatch = self.pre_dispatch)
outputs = parallel(delayed(_predict_batch)(data_batch,
feature_grid,
feature_name,
is_classifier,
n_classes,
predict)
for data_batch in _data_iter(data, batch_size))
results = []
for output in zip(*outputs):
result = pd.concat(output, ignore_index = True)
result.columns = feature_cols
results.append(result)
self.results_ = results
self.feature_name_ = feature_name
self.feature_grid_ = feature_grid
self.feature_type_ = feature_type
return self
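    # After fit, results_ holds one DataFrame per class (a single one for
    # regressors), each shaped [n_samples, n_grid_points]: one row per
    # observation and one column per grid value of the inspected feature.
    # plot() averages over rows to obtain the partial dependence curve and
    # uses the per-column standard deviation for the shaded band.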
def plot(self, centered = True, target_class = 0):
"""
Use the partial dependence result to generate
a partial dependence plot (using matplotlib).
Parameters
----------
centered : bool, default True
            Center the partial dependence plot by subtracting every partial
dependence result table's column value with the value of the first
column, i.e. first column's value will serve as the baseline
(centered at 0) for all other values.
target_class : int, default 0
The target class to show for the partial dependence result,
for regression task, we can leave the default number unmodified,
but for classification task, we should specify the target class
parameter to meet our needs
Returns
-------
figure
"""
figure = GridSpec(5, 1)
ax1 = plt.subplot(figure[0, :])
self._plot_title(ax1)
ax2 = plt.subplot(figure[1:, :])
self._plot_content(ax2, centered, target_class)
return figure
def _plot_title(self, ax):
font_family = 'Arial'
title = 'Partial Dependence Plot for {}'.format(self.feature_name_)
subtitle = 'Number of unique grid points: {}'.format(self.feature_grid_.size)
title_fontsize = 15
subtitle_fontsize = 12
ax.set_facecolor('white')
ax.text(
0, 0.7, title,
fontsize = title_fontsize, fontname = font_family)
ax.text(
0, 0.4, subtitle, color = 'grey',
fontsize = subtitle_fontsize, fontname = font_family)
ax.axis('off')
def _plot_content(self, ax, centered, target_class):
# pd (partial dependence)
pd_linewidth = 2
pd_markersize = 5
pd_color = '#1A4E5D'
fill_alpha = 0.2
fill_color = '#66C2D7'
zero_linewidth = 1.5
zero_color = '#E75438'
xlabel_fontsize = 10
results = self.results_[target_class]
feature_cols = results.columns
if self.feature_type_ == 'cat':
# ticks = all the unique categories
x = range(len(feature_cols))
ax.set_xticks(x)
ax.set_xticklabels(feature_cols)
else:
x = feature_cols
        # center the partial dependence plot by subtracting every value
# with the value of the first column, i.e. first column's value
# will serve as the baseline (centered at 0) for all other values
pd = results.values.mean(axis = 0)
if centered:
pd -= pd[0]
pd_std = results.values.std(axis = 0)
upper = pd + pd_std
lower = pd - pd_std
ax.plot(
x, pd, color = pd_color, linewidth = pd_linewidth,
marker = 'o', markersize = pd_markersize)
ax.plot(
x, [0] * pd.size, color = zero_color,
linestyle = '--', linewidth = zero_linewidth)
ax.fill_between(x, upper, lower, alpha = fill_alpha, color = fill_color)
ax.set_xlabel(self.feature_name_, fontsize = xlabel_fontsize)
self._modify_axis(ax)
def _modify_axis(self, ax):
tick_labelsize = 8
tick_colors = '#9E9E9E'
tick_labelcolor = '#424242'
ax.tick_params(
axis = 'both', which = 'major', colors = tick_colors,
labelsize = tick_labelsize, labelcolor = tick_labelcolor)
ax.set_facecolor('white')
ax.get_yaxis().tick_left()
ax.get_xaxis().tick_bottom()
for direction in ('top', 'left', 'right', 'bottom'):
ax.spines[direction].set_visible(False)
for axis in ('x', 'y'):
ax.grid(True, 'major', axis, ls = '--', lw = .5, c = 'k', alpha = .3)
def _data_iter(data, batch_size):
"""Used by PartialDependenceExplainer to loop through the data by batch"""
n_rows = data.shape[0]
for i in range(0, n_rows, batch_size):
yield data[i:i + batch_size].reset_index(drop = True)
def _predict_batch(data_batch, feature_grid, feature_name,
is_classifier, n_classes, predict):
"""Used by PartialDependenceExplainer to generate prediction by batch"""
# repeat the index and use it to slice the data to create the repeated data
# instead of creating the repetition using the values, i.e.
# np.repeat(data_batch.values, repeats = feature_grid.size, axis = 0)
# this prevents everything from getting converted to a different data type, e.g.
# if there is 1 object type column then everything would get converted to object
index_batch = np.repeat(data_batch.index.values, repeats = feature_grid.size)
ice_data = data_batch.iloc[index_batch].copy()
ice_data[feature_name] = np.tile(feature_grid, data_batch.shape[0])
results = []
prediction = predict(ice_data)
for n_class in range(n_classes):
if is_classifier:
result = prediction[:, n_class]
else:
result = prediction
# reshape tiled data back to original batch's shape
reshaped = result.reshape((data_batch.shape[0], feature_grid.size))
result = pd.DataFrame(reshaped)
results.append(result)
return results
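# Illustrative usage sketch (the names below are hypothetical): given a fitted
# scikit-learn classifier `model` and the pandas DataFrame `X` it was trained
# on, with a numeric column 'age', one could do roughly:
#
#     explainer = PartialDependenceExplainer(model, n_grid_points = 30)
#     explainer.fit(X, feature_name = 'age', feature_type = 'num')
#     explainer.plot(centered = True, target_class = 1)
#     plt.show()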
|
mit
|
andaag/scikit-learn
|
examples/datasets/plot_random_multilabel_dataset.py
|
278
|
3402
|
"""
==============================================
Plot randomly generated multilabel dataset
==============================================
This illustrates the `datasets.make_multilabel_classification` dataset
generator. Each sample consists of counts of two features (up to 50 in
total), which are differently distributed in each of two classes.
Points are labeled as follows, where Y means the class is present:
=====  =====  =====  ======
  1      2      3    Color
=====  =====  =====  ======
  Y      N      N    Red
  N      Y      N    Blue
  N      N      Y    Yellow
  Y      Y      N    Purple
  Y      N      Y    Orange
  N      Y      Y    Green
  Y      Y      Y    Brown
=====  =====  =====  ======
A star marks the expected sample for each class; its size reflects the
probability of selecting that class label.
The left and right examples highlight the ``n_labels`` parameter:
more of the samples in the right plot have 2 or 3 labels.
Note that this two-dimensional example is very degenerate:
generally the number of features would be much greater than the
"document length", while here we have much larger documents than vocabulary.
Similarly, with ``n_classes > n_features``, it is much less likely that a
feature distinguishes a particular class.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification as make_ml_clf
print(__doc__)
COLORS = np.array(['!',
'#FF3333', # red
'#0198E1', # blue
'#BF5FFF', # purple
'#FCD116', # yellow
'#FF7216', # orange
'#4DBD33', # green
'#87421F' # brown
])
# Use same random seed for multiple calls to make_multilabel_classification to
# ensure same distributions
RANDOM_SEED = np.random.randint(2 ** 10)
def plot_2d(ax, n_labels=1, n_classes=3, length=50):
X, Y, p_c, p_w_c = make_ml_clf(n_samples=150, n_features=2,
n_classes=n_classes, n_labels=n_labels,
length=length, allow_unlabeled=False,
return_distributions=True,
random_state=RANDOM_SEED)
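    # Y is an indicator matrix over the three classes, so (Y * [1, 2, 4])
    # .sum(axis=1) encodes each label combination as a unique integer in 1..7
    # that indexes into COLORS (e.g. classes 1 and 3 give 1 + 4 = 5, orange),
    # matching the table in the module docstring.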
ax.scatter(X[:, 0], X[:, 1], color=COLORS.take((Y * [1, 2, 4]
).sum(axis=1)),
marker='.')
ax.scatter(p_w_c[0] * length, p_w_c[1] * length,
marker='*', linewidth=.5, edgecolor='black',
s=20 + 1500 * p_c ** 2,
color=COLORS.take([1, 2, 4]))
ax.set_xlabel('Feature 0 count')
return p_c, p_w_c
_, (ax1, ax2) = plt.subplots(1, 2, sharex='row', sharey='row', figsize=(8, 4))
plt.subplots_adjust(bottom=.15)
p_c, p_w_c = plot_2d(ax1, n_labels=1)
ax1.set_title('n_labels=1, length=50')
ax1.set_ylabel('Feature 1 count')
plot_2d(ax2, n_labels=3)
ax2.set_title('n_labels=3, length=50')
ax2.set_xlim(left=0, auto=True)
ax2.set_ylim(bottom=0, auto=True)
plt.show()
print('The data was generated from (random_state=%d):' % RANDOM_SEED)
print('Class', 'P(C)', 'P(w0|C)', 'P(w1|C)', sep='\t')
for k, p, p_w in zip(['red', 'blue', 'yellow'], p_c, p_w_c.T):
print('%s\t%0.2f\t%0.2f\t%0.2f' % (k, p, p_w[0], p_w[1]))
|
bsd-3-clause
|
kmike/scikit-learn
|
examples/covariance/plot_covariance_estimation.py
|
4
|
4992
|
"""
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
The usual estimator for covariance is the maximum likelihood estimator,
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A closed formula proposed by Ledoit and Wolf to compute
  the asymptotically optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
import numpy as np
import pylab as pl
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.grid_search import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
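# For reference, ShrunkCovariance regularizes the empirical covariance S as
# (1 - shrinkage) * S + shrinkage * mu * Identity with mu = trace(S) / n_features,
# so the loop above traces the test log-likelihood across that bias-variance
# trade-off.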
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = pl.figure()
pl.title("Regularized covariance: likelihood and shrinkage coefficient")
pl.xlabel('Regularization parameter: shrinkage coefficient')
pl.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
pl.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
pl.plot(pl.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((pl.ylim()[1] - pl.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
pl.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
pl.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
pl.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
pl.ylim(ymin, ymax)
pl.xlim(xmin, xmax)
pl.legend()
pl.show()
|
bsd-3-clause
|
leesavide/pythonista-docs
|
Documentation/matplotlib/mpl_examples/pylab_examples/hatch_demo.py
|
12
|
1095
|
"""
Hatching (pattern filled polygons) is supported currently in the PS,
PDF, SVG and Agg backends only.
"""
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse, Polygon
fig = plt.figure()
ax1 = fig.add_subplot(131)
ax1.bar(range(1,5), range(1,5), color='red', edgecolor='black', hatch="/")
ax1.bar(range(1,5), [6] * 4, bottom=range(1,5), color='blue', edgecolor='black', hatch='//')
ax1.set_xticks([1.5,2.5,3.5,4.5])
ax2 = fig.add_subplot(132)
bars = ax2.bar(range(1,5), range(1,5), color='yellow', ecolor='black') + \
ax2.bar(range(1, 5), [6] * 4, bottom=range(1,5), color='green', ecolor='black')
ax2.set_xticks([1.5,2.5,3.5,4.5])
patterns = ('-', '+', 'x', '\\', '*', 'o', 'O', '.')
for bar, pattern in zip(bars, patterns):
bar.set_hatch(pattern)
ax3 = fig.add_subplot(133)
ax3.fill([1,3,3,1],[1,1,2,2], fill=False, hatch='\\')
ax3.add_patch(Ellipse((4,1.5), 4, 0.5, fill=False, hatch='*'))
ax3.add_patch(Polygon([[0,0],[4,1.1],[6,2.5],[2,1.4]], closed=True,
fill=False, hatch='/'))
ax3.set_xlim((0,6))
ax3.set_ylim((0,2.5))
plt.show()
|
apache-2.0
|
elkingtonmcb/sympy
|
sympy/interactive/printing.py
|
31
|
15830
|
"""Tools for setting up printing in interactive sessions. """
from __future__ import print_function, division
import sys
from distutils.version import LooseVersion as V
from io import BytesIO
from sympy import latex as default_latex
from sympy import preview
from sympy.core.compatibility import integer_types
from sympy.utilities.misc import debug
def _init_python_printing(stringify_func):
"""Setup printing in Python interactive session. """
import sys
from sympy.core.compatibility import builtins
def _displayhook(arg):
"""Python's pretty-printer display hook.
This function was adapted from:
http://www.python.org/dev/peps/pep-0217/
"""
if arg is not None:
builtins._ = None
print(stringify_func(arg))
builtins._ = arg
sys.displayhook = _displayhook
def _init_ipython_printing(ip, stringify_func, use_latex, euler, forecolor,
backcolor, fontsize, latex_mode, print_builtin,
latex_printer):
"""Setup printing in IPython interactive session. """
try:
from IPython.lib.latextools import latex_to_png
except ImportError:
pass
preamble = "\\documentclass[%s]{article}\n" \
"\\pagestyle{empty}\n" \
"\\usepackage{amsmath,amsfonts}%s\\begin{document}"
if euler:
addpackages = '\\usepackage{euler}'
else:
addpackages = ''
preamble = preamble % (fontsize, addpackages)
imagesize = 'tight'
offset = "0cm,0cm"
resolution = 150
dvi = r"-T %s -D %d -bg %s -fg %s -O %s" % (
imagesize, resolution, backcolor, forecolor, offset)
dvioptions = dvi.split()
debug("init_printing: DVIOPTIONS:", dvioptions)
debug("init_printing: PREAMBLE:", preamble)
latex = latex_printer or default_latex
def _print_plain(arg, p, cycle):
"""caller for pretty, for use in IPython 0.11"""
if _can_print_latex(arg):
p.text(stringify_func(arg))
else:
p.text(IPython.lib.pretty.pretty(arg))
def _preview_wrapper(o):
exprbuffer = BytesIO()
try:
preview(o, output='png', viewer='BytesIO',
outputbuffer=exprbuffer, preamble=preamble,
dvioptions=dvioptions)
except Exception as e:
# IPython swallows exceptions
debug("png printing:", "_preview_wrapper exception raised:",
repr(e))
raise
return exprbuffer.getvalue()
def _matplotlib_wrapper(o):
# mathtext does not understand certain latex flags, so we try to
# replace them with suitable subs
o = o.replace(r'\operatorname', '')
o = o.replace(r'\overline', r'\bar')
return latex_to_png(o)
def _can_print_latex(o):
"""Return True if type o can be printed with LaTeX.
If o is a container type, this is True if and only if every element of
o can be printed with LaTeX.
"""
from sympy import Basic
from sympy.matrices import MatrixBase
from sympy.physics.vector import Vector, Dyadic
if isinstance(o, (list, tuple, set, frozenset)):
return all(_can_print_latex(i) for i in o)
elif isinstance(o, dict):
return all(_can_print_latex(i) and _can_print_latex(o[i]) for i in o)
elif isinstance(o, bool):
return False
# TODO : Investigate if "elif hasattr(o, '_latex')" is more useful
# to use here, than these explicit imports.
elif isinstance(o, (Basic, MatrixBase, Vector, Dyadic)):
return True
elif isinstance(o, (float, integer_types)) and print_builtin:
return True
return False
def _print_latex_png(o):
"""
A function that returns a png rendered by an external latex
distribution, falling back to matplotlib rendering
"""
if _can_print_latex(o):
s = latex(o, mode=latex_mode)
try:
return _preview_wrapper(s)
except RuntimeError:
if latex_mode != 'inline':
s = latex(o, mode='inline')
return _matplotlib_wrapper(s)
def _print_latex_matplotlib(o):
"""
A function that returns a png rendered by mathtext
"""
if _can_print_latex(o):
s = latex(o, mode='inline')
try:
return _matplotlib_wrapper(s)
except Exception:
# Matplotlib.mathtext cannot render some things (like
# matrices)
return None
def _print_latex_text(o):
"""
A function to generate the latex representation of sympy expressions.
"""
if _can_print_latex(o):
s = latex(o, mode='plain')
s = s.replace(r'\dag', r'\dagger')
s = s.strip('$')
return '$$%s$$' % s
def _result_display(self, arg):
"""IPython's pretty-printer display hook, for use in IPython 0.10
This function was adapted from:
ipython/IPython/hooks.py:155
"""
if self.rc.pprint:
out = stringify_func(arg)
if '\n' in out:
print
print(out)
else:
print(repr(arg))
import IPython
if V(IPython.__version__) >= '0.11':
from sympy.core.basic import Basic
from sympy.matrices.matrices import MatrixBase
from sympy.physics.vector import Vector, Dyadic
printable_types = [Basic, MatrixBase, float, tuple, list, set,
frozenset, dict, Vector, Dyadic] + list(integer_types)
plaintext_formatter = ip.display_formatter.formatters['text/plain']
for cls in printable_types:
plaintext_formatter.for_type(cls, _print_plain)
png_formatter = ip.display_formatter.formatters['image/png']
if use_latex in (True, 'png'):
debug("init_printing: using png formatter")
for cls in printable_types:
png_formatter.for_type(cls, _print_latex_png)
elif use_latex == 'matplotlib':
debug("init_printing: using matplotlib formatter")
for cls in printable_types:
png_formatter.for_type(cls, _print_latex_matplotlib)
else:
debug("init_printing: not using any png formatter")
for cls in printable_types:
# Better way to set this, but currently does not work in IPython
#png_formatter.for_type(cls, None)
if cls in png_formatter.type_printers:
png_formatter.type_printers.pop(cls)
latex_formatter = ip.display_formatter.formatters['text/latex']
if use_latex in (True, 'mathjax'):
debug("init_printing: using mathjax formatter")
for cls in printable_types:
latex_formatter.for_type(cls, _print_latex_text)
else:
debug("init_printing: not using text/latex formatter")
for cls in printable_types:
# Better way to set this, but currently does not work in IPython
#latex_formatter.for_type(cls, None)
if cls in latex_formatter.type_printers:
latex_formatter.type_printers.pop(cls)
else:
ip.set_hook('result_display', _result_display)
def _is_ipython(shell):
"""Is a shell instance an IPython shell?"""
# shortcut, so we don't import IPython if we don't have to
if 'IPython' not in sys.modules:
return False
try:
from IPython.core.interactiveshell import InteractiveShell
except ImportError:
# IPython < 0.11
try:
from IPython.iplib import InteractiveShell
except ImportError:
            # Reaching this point means IPython has changed in a backward-incompatible way
# that we don't know about. Warn?
return False
return isinstance(shell, InteractiveShell)
def init_printing(pretty_print=True, order=None, use_unicode=None,
use_latex=None, wrap_line=None, num_columns=None,
no_global=False, ip=None, euler=False, forecolor='Black',
backcolor='Transparent', fontsize='10pt',
latex_mode='equation*', print_builtin=True,
str_printer=None, pretty_printer=None,
latex_printer=None):
"""
Initializes pretty-printer depending on the environment.
Parameters
==========
pretty_print: boolean
If True, use pretty_print to stringify or the provided pretty
printer; if False, use sstrrepr to stringify or the provided string
printer.
order: string or None
There are a few different settings for this parameter:
        lex (default), which is lexicographic order;
        grlex, which is graded lexicographic order;
        grevlex, which is reversed graded lexicographic order;
old, which is used for compatibility reasons and for long expressions;
None, which sets it to lex.
use_unicode: boolean or None
If True, use unicode characters;
if False, do not use unicode characters.
use_latex: string, boolean, or None
If True, use default latex rendering in GUI interfaces (png and
mathjax);
if False, do not use latex rendering;
if 'png', enable latex rendering with an external latex compiler,
falling back to matplotlib if external compilation fails;
if 'matplotlib', enable latex rendering with matplotlib;
if 'mathjax', enable latex text generation, for example MathJax
rendering in IPython notebook or text rendering in LaTeX documents
wrap_line: boolean
If True, lines will wrap at the end; if False, they will not wrap
but continue as one line. This is only relevant if `pretty_print` is
True.
num_columns: int or None
If int, number of columns before wrapping is set to num_columns; if
None, number of columns before wrapping is set to terminal width.
This is only relevant if `pretty_print` is True.
no_global: boolean
If True, the settings become system wide;
if False, use just for this console/session.
ip: An interactive console
This can either be an instance of IPython,
or a class that derives from code.InteractiveConsole.
euler: boolean, optional, default=False
Loads the euler package in the LaTeX preamble for handwritten style
fonts (http://www.ctan.org/pkg/euler).
forecolor: string, optional, default='Black'
DVI setting for foreground color.
backcolor: string, optional, default='Transparent'
DVI setting for background color.
fontsize: string, optional, default='10pt'
A font size to pass to the LaTeX documentclass function in the
preamble.
latex_mode: string, optional, default='equation*'
The mode used in the LaTeX printer. Can be one of:
{'inline'|'plain'|'equation'|'equation*'}.
print_builtin: boolean, optional, default=True
If true then floats and integers will be printed. If false the
printer will only print SymPy types.
str_printer: function, optional, default=None
A custom string printer function. This should mimic
sympy.printing.sstrrepr().
pretty_printer: function, optional, default=None
A custom pretty printer. This should mimic sympy.printing.pretty().
latex_printer: function, optional, default=None
        A custom LaTeX printer. This should mimic sympy.printing.latex().
Examples
========
>>> from sympy.interactive import init_printing
>>> from sympy import Symbol, sqrt
>>> from sympy.abc import x, y
>>> sqrt(5)
sqrt(5)
>>> init_printing(pretty_print=True) # doctest: +SKIP
>>> sqrt(5) # doctest: +SKIP
___
\/ 5
>>> theta = Symbol('theta') # doctest: +SKIP
>>> init_printing(use_unicode=True) # doctest: +SKIP
>>> theta # doctest: +SKIP
\u03b8
>>> init_printing(use_unicode=False) # doctest: +SKIP
>>> theta # doctest: +SKIP
theta
>>> init_printing(order='lex') # doctest: +SKIP
>>> str(y + x + y**2 + x**2) # doctest: +SKIP
x**2 + x + y**2 + y
>>> init_printing(order='grlex') # doctest: +SKIP
>>> str(y + x + y**2 + x**2) # doctest: +SKIP
    x**2 + y**2 + x + y
>>> init_printing(order='grevlex') # doctest: +SKIP
>>> str(y * x**2 + x * y**2) # doctest: +SKIP
x**2*y + x*y**2
>>> init_printing(order='old') # doctest: +SKIP
>>> str(x**2 + y**2 + x + y) # doctest: +SKIP
x**2 + x + y**2 + y
>>> init_printing(num_columns=10) # doctest: +SKIP
>>> x**2 + x + y**2 + y # doctest: +SKIP
x + y +
x**2 + y**2
"""
import sys
from sympy.printing.printer import Printer
if pretty_print:
if pretty_printer is not None:
stringify_func = pretty_printer
else:
from sympy.printing import pretty as stringify_func
else:
if str_printer is not None:
stringify_func = str_printer
else:
from sympy.printing import sstrrepr as stringify_func
# Even if ip is not passed, double check that not in IPython shell
in_ipython = False
if ip is None:
try:
ip = get_ipython()
except NameError:
pass
else:
in_ipython = (ip is not None)
if ip and not in_ipython:
in_ipython = _is_ipython(ip)
if in_ipython and pretty_print:
try:
import IPython
# IPython 1.0 deprecates the frontend module, so we import directly
# from the terminal module to prevent a deprecation message from being
# shown.
if V(IPython.__version__) >= '1.0':
from IPython.terminal.interactiveshell import TerminalInteractiveShell
else:
from IPython.frontend.terminal.interactiveshell import TerminalInteractiveShell
from code import InteractiveConsole
except ImportError:
pass
else:
# This will be True if we are in the qtconsole or notebook
if not isinstance(ip, (InteractiveConsole, TerminalInteractiveShell)) \
and 'ipython-console' not in ''.join(sys.argv):
if use_unicode is None:
debug("init_printing: Setting use_unicode to True")
use_unicode = True
if use_latex is None:
debug("init_printing: Setting use_latex to True")
use_latex = True
if not no_global:
Printer.set_global_settings(order=order, use_unicode=use_unicode,
wrap_line=wrap_line, num_columns=num_columns)
else:
_stringify_func = stringify_func
if pretty_print:
stringify_func = lambda expr: \
_stringify_func(expr, order=order,
use_unicode=use_unicode,
wrap_line=wrap_line,
num_columns=num_columns)
else:
stringify_func = lambda expr: _stringify_func(expr, order=order)
if in_ipython:
_init_ipython_printing(ip, stringify_func, use_latex, euler,
forecolor, backcolor, fontsize, latex_mode,
print_builtin, latex_printer)
else:
_init_python_printing(stringify_func)
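# A minimal, illustrative usage sketch (not part of the module above): it shows how a
# custom string printer could be supplied via ``str_printer``; the wrapping lambda is an
# assumption made purely for illustration.
def _init_printing_custom_str_example():
    from sympy import Symbol
    from sympy.printing import sstrrepr
    init_printing(pretty_print=False,
                  str_printer=lambda expr, **settings: '<%s>' % sstrrepr(expr, **settings))
    return Symbol('x')  # subsequently displayed as <x> in a plain Python session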
|
bsd-3-clause
|
bluescarni/hyperion
|
docs/tutorials/scripts/class2_sed_plot_components.py
|
2
|
1237
|
import matplotlib.pyplot as plt
from hyperion.model import ModelOutput
from hyperion.util.constants import pc
m = ModelOutput('class2_sed.rtout')
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# Total SED
sed = m.get_sed(inclination=0, aperture=-1, distance=300 * pc)
ax.loglog(sed.wav, sed.val, color='black', lw=3, alpha=0.5)
# Direct stellar photons
sed = m.get_sed(inclination=0, aperture=-1, distance=300 * pc,
component='source_emit')
ax.loglog(sed.wav, sed.val, color='blue')
# Scattered stellar photons
sed = m.get_sed(inclination=0, aperture=-1, distance=300 * pc,
component='source_scat')
ax.loglog(sed.wav, sed.val, color='teal')
# Direct dust photons
sed = m.get_sed(inclination=0, aperture=-1, distance=300 * pc,
component='dust_emit')
ax.loglog(sed.wav, sed.val, color='red')
# Scattered dust photons
sed = m.get_sed(inclination=0, aperture=-1, distance=300 * pc,
component='dust_scat')
ax.loglog(sed.wav, sed.val, color='orange')
ax.set_xlabel(r'$\lambda$ [$\mu$m]')
ax.set_ylabel(r'$\lambda F_\lambda$ [ergs/s/cm$^2$]')
ax.set_xlim(0.1, 2000.)
ax.set_ylim(2.e-16, 2.e-9)
fig.savefig('class2_sed_plot_components.png')
|
bsd-2-clause
|
tody411/GuidedFilter
|
guided_filter/results/smooth_noise.py
|
1
|
2303
|
# -*- coding: utf-8 -*-
## @package guided_filter.results.smooth_noise
#
# Simple guided filter test.
# @author tody
# @date 2015/08/26
import os
import numpy as np
import matplotlib.pyplot as plt
from guided_filter.datasets.google_image import dataFile
from guided_filter.results.results import resultFile
from guided_filter.io_util.image import loadRGB
from guided_filter.cv.image import to32F
from guided_filter.core.filters import FastGuidedFilter, GuidedFilter
def runSmoothNoiseResult(image_file):
image_name = os.path.basename(image_file)
image_name = os.path.splitext(image_name)[0]
C_8U = loadRGB(image_file)
C_32F = to32F(C_8U)
aspect = C_32F.shape[0] / float(C_32F.shape[1])
fig_width = 10
fig_height = int(2 * fig_width * aspect / 3) + 2
fig = plt.figure(figsize=(fig_width, fig_height))
fig.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.82, wspace=0.02, hspace=0.3)
h, w = C_32F.shape[:2]
image_size_str = "Image size: %s x %s" %(w, h)
fig.suptitle("Filtering noise image\n%s" % image_size_str)
plt.subplot(231)
plt.title("Original")
plt.imshow(C_32F)
plt.axis('off')
h, w, cs = C_32F.shape
C_noise = np.float32(C_32F + 0.3 * np.random.rand(h, w, cs))
C_noise = np.clip(C_noise, 0.0, 1.0)
plt.subplot(232)
plt.title("Noise")
plt.imshow(C_noise)
plt.axis('off')
sigmas = [5, 10, 20]
plot_id = 234
for sigma in sigmas:
guided_filter = FastGuidedFilter(C_noise, radius=sigma, epsilon=0.02)
C_smooth = guided_filter.filter(C_noise)
C_smooth = np.clip(C_smooth, 0.0, 1.0)
plt.subplot(plot_id)
plt.title("Filtered ($r$=%s)" %sigma)
plt.imshow(C_smooth)
plt.axis('off')
plot_id +=1
result_file = resultFile(image_name)
plt.savefig(result_file)
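# A minimal, illustrative usage sketch: applies FastGuidedFilter to a single image without
# the plotting scaffold above; the image path argument and parameter values are assumptions
# for illustration only.
def runSingleImageFilterExample(image_file, radius=10, epsilon=0.02):
    C_32F = to32F(loadRGB(image_file))
    guided_filter = FastGuidedFilter(C_32F, radius=radius, epsilon=epsilon)
    return np.clip(guided_filter.filter(C_32F), 0.0, 1.0)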
def runSmoothNoiseResults(data_names, data_ids):
for data_name in data_names:
print "Smooth noise: %s" % data_name
for data_id in data_ids:
print "Data ID: %s" % data_id
image_file = dataFile(data_name, data_id)
runSmoothNoiseResult(image_file)
if __name__ == '__main__':
data_names = ["flower"]
data_ids = range(3)
runSmoothNoiseResults(data_names, data_ids)
|
mit
|
urschrei/geopandas
|
geopandas/plotting.py
|
1
|
12873
|
from __future__ import print_function
import warnings
import numpy as np
from six import next
from six.moves import xrange
from shapely.geometry import Polygon
def plot_polygon(ax, poly, facecolor='red', edgecolor='black', alpha=0.5, linewidth=1.0):
""" Plot a single Polygon geometry """
from descartes.patch import PolygonPatch
a = np.asarray(poly.exterior)
if poly.has_z:
poly = Polygon(zip(*poly.exterior.xy))
# without Descartes, we could make a Patch of exterior
ax.add_patch(PolygonPatch(poly, facecolor=facecolor, linewidth=0, alpha=alpha)) # linewidth=0 because boundaries are drawn separately
ax.plot(a[:, 0], a[:, 1], color=edgecolor, linewidth=linewidth)
for p in poly.interiors:
x, y = zip(*p.coords)
ax.plot(x, y, color=edgecolor, linewidth=linewidth)
def plot_multipolygon(ax, geom, facecolor='red', edgecolor='black', alpha=0.5, linewidth=1.0):
""" Can safely call with either Polygon or Multipolygon geometry
"""
if geom.type == 'Polygon':
plot_polygon(ax, geom, facecolor=facecolor, edgecolor=edgecolor, alpha=alpha, linewidth=linewidth)
elif geom.type == 'MultiPolygon':
for poly in geom.geoms:
plot_polygon(ax, poly, facecolor=facecolor, edgecolor=edgecolor, alpha=alpha, linewidth=linewidth)
def plot_linestring(ax, geom, color='black', linewidth=1.0):
""" Plot a single LineString geometry """
a = np.array(geom)
ax.plot(a[:, 0], a[:, 1], color=color, linewidth=linewidth)
def plot_multilinestring(ax, geom, color='red', linewidth=1.0):
""" Can safely call with either LineString or MultiLineString geometry
"""
if geom.type == 'LineString':
plot_linestring(ax, geom, color=color, linewidth=linewidth)
elif geom.type == 'MultiLineString':
for line in geom.geoms:
plot_linestring(ax, line, color=color, linewidth=linewidth)
def plot_point(ax, pt, marker='o', markersize=2, color='black'):
""" Plot a single Point geometry """
ax.plot(pt.x, pt.y, marker=marker, markersize=markersize, color=color)
def gencolor(N, colormap='Set1'):
"""
Color generator intended to work with one of the ColorBrewer
qualitative color scales.
Suggested values of colormap are the following:
Accent, Dark2, Paired, Pastel1, Pastel2, Set1, Set2, Set3
(although any matplotlib colormap will work).
"""
from matplotlib import cm
# don't use more than 9 discrete colors
n_colors = min(N, 9)
cmap = cm.get_cmap(colormap, n_colors)
colors = cmap(range(n_colors))
for i in xrange(N):
yield colors[i % n_colors]
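# A minimal, illustrative usage sketch: draws four colours from the 'Set2' qualitative map;
# both the count and the map name are arbitrary choices for illustration.
def _gencolor_example():
    return [tuple(c) for c in gencolor(4, colormap='Set2')]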
def plot_series(s, cmap='Set1', color=None, ax=None, linewidth=1.0,
figsize=None, **color_kwds):
""" Plot a GeoSeries
Generate a plot of a GeoSeries geometry with matplotlib.
Parameters
----------
Series
The GeoSeries to be plotted. Currently Polygon,
MultiPolygon, LineString, MultiLineString and Point
geometries can be plotted.
cmap : str (default 'Set1')
The name of a colormap recognized by matplotlib. Any
colormap will work, but categorical colormaps are
generally recommended. Examples of useful discrete
colormaps include:
Accent, Dark2, Paired, Pastel1, Pastel2, Set1, Set2, Set3
color : str (default None)
If specified, all objects will be colored uniformly.
ax : matplotlib.pyplot.Artist (default None)
axes on which to draw the plot
linewidth : float (default 1.0)
Line width for geometries.
figsize : pair of floats (default None)
Size of the resulting matplotlib.figure.Figure. If the argument
ax is given explicitly, figsize is ignored.
**color_kwds : dict
Color options to be passed on to plot_polygon
Returns
-------
matplotlib axes instance
"""
if 'colormap' in color_kwds:
warnings.warn("'colormap' is deprecated, please use 'cmap' instead "
"(for consistency with matplotlib)", FutureWarning)
cmap = color_kwds.pop('colormap')
if 'axes' in color_kwds:
warnings.warn("'axes' is deprecated, please use 'ax' instead "
"(for consistency with pandas)", FutureWarning)
ax = color_kwds.pop('axes')
import matplotlib.pyplot as plt
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
ax.set_aspect('equal')
color_generator = gencolor(len(s), colormap=cmap)
for geom in s:
if color is None:
col = next(color_generator)
else:
col = color
if geom.type == 'Polygon' or geom.type == 'MultiPolygon':
plot_multipolygon(ax, geom, facecolor=col, linewidth=linewidth, **color_kwds)
elif geom.type == 'LineString' or geom.type == 'MultiLineString':
plot_multilinestring(ax, geom, color=col, linewidth=linewidth)
elif geom.type == 'Point':
plot_point(ax, geom, color=col)
plt.draw()
return ax
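# A minimal, illustrative usage sketch: plots a tiny hand-built GeoSeries; the geometries
# below are made up purely for illustration.
def _plot_series_example():
    from geopandas import GeoSeries
    from shapely.geometry import Point, LineString
    s = GeoSeries([Point(0, 0), Point(1, 1), LineString([(0, 0), (1, 1)])])
    return plot_series(s, cmap='Set2', linewidth=2.0)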
def plot_dataframe(s, column=None, cmap=None, color=None, linewidth=1.0,
categorical=False, legend=False, ax=None,
scheme=None, k=5, vmin=None, vmax=None, figsize=None,
**color_kwds):
""" Plot a GeoDataFrame
Generate a plot of a GeoDataFrame with matplotlib. If a
column is specified, the plot coloring will be based on values
in that column. Otherwise, a categorical plot of the
geometries in the `geometry` column will be generated.
Parameters
----------
GeoDataFrame
The GeoDataFrame to be plotted. Currently Polygon,
MultiPolygon, LineString, MultiLineString and Point
geometries can be plotted.
column : str (default None)
The name of the column to be plotted.
categorical : bool (default False)
If False, cmap will reflect numerical values of the
column being plotted. For non-numerical columns (or if
column=None), this will be set to True.
cmap : str (default 'Set1')
The name of a colormap recognized by matplotlib.
color : str (default None)
If specified, all objects will be colored uniformly.
linewidth : float (default 1.0)
Line width for geometries.
legend : bool (default False)
Plot a legend (Experimental; currently for categorical
plots only)
ax : matplotlib.pyplot.Artist (default None)
axes on which to draw the plot
scheme : pysal.esda.mapclassify.Map_Classifier
Choropleth classification schemes (requires PySAL)
k : int (default 5)
Number of classes (ignored if scheme is None)
vmin : None or float (default None)
Minimum value of cmap. If None, the minimum data value
in the column to be plotted is used.
vmax : None or float (default None)
Maximum value of cmap. If None, the maximum data value
in the column to be plotted is used.
figsize
Size of the resulting matplotlib.figure.Figure. If the argument
axes is given explicitly, figsize is ignored.
**color_kwds : dict
Color options to be passed on to plot_polygon
Returns
-------
matplotlib axes instance
"""
if 'colormap' in color_kwds:
warnings.warn("'colormap' is deprecated, please use 'cmap' instead "
"(for consistency with matplotlib)", FutureWarning)
cmap = color_kwds.pop('colormap')
if 'axes' in color_kwds:
warnings.warn("'axes' is deprecated, please use 'ax' instead "
"(for consistency with pandas)", FutureWarning)
ax = color_kwds.pop('axes')
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.colors import Normalize
from matplotlib import cm
if column is None:
return plot_series(s.geometry, cmap=cmap, color=color,
ax=ax, linewidth=linewidth, figsize=figsize,
**color_kwds)
else:
if s[column].dtype is np.dtype('O'):
categorical = True
if categorical:
if cmap is None:
cmap = 'Set1'
categories = list(set(s[column].values))
categories.sort()
valuemap = dict([(k, v) for (v, k) in enumerate(categories)])
values = [valuemap[k] for k in s[column]]
else:
values = s[column]
if scheme is not None:
binning = __pysal_choro(values, scheme, k=k)
values = binning.yb
# set categorical to True for creating the legend
categorical = True
binedges = [binning.yb.min()] + binning.bins.tolist()
categories = ['{0:.2f} - {1:.2f}'.format(binedges[i], binedges[i+1])
for i in range(len(binedges)-1)]
cmap = norm_cmap(values, cmap, Normalize, cm, vmin=vmin, vmax=vmax)
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
ax.set_aspect('equal')
for geom, value in zip(s.geometry, values):
if color is None:
col = cmap.to_rgba(value)
else:
col = color
if geom.type == 'Polygon' or geom.type == 'MultiPolygon':
plot_multipolygon(ax, geom, facecolor=col, linewidth=linewidth, **color_kwds)
elif geom.type == 'LineString' or geom.type == 'MultiLineString':
plot_multilinestring(ax, geom, color=col, linewidth=linewidth)
elif geom.type == 'Point':
plot_point(ax, geom, color=col)
if legend:
if categorical:
patches = []
for value, cat in enumerate(categories):
patches.append(Line2D([0], [0], linestyle="none",
marker="o", alpha=color_kwds.get('alpha', 0.5),
markersize=10, markerfacecolor=cmap.to_rgba(value)))
ax.legend(patches, categories, numpoints=1, loc='best')
else:
# TODO: show a colorbar
raise NotImplementedError
plt.draw()
return ax
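# A minimal, illustrative usage sketch: colours a GeoDataFrame by a numeric column; the
# 'naturalearth_lowres' dataset and the 'pop_est' column are assumptions for illustration.
def _plot_dataframe_example():
    import geopandas as gpd
    world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
    return plot_dataframe(world, column='pop_est', cmap='OrRd')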
def __pysal_choro(values, scheme, k=5):
""" Wrapper for choropleth schemes from PySAL for use with plot_dataframe
Parameters
----------
values
Series to be plotted
scheme
        pysal.esda.mapclassify classification scheme
        ['Equal_interval'|'Quantiles'|'Fisher_Jenks']
    k
        number of classes (2 <= k <= 9)
Returns
-------
binning
Binning objects that holds the Series with values replaced with
class identifier and the bins.
"""
try:
from pysal.esda.mapclassify import Quantiles, Equal_Interval, Fisher_Jenks
schemes = {}
schemes['equal_interval'] = Equal_Interval
schemes['quantiles'] = Quantiles
schemes['fisher_jenks'] = Fisher_Jenks
s0 = scheme
scheme = scheme.lower()
if scheme not in schemes:
scheme = 'quantiles'
warnings.warn('Unrecognized scheme "{0}". Using "Quantiles" '
'instead'.format(s0), UserWarning, stacklevel=3)
if k < 2 or k > 9:
warnings.warn('Invalid k: {0} (2 <= k <= 9), setting k=5 '
'(default)'.format(k), UserWarning, stacklevel=3)
k = 5
binning = schemes[scheme](values, k)
return binning
except ImportError:
raise ImportError("PySAL is required to use the 'scheme' keyword")
def norm_cmap(values, cmap, normalize, cm, vmin=None, vmax=None):
""" Normalize and set colormap
Parameters
----------
values
Series or array to be normalized
cmap
matplotlib Colormap
normalize
matplotlib.colors.Normalize
cm
matplotlib.cm
vmin
Minimum value of colormap. If None, uses min(values).
vmax
Maximum value of colormap. If None, uses max(values).
Returns
-------
n_cmap
mapping of normalized values to colormap (cmap)
"""
mn = min(values) if vmin is None else vmin
mx = max(values) if vmax is None else vmax
norm = normalize(vmin=mn, vmax=mx)
n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap)
return n_cmap
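# A minimal, illustrative usage sketch: norm_cmap() maps raw values onto RGBA colours; the
# sample values and the 'Blues' colormap are arbitrary choices for illustration.
def _norm_cmap_example():
    from matplotlib import cm
    from matplotlib.colors import Normalize
    mapper = norm_cmap([1.0, 5.0, 10.0], 'Blues', Normalize, cm)
    return mapper.to_rgba(5.0)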
|
bsd-3-clause
|
ksmet1977/pyKS_lib
|
ciecolor/spd.py
|
1
|
12457
|
# -*- coding: utf-8 -*-
"""
pylib_KS: spectrum related function definitions
Created on Wed Jun 10 15:47:05 2015
@author: kevin.smet
"""
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# explicitely define all variables and functions in current module for export
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
__all__=['blackbody','daylightphase','CCT2CIEref','get_lib_spd',\
'CIEinterp','getlambdaspacing','setlambdas','normalize','plotspd']
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# module imports
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
from .. import *
from ..general import *
from . import *
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# start module function definitions
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#------------------------------------------------------------------------------
# COLORIMETRIC functions:
#
# CIE illuminants:
# blackbody()
# daylightphase()
# CCT2CIEref(): calculate CIE reference illuminant based on CCT
# get_lib_spd(): get spd from library
#
# SPD related functions:
# CIEinterp(): interpolate/extrapolate SPD following CIE15-2004
# getlambdaspacing(): get wavelength spacing when set of lambdas are supplied
# setlambdas(): switch to a different lambda range (using CIEinterp)
# normalize(): normalize spd according to max, area, lambda, ...
#
# Plot functions:
# plotspd(): plot spd
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#---CIE illuminants------------------------------------------------------------
#------------------------------------------------------------------------------
def blackbody(CCT,l0=None,ln=None,ld=None):
"""
Calculate blackbody radiator spd for temperature = CCT
"""
CCT=float(CCT)
lambdas=getlambdas(l0,ln,ld)
Sr=(1/np.pi)*_BBc1*((lambdas*1e-9)**(-5))*(_BBn**(-2))*(np.exp(_BBc2*((_BBn*lambdas*1e-9*CCT)**(-1)))-1)**(-1)
Sr560=(1/np.pi)*_BBc1*((560*1e-9)**(-5))*(_BBn**(-2))*(np.exp(_BBc2*((_BBn*560*1e-9*CCT)**(-1)))-1)**(-1)
Sr=Sr/Sr560
Sr = stackarrays((lambdas,Sr))
return Sr
#------------------------------------------------------------------------------
def daylightphase(CCT,l0=None,ln=None,ld=None):
"""
    Calculate CIE daylight phase spd for temperature = CCT
"""
CCT=float(CCT)
if CCT <= 4000:
print 'Warning daylightphase spd not defined below 4000 K. Using blackbody radiator instead.'
Sr = blackbody(CCT,l0,ln,ld)
else:
lambdas=getlambdas(l0,ln,ld)
if (4000<CCT) & (CCT<7000 ):
xD=-4.607*(1e9/CCT**3)+2.9678*(1e6/CCT**2)+0.09911*(1000/CCT)+0.244063
elif CCT>7000:
xD=-2.0064*(1e9/CCT**3)+1.9018*(1e6/CCT**2)+0.24748*(1000/CCT)+0.23704;
yD=-3*xD**2+2.87*xD-0.275;
M1=(-1.3515-1.7703*xD+5.9114*yD)/(0.0241+0.2562*xD-0.7341*yD)
M2=(0.03-31.4424*xD+30.0717*yD)/(0.0241+0.2562*xD-0.7341*yD)
Sr=_C012_daylightphase[1,:]+M1*_C012_daylightphase[2,:]+M2*_C012_daylightphase[3,:]
Sr560=Sr[np.where(_C012_daylightphase[0,:]==560)[0]]
Sr=Sr/Sr560
        Sr[np.isnan(Sr)]=0 # NaN never compares equal to itself, so use np.isnan to zero out NaNs
Sr = stackarrays((lambdas,Sr))
return Sr
#------------------------------------------------------------------------------
def CCT2CIEref(CCT,l0=None,ln=None,ld=None):
if CCT < 5000:
Sr = blackbody(CCT,l0,ln,ld)
else:
Sr = daylightphase(CCT,l0,ln,ld)
return Sr
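# A minimal, illustrative usage sketch: CCT2CIEref() returns a blackbody spd below 5000 K
# and a daylight-phase spd at or above 5000 K; the two CCT values are arbitrary examples.
def _CCT2CIEref_example():
    Sr_blackbody = CCT2CIEref(2856.0) # takes the blackbody branch
    Sr_daylight = CCT2CIEref(6500.0) # takes the daylight-phase branch
    return Sr_blackbody, Sr_daylight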
#------------------------------------------------------------------------------
def get_lib_spd(name_,l0=None,ln=None,ld=None,type_='spd'): #library of predefined spectra
"""
Get predefined spectra, and fill object.
"""
try:
LS=eval(name_)
if name_ in CIEilluminants: type_='CIE illuminant'
except:
raise Exception('Requested spd not in library.')
lambdas=np.atleast_2d(LS[0])
S=np.atleast_2d(LS[1:])
#interpolate S to self.lambdas
if (l0!=None) | (ln!=None) | (ld!=None):
if l0==None: l0=lambdas[0,1]
if ln==None: ln=lambdas[0,-1]
if ld==None: ld=np.abs(np.diff(lambdas)[0])
lambdas = linspaceD(l0,ln,ld,1) #override previous lambdas if new l0,ln or ld are supplied
S = CIEinterp(LS,lambdas,type_,1)
S = np.atleast_2d(S[1:])
LS = stackarrays((lambdas,S))
del S
return LS
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# SPD related functions:
# CIEinterp(): interpolate/extrapolate SPD following CIE15-2004
# getlambdaspacing(): get wavelength spacing when set of lambdas are supplied
# setlambdas(): switch to a different lambda range (using CIEinterp)
# normalize(): normalize spd according to max, area, lambda, ...
#------------------------------------------------------------------------------
def CIEinterp(SS,targetlambdas,type_='spectrum',kind_=None):
"""
Interpolate spd's in SPD object to target wavelengths.
To avoid issues with peaked spectra, not all types of SPD can be interpolated:
(-->e.g. meas. spectra)
"""
#Only certain spds may be interpolated
if type_.lower()=='spectrum':
interpolatable = 0
elif type_.lower()=='meas. spd':
interpolatable = 0
elif type_.lower()=='calc. spd':
interpolatable = 1
elif type_.lower()=='sim. spd':
interpolatable = 1
else:
interpolatable = 1
#Use correct type of interpolation
if interpolatable==1:
if kind_==None:
if (type_.lower()=='cmf') | (type_.lower()=='lms')\
| (type_.lower()=='rfl') | (type_.lower()=='r')\
| (type_.lower()=='trm') | (type_.lower()=='t'):
kind_=_R_CMF_interp
else:
kind_=_S_interp
else:
kind_=_S_interp
S = []
lambdas=np.atleast_2d(SS[0])
SS=np.atleast_2d(SS[1:])
targetlambdas=np.atleast_2d(targetlambdas)
targetlambdas_=np.atleast_2d(targetlambdas[(targetlambdas>=lambdas[0][0])&(targetlambdas<=lambdas[0][-1])])#interpolate this range first, then extrapolate
for i in xrange(SS.shape[0]):
Si_f=interpolate.interp1d(lambdas[0],SS[i],kind=kind_)
Si =np.atleast_2d(Si_f(targetlambdas_))
Si[Si<0]=0 #no negative values allowed
#extrapolate by replicating closest know value (conform CIE2004 recommendation)
if isequalall(targetlambdas):
ld=np.abs(targetlambdas[0][0]-targetlambdas[0][1])
if targetlambdas[0][0]<lambdas[0][0]:
Si=np.hstack((np.atleast_2d(np.tile(Si[0][0],int(np.abs(targetlambdas[0][0]-lambdas[0][0])/ld))),Si))
if targetlambdas[0][-1]>lambdas[0][-1]:
Si=np.hstack((Si,np.atleast_2d(np.tile(Si[0][-1],int(np.abs(targetlambdas[0][-1]-lambdas[0][-1])/ld)))))
else:
raise NotImplementedError('Extrapolation for unequal lambda spacing not implemented')
if i==0:
S=Si
else:
S=np.vstack((S,Si))
lambdas = targetlambdas
return stackarrays((targetlambdas,S),dim_=0)
else:
raise NotImplementedError('To avoid problems with peaked spectra, interpolation is not implemented \nfor SPD of type ''spectrum'' or ''meas. spec!')
#------------------------------------------------------------------------------
def getlambdas(l0=None,ln=None,ld=None):
if l0==None: l0=_l0
if ln==None: ln=_ln
if ld==None: ld=_ld
return linspaceD(l0,ln,ld)
#------------------------------------------------------------------------------
def getlambdaspacing(lambdas):
d=np.diff(lambdas)
ld=np.atleast_2d(np.hstack((d[0,0],d[0,0:-1]/2.0,d[0,-1]))+np.hstack((0,d[0,1:]/2.0,0)))
if isequalall(ld): ld=ld[0,0]
return ld
#------------------------------------------------------------------------------
def setlambdas(LS,type_='spectrum',l0=None,ln=None,ld=None):
"""
Reset lambdas and interpolate SPD. Only allowed for certain types of spd.
"""
if (type_.lower() =='spectrum') | ('meas' in type_):
print 'Warning: spectrum not interpolated, because of type_.\nSetting new lambdas.' #do not interpolate!
else:
#interpolate S to lambdas
lambdas = np.atleast_2d(LS[0])#get old lambdas
if (l0!=None) | (ln!=None) | (ld!=None):
if l0==None: l0=lambdas[0,1]
if ln==None: ln=lambdas[0,-1]
if ld==None: ld=np.abs(np.diff(lambdas)[0])
lambdas = linspaceD(l0,ln,ld,1) #override previous lambdas if new l0,ln or ld are supplied
S=LS
S = CIEinterp(S,lambdas,type_,1)
S = np.atleast_2d(S[1:])
lambdas = np.atleast_2d(S[0])
LS = S
del S
return LS
#------------------------------------------------------------------------------
def normalize(LS,normtype_=None,lambda_=560):
if normtype_!=None:
lambdas = np.atleast_2d(LS[0])
S=np.atleast_2d(LS[1:])
for i in xrange(S.shape[0]):
Si = S[i]
if normtype_=='max':
S[i]=Si/np.max(Si)
elif normtype_=='area':
S[i]=Si/(np.sum(Si)*getlambdaspacing(lambdas))
elif normtype_=='lambda':
d=lambdas-lambda_
index = np.where((d==np.min(np.abs(d)))[0])[0]
S[i]=Si/Si[index[0]]
else:
                print 'Warning: normalization type not supported. Keeping original data.'
LS=stackarrays((lambdas,S))
return LS
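# A minimal, illustrative usage sketch: normalizes a blackbody spd to unit area; 2856 K
# (the CCT of CIE illuminant A) is an arbitrary choice for illustration.
def _normalize_example():
    LS = blackbody(2856.0)
    return normalize(LS, normtype_='area')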
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#----plot functions------------------------------------------------------------
#------------------------------------------------------------------------------
def plotspd(LS,plotnr=None,holdonoff='on',name_='',type_=None,ylabel = '',**kwargs):
"""
Plot spd's.
"""
#import matplotlib.pyplot as plt
#initialize window for figure
lambdas=np.atleast_2d(LS[0])
S=np.atleast_2d(LS[1:])
if plotnr==None:
plt.figure()
else:
plt.figure(plotnr)
plt.hold(holdonoff)
Nspds=int(LS.shape[0])-1
#set spdname for title
if (Nspds==1):
spdname_=name_
else:
spdname_='all spectra'
#set colorVal
colorVals=setcustomcolormap(Nspds)
for i in xrange(Nspds):
if Nspds>1:
Si=S[i,:]
else:
Si=S
if 'color' not in kwargs:
if Nspds>1:
colorVal=colorVals[i]
else:
colorVal=colorVals[0]
else:
colorVals = kwargs['color']
if i<=len(colorVals):
colorVal=colorVals[i]
else:
colorVal=_colorVal#'k'
del kwargs['color']
Si=np.squeeze(Si)
plt.plot(lambdas[0],Si,color=colorVal,**kwargs)
if type_!=None:
if (type_.lower()=='spectrum') | ('spd' in type_.lower()) | ('ill' in type_.lower()):
plt.ylabel('Intensity')
plt.title(type_[0].upper()+type_[1:]+': ' + spdname_)
elif type_.lower()[0] =='r':
plt.ylabel('Spectral reflectance')
plt.title('Reflectance: ' + spdname_)
elif type_.lower()[0] =='t':
plt.ylabel('Spectral transmittance')
plt.title('Transmittance: ' + spdname_)
else:
plt.ylabel(ylabel)
pass
else:
plt.ylabel(ylabel)
plt.xlabel('Wavelength (nm)')
plt.xlim((lambdas[0,1],lambdas[0,-1]))
plt.ylim((0,np.max(S)*1.05))
plt.show()
#show_plot(plotnr)
|
mit
|
sinhrks/expandas
|
pandas_ml/imbaccessors/base.py
|
2
|
1319
|
#!/usr/bin/env python
from pandas_ml.compat import cache_readonly
from pandas_ml.core.accessor import _AccessorMethods
class ImbalanceMethods(_AccessorMethods):
"""
Accessor to ``imblearn``.
"""
_module_name = 'imblearn'
@property
def under_sampling(self):
"""Property to access ``imblearn.under_sampling``"""
return self._under_sampling
@cache_readonly
def _under_sampling(self):
return _AccessorMethods(self._df, module_name='imblearn.under_sampling')
@property
def over_sampling(self):
"""Property to access ``imblearn.over_sampling``"""
return self._over_sampling
@cache_readonly
def _over_sampling(self):
return _AccessorMethods(self._df, module_name='imblearn.over_sampling')
@property
def combine(self):
"""Property to access ``imblearn.combine``"""
return self._combine
@cache_readonly
def _combine(self):
return _AccessorMethods(self._df, module_name='imblearn.combine')
@property
def ensemble(self):
"""Property to access ``imblearn.ensemble``"""
return self._ensemble
@cache_readonly
def _ensemble(self):
return _AccessorMethods(self._df, module_name='imblearn.ensemble')
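# A minimal, illustrative usage sketch, assuming a pandas_ml.ModelFrame ``df`` with an
# imbalanced target: the accessor exposes imblearn estimators. ``RandomUnderSampler`` and
# the follow-up ``df.fit_sample(sampler)`` call follow the upstream documentation and are
# illustrative only, not guaranteed by this file alone.
def _imbalance_accessor_example(df):
    sampler = df.imbalance.under_sampling.RandomUnderSampler()
    return df.fit_sample(sampler)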
|
bsd-3-clause
|
doncat99/StockRecommendSystem
|
Source/StockPrediction/Stock_Prediction_Recommand_System.py
|
1
|
15628
|
import os
import pickle
import tensorflow as tf
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics.classification import accuracy_score
from Stock_Prediction_Base import base_model
# from Stock_Prediction_Data_Processing import get_all_stocks_label_possibility_data, preprocessing_data
class recommand_system_model(base_model):
# build model
def build_model(self, CategoricalColumnName, ContinuousColumnName, flags):
self.load_training_model()
print('build Recommand System model...')
# Continuous base columns.
deep_columns = []
for column in ContinuousColumnName:
deep_columns.append(tf.contrib.layers.real_valued_column(column))
        # Discrete / categorical columns
wide_columns = []
for column in CategoricalColumnName:
temp = tf.contrib.layers.sparse_column_with_hash_bucket(column, hash_bucket_size=100)
wide_columns.append(temp)
deep_columns.append(tf.contrib.layers.embedding_column(temp, dimension=8))
# open = tf.contrib.layers.real_valued_column("Open")
# high = tf.contrib.layers.real_valued_column("High")
# low = tf.contrib.layers.real_valued_column("Low")
# close = tf.contrib.layers.real_valued_column("Close")
# volume = tf.contrib.layers.real_valued_column("Volume")
# top_line = tf.contrib.layers.real_valued_column("Top_line")
# middle_block = tf.contrib.layers.real_valued_column("Middle_block")
# bottom_line = tf.contrib.layers.real_valued_column("Bottom_line")
# profit = tf.contrib.layers.real_valued_column("Profit")
        # Category conversion
#age_buckets = tf.contrib.layers.bucketized_column(age, boundaries= [18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
# wide_columns = [analyist, estimate, actual, surprise, earningDay, status,
# tf.contrib.layers.crossed_column([estimate, actual], hash_bucket_size=int(1e4)),
# tf.contrib.layers.crossed_column([analyist, surprise], hash_bucket_size=int(1e6)),
# tf.contrib.layers.crossed_column([surprise, earningDay, status],hash_bucket_size=int(1e4))]
        # embedding_column is used to represent categorical variables
# deep_columns = [tf.contrib.layers.embedding_column(analyist , dimension=8),
# tf.contrib.layers.embedding_column(estimate , dimension=8),
# tf.contrib.layers.embedding_column(actual , dimension=8),
# tf.contrib.layers.embedding_column(surprise , dimension=8),
# tf.contrib.layers.embedding_column(earningDay, dimension=8),
# tf.contrib.layers.embedding_column(status , dimension=8),
# open, high, low, close, volume, top_line, middle_block, bottom_line, profit]
if flags.model_type =="wide":
model = tf.contrib.learn.LinearClassifier(model_dir=self.paras.model_folder,feature_columns=wide_columns)
elif flags.model_type == "deep":
model = tf.contrib.learn.DNNClassifier(model_dir=self.paras.model_folder, feature_columns=deep_columns, hidden_units=[100,50])
else:
model = tf.contrib.learn.DNNLinearCombinedClassifier(model_dir=self.paras.model_folder, linear_feature_columns=wide_columns, dnn_feature_columns=deep_columns, dnn_hidden_units=[100,50])
self.save_training_model(model)
return model
def save_training_model(self, model):
# Do nothing, by setting model_dir will save the model automatically
return
def load_training_model(self):
if self.paras.load == False:
# Todo: clear the model file
model_file = self.paras.model_folder
#else:
# Do nothing, by setting model_dir will load the model automatically
class recommand_system(recommand_system_model):
def __init__(self, paras):
super(recommand_system, self).__init__(paras=paras)
def input_fn(self, df, y, CONTINUOUS_COLUMNS, CATEGORICAL_COLUMNS):
continuous_cols = {k: tf.constant(df[k].values) for k in CONTINUOUS_COLUMNS}
        # The original example uses dense_shape
categorical_cols = {k: tf.SparseTensor(indices=[[i,0] for i in range(df[k].size)], values = df[k].values, dense_shape=[df[k].size,1]) for k in CATEGORICAL_COLUMNS}
feature_cols = dict(continuous_cols)
feature_cols.update(categorical_cols)
label = tf.constant(y)
return feature_cols, label
###################################
### ###
### Training ###
### ###
###################################
def prepare_train_test_data(self, data_feature, LabelColumnName):
firstloop = 1
for ticker, data in data_feature.items():
X, y = preprocessing_data(self.paras, data[0], LabelColumnName, one_hot_label_proc=False, array_format=False)
X_train_temp, X_test_temp, y_train_temp, y_test_temp = train_test_split(X, y, test_size=0.2)
# print('Train shape X:', X_train_temp.shape, ',y:', y_train_temp.shape)
# print('Test shape X:', X_test_temp.shape, ',y:', y_test_temp.shape)
if firstloop == 1:
firstloop = 0
X_train = X_train_temp
X_test = X_test_temp
y_train = y_train_temp
y_test = y_test_temp
else:
                X_train = X_train.append(X_train_temp, ignore_index=True)
                X_test = X_test.append(X_test_temp, ignore_index=True)
y_train = np.append(y_train, y_train_temp, 0)
y_test = np.append(y_test, y_test_temp, 0)
# print('Train shape X:', X_train.shape, ',y:', y_train.shape)
# print('Test shape X:', X_test.shape, ',y:', y_test.shape)
return X_train, y_train, X_test, y_test
def train_data(self, data_feature, LabelColumnName, CategoricalColumnName, ContinuousColumnName, flags):
model = self.build_model(CategoricalColumnName, ContinuousColumnName, flags)
X_train, y_train, X_test, y_test = self.prepare_train_test_data(data_feature, LabelColumnName)
model.fit(input_fn=lambda: self.input_fn(X_train, y_train, ContinuousColumnName, CategoricalColumnName), steps=flags.train_steps)
print(' ############## validation on test data ############## ')
self.predict(model, X_test, y_test, ContinuousColumnName, CategoricalColumnName)
return model
###################################
### ###
### Predicting ###
### ###
###################################
def predict(self, model, X, y, ContinuousColumnName, CategoricalColumnName):
predictions = np.array(list(model.predict_proba(input_fn=lambda: self.input_fn(X, y, ContinuousColumnName, CategoricalColumnName))))
results = model.evaluate(input_fn=lambda: self.input_fn(X, y, ContinuousColumnName, CategoricalColumnName), steps=1)
for key in sorted(results):
print("%s: %s"%(key, results[key]))
print('Accuracy: ', accuracy_score(y, tf.argmax(predictions, axis=1)))
return predictions
def predict_data(self, model, data_feature, LabelColumnName):
if model == None: model = self.build_model()
if model == None:
print('predict failed, model not exist')
return
for ticker in self.paras.predict_tickers:
try:
data = data_feature[ticker]
except:
                print('stock not prepared', ticker)
continue
X_train, y_train = preprocessing_data(self.paras, data[0], LabelColumnName, one_hot_label_proc=False)
X_valid, y_valid = preprocessing_data(self.paras, data[1], LabelColumnName, one_hot_label_proc=False)
X_lately, y_lately = preprocessing_data(self.paras, data[2], LabelColumnName, one_hot_label_proc=False)
possibility_columns = ['outClass_' + str(idx) for idx in range(self.paras.n_out_class)]
print('\n ---------- ', ticker, ' ---------- \n')
print(' ############## validation on train data ############## ')
mse_known_train, predictions_train = self.predict(model, X_train, y_train)
data[3].loc[data[0].index, 'label'] = np.argmax(y, axis=1) #- int(self.paras.n_out_class/2)
data[3].loc[data[0].index, 'pred'] = np.argmax(predictions_train, axis=1) #- int(self.paras.n_out_class/2)
s = pd.DataFrame(predictions_train, index = data[0].index, columns=possibility_columns)
print(' ############## validation on valid data ############## ')
mse_known_lately, predictions_valid = self.predict(model, X_valid, y_valid)
data[3].loc[data[1].index, 'label'] = np.argmax(y_valid, axis=1) #- int(self.paras.n_out_class/2)
data[3].loc[data[1].index, 'pred'] = np.argmax(predictions_valid, axis=1) #- int(self.paras.n_out_class/2)
s = s.append(pd.DataFrame(predictions_valid, index = data[1].index, columns=possibility_columns))
print(' ############## validation on lately data ############## ')
mse_lately, predictions_lately = self.predict(model, X_lately, y_lately)
data[3].loc[data[2].index, 'label'] = np.nan#np.argmax(actual_lately, axis=1)
data[3].loc[data[2].index, 'pred'] = np.argmax(predictions_lately, axis=1) #- int(self.paras.n_out_class/2)
s = s.append(pd.DataFrame(predictions_lately, index = data[2].index, columns=possibility_columns))
data[3] = pd.merge(data[3], s, how='outer', left_index=True, right_index=True)
actual_count = []
predict_count = []
for i in range(self.paras.n_out_class):
actual_count.append(len(data[3][data[3]['label'] == i]))
predict_count.append(len(data[3][(data[3]['label'] == i) & (data[3]['label'] == data[3]['pred'])]))
valid_actual_count = []
valid_predict_count = []
data.append(data[3][-self.paras.valid_len:])
for i in range(self.paras.n_out_class):
valid_actual_count.append(len(data[4][data[4]['label'] == i]))
valid_predict_count.append(len(data[4][(data[4]['label'] == i) & (data[4]['label'] == data[4]['pred'])]))
print('classification counter:\n', actual_count)
print('classification possibility:\n', 100*np.array(actual_count)/np.sum(actual_count))
print('classification train predict:\n', 100*np.array(predict_count)/np.array(actual_count))
print('classification valid predict:\n', 100*np.array(valid_predict_count)/np.array(valid_actual_count))
#print('\nclassification centers:\n', np.round(np.sort(data[5], axis=1), decimals=3))
data[3]['label'] = data[3]['label'] - int(self.paras.n_out_class/2)
data[3]['pred'] = data[3]['pred'] - int(self.paras.n_out_class/2)
# rewrite data frame and save / update
data[3] = self.save_data_frame_mse(ticker, data[3], self.paras.window_len, possibility_columns, mses=[mse_known_train, mse_known_lately])
self.df = data[3]
pd.set_option('display.max_rows', None)
print('\n -------------------- \n')
print(data[3][-(self.paras.pred_len + self.paras.valid_len):])
###################################
### ###
### Save Data Output ###
### ###
###################################
def save_data_frame_mse(self, ticker, df, window_len, possibility_columns, mses):
df['label'] = df['label']#.astype(int)
df['pred'] = df['pred']#.astype(int)
# df = df.rename(columns={'label': 'a_+' + str(self.paras.pred_len) + '_d',
# 'pred': 'p_+' + str(self.paras.pred_len) + '_d'})
# new_list = ['a_+' + str(self.paras.pred_len) + '_d', 'p_+' + str(self.paras.pred_len) + '_d']
#default_list = ['open', 'high', 'low', 'close', 'volume']
#original_other_list = set(df.columns) - set(default_list) - set(new_list)
#original_other_list = list(original_other_list)
default_list = ['close', 'volume', 'pred_profit']
original_other_list = []
new_list = ['label', 'pred']
df = df[default_list + original_other_list + new_list + possibility_columns]
model_acc = mses[1] / mses[0]
if self.paras.save == True:
#df.to_csv(self.paras.save_folder + ticker + ('_%.2f' % model_acc) + '_data_frame.csv')
df.to_csv(self.paras.save_folder + ticker + '_' + str(window_len) + ('_%.2f' % model_acc) + '.csv')
with open(self.paras.save_folder + 'parameters.txt', 'w') as text_file:
text_file.write(self.paras.__str__())
text_file.write(str(mses[0]) + '\n')
text_file.write(str(mses[1]) + '\n')
return df
###################################
### ###
### Main Enterance ###
### ###
###################################
def run(self, train, predict):
################################################################################
self.paras.save_folder = self.get_save_directory()
print('Save Directory: ', self.paras.save_folder)
self.paras.model_folder = self.get_model_directory()
print('Model Directory: ', self.paras.model_folder)
################################################################################
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("model_dir", self.paras.model_folder, "Base directory for output models.")
flags.DEFINE_string("model_type", "wide_n_deep", "Valid model types: {'wide', 'deep', 'wide_n_deep'}.")
flags.DEFINE_integer("train_steps", self.paras.epoch, "Number of training steps.")
flags.DEFINE_string("train_data", "", "Path to the training data.")
flags.DEFINE_string("test_data", "", "path to the test data")
for window in self.paras.window_len:
self.do_run(train, predict, window)
def do_run(self, train, predict, window):
data_file = "data_file_recommand_system_" + str(window) + ".pkl"
DropColumnName = []
LabelColumnName = 'label'
CategoricalColumnName = ['WeekDay']
ContinuousColumnName = []
if os.path.exists(data_file):
input = open(data_file, 'rb')
data_feature = pickle.load(input)
input.close()
else:
data_feature = get_all_stocks_feature_data(self.paras, window, LabelColumnName)
output = open(data_file, 'wb')
pickle.dump(data_feature, output)
output.close()
model = None
if train: model = self.train_data(data_feature, window, LabelColumnName, CategoricalColumnName, ContinuousColumnName, FLAGS)
if predict: self.predict_data(model, data_feature, window, LabelColumnName)
|
mit
|
pnedunuri/scipy
|
scipy/stats/stats.py
|
18
|
169352
|
# Copyright (c) Gary Strangman. All rights reserved
#
# Disclaimer
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
#
#
# Heavily adapted for use by SciPy 2002 by Travis Oliphant
"""
A collection of basic statistical functions for python. The function
names appear below.
Some scalar functions defined here are also available in the scipy.special
package where they work on arbitrary sized arrays.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful.
Central Tendency
----------------
.. autosummary::
:toctree: generated/
gmean
hmean
mode
Moments
-------
.. autosummary::
:toctree: generated/
moment
variation
skew
kurtosis
normaltest
Moments Handling NaN:
.. autosummary::
:toctree: generated/
nanmean
nanmedian
nanstd
Altered Versions
----------------
.. autosummary::
:toctree: generated/
tmean
tvar
tstd
tsem
describe
Frequency Stats
---------------
.. autosummary::
:toctree: generated/
itemfreq
scoreatpercentile
percentileofscore
histogram
cumfreq
relfreq
Variability
-----------
.. autosummary::
:toctree: generated/
obrientransform
signaltonoise
sem
Trimming Functions
------------------
.. autosummary::
:toctree: generated/
threshold
trimboth
trim1
Correlation Functions
---------------------
.. autosummary::
:toctree: generated/
pearsonr
fisher_exact
spearmanr
pointbiserialr
kendalltau
linregress
theilslopes
Inferential Stats
-----------------
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
chisquare
power_divergence
ks_2samp
mannwhitneyu
ranksums
wilcoxon
kruskal
friedmanchisquare
combine_pvalues
Probability Calculations
------------------------
.. autosummary::
:toctree: generated/
chisqprob
betai
ANOVA Functions
---------------
.. autosummary::
:toctree: generated/
f_oneway
f_value
Support Functions
-----------------
.. autosummary::
:toctree: generated/
ss
square_of_sums
rankdata
References
----------
.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
from __future__ import division, print_function, absolute_import
import warnings
import math
from collections import namedtuple
from scipy._lib.six import xrange
# Scipy imports.
from scipy._lib.six import callable, string_types
from numpy import array, asarray, ma, zeros
import scipy.special as special
import scipy.linalg as linalg
import numpy as np
from . import distributions
from . import mstats_basic
from ._distn_infrastructure import _lazywhere
from ._stats_mstats_common import find_repeats, linregress, theilslopes
from ._rank import rankdata, tiecorrect
__all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar',
'tmin', 'tmax', 'tstd', 'tsem', 'moment', 'variation',
'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
'normaltest', 'jarque_bera', 'itemfreq',
'scoreatpercentile', 'percentileofscore', 'histogram',
'histogram2', 'cumfreq', 'relfreq', 'obrientransform',
'signaltonoise', 'sem', 'zmap', 'zscore', 'threshold',
'sigmaclip', 'trimboth', 'trim1', 'trim_mean', 'f_oneway',
'pearsonr', 'fisher_exact', 'spearmanr', 'pointbiserialr',
'kendalltau', 'linregress', 'theilslopes', 'ttest_1samp',
'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', 'kstest',
'chisquare', 'power_divergence', 'ks_2samp', 'mannwhitneyu',
'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
'chisqprob', 'betai',
'f_value_wilks_lambda', 'f_value', 'f_value_multivariate',
'ss', 'square_of_sums', 'fastsort', 'rankdata', 'nanmean',
'nanstd', 'nanmedian', 'combine_pvalues', ]
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
return a, outaxis
def _chk2_asarray(a, b, axis):
if axis is None:
a = np.ravel(a)
b = np.ravel(b)
outaxis = 0
else:
a = np.asarray(a)
b = np.asarray(b)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
if b.ndim == 0:
b = np.atleast_1d(b)
return a, b, outaxis
def _contains_nan(a, nan_policy='propagate'):
if nan_policy not in ('propagate', 'raise', 'omit'):
raise ValueError("nan_policy must be either 'propagate', 'raise', or "
"'ignore'")
try:
# Calling np.sum to avoid creating a huge array into memory
# e.g. np.isnan(a).any()
with np.errstate(invalid='ignore'):
contains_nan = np.isnan(np.sum(a))
except TypeError:
        # If the check cannot be properly performed we fall back to omitting
# nan values and raising a warning. This can happen when attempting to
# sum things that are not numbers (e.g. as in the function `mode`).
contains_nan = False
nan_policy = 'omit'
warnings.warn("The input array could not be properly checked for nan "
"values. nan values will be ignored.", RuntimeWarning)
if contains_nan and nan_policy == 'raise':
raise ValueError("The input contains nan values")
return (contains_nan, nan_policy)
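# A minimal, illustrative usage sketch: _contains_nan() reports whether an array holds NaNs
# and echoes back the (possibly adjusted) nan_policy.
def _contains_nan_example():
    has_nan, policy = _contains_nan(np.array([1.0, np.nan, 3.0]), nan_policy='omit')
    return has_nan, policy  # -> (True, 'omit')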
#######
# NAN friendly functions
########
@np.deprecate(message="scipy.stats.nanmean is deprecated in scipy 0.15.0 "
"in favour of numpy.nanmean.")
def nanmean(x, axis=0):
"""
Compute the mean over the given axis ignoring nans.
Parameters
----------
x : ndarray
Input array.
axis : int or None, optional
Axis along which the mean is computed. Default is 0.
If None, compute over the whole array `x`.
Returns
-------
m : float
The mean of `x`, ignoring nans.
See Also
--------
nanstd, nanmedian
Examples
--------
>>> from scipy import stats
>>> a = np.linspace(0, 4, 3)
>>> a
array([ 0., 2., 4.])
>>> a[-1] = np.nan
>>> stats.nanmean(a)
1.0
"""
x, axis = _chk_asarray(x, axis)
x = x.copy()
Norig = x.shape[axis]
mask = np.isnan(x)
factor = 1.0 - np.sum(mask, axis) / Norig
x[mask] = 0.0
return np.mean(x, axis) / factor
@np.deprecate(message="scipy.stats.nanstd is deprecated in scipy 0.15 "
"in favour of numpy.nanstd.\nNote that numpy.nanstd "
"has a different signature.")
def nanstd(x, axis=0, bias=False):
"""
Compute the standard deviation over the given axis, ignoring nans.
Parameters
----------
x : array_like
Input array.
axis : int or None, optional
Axis along which the standard deviation is computed. Default is 0.
If None, compute over the whole array `x`.
bias : bool, optional
If True, the biased (normalized by N) definition is used. If False
(default), the unbiased definition is used.
Returns
-------
s : float
The standard deviation.
See Also
--------
nanmean, nanmedian
Examples
--------
>>> from scipy import stats
>>> a = np.arange(10, dtype=float)
>>> a[1:3] = np.nan
>>> np.std(a)
nan
>>> stats.nanstd(a)
2.9154759474226504
>>> stats.nanstd(a.reshape(2, 5), axis=1)
array([ 2.0817, 1.5811])
>>> stats.nanstd(a.reshape(2, 5), axis=None)
2.9154759474226504
"""
x, axis = _chk_asarray(x, axis)
x = x.copy()
Norig = x.shape[axis]
mask = np.isnan(x)
Nnan = np.sum(mask, axis) * 1.0
n = Norig - Nnan
x[mask] = 0.0
m1 = np.sum(x, axis) / n
if axis:
d = x - np.expand_dims(m1, axis)
else:
d = x - m1
d *= d
m2 = np.sum(d, axis) - m1 * m1 * Nnan
if bias:
m2c = m2 / n
else:
m2c = m2 / (n - 1.0)
return np.sqrt(m2c)
def _nanmedian(arr1d): # This only works on 1d arrays
"""Private function for rank a arrays. Compute the median ignoring Nan.
Parameters
----------
arr1d : ndarray
Input array, of rank 1.
    Returns
-------
m : float
The median.
"""
x = arr1d.copy()
c = np.isnan(x)
s = np.where(c)[0]
if s.size == x.size:
warnings.warn("All-NaN slice encountered", RuntimeWarning)
return np.nan
elif s.size != 0:
# select non-nans at end of array
enonan = x[-s.size:][~c[-s.size:]]
# fill nans in beginning of array with non-nans of end
x[s[:enonan.size]] = enonan
# slice nans away
x = x[:-s.size]
return np.median(x, overwrite_input=True)
@np.deprecate(message="scipy.stats.nanmedian is deprecated in scipy 0.15 "
"in favour of numpy.nanmedian.")
def nanmedian(x, axis=0):
"""
Compute the median along the given axis ignoring nan values.
Parameters
----------
x : array_like
Input array.
axis : int or None, optional
Axis along which the median is computed. Default is 0.
If None, compute over the whole array `x`.
Returns
-------
m : float
The median of `x` along `axis`.
See Also
--------
nanstd, nanmean, numpy.nanmedian
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 3, 1, 5, 5, np.nan])
>>> stats.nanmedian(a)
array(3.0)
>>> b = np.array([0, 3, 1, 5, 5, np.nan, 5])
>>> stats.nanmedian(b)
array(4.0)
Example with axis:
>>> c = np.arange(30.).reshape(5,6)
>>> idx = np.array([False, False, False, True, False] * 6).reshape(5,6)
>>> c[idx] = np.nan
>>> c
array([[ 0., 1., 2., nan, 4., 5.],
[ 6., 7., nan, 9., 10., 11.],
[ 12., nan, 14., 15., 16., 17.],
[ nan, 19., 20., 21., 22., nan],
[ 24., 25., 26., 27., nan, 29.]])
>>> stats.nanmedian(c, axis=1)
array([ 2. , 9. , 15. , 20.5, 26. ])
"""
x, axis = _chk_asarray(x, axis)
if x.ndim == 0:
return float(x.item())
if hasattr(np, 'nanmedian'): # numpy 1.9 faster for some cases
return np.nanmedian(x, axis)
x = np.apply_along_axis(_nanmedian, axis, x)
if x.ndim == 0:
x = float(x.item())
return x
#####################################
# CENTRAL TENDENCY #
#####################################
def gmean(a, axis=0, dtype=None):
"""
Compute the geometric mean along the specified axis.
Returns the geometric average of the array elements.
That is: n-th root of (x1 * x2 * ... * xn)
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
Axis along which the geometric mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If dtype is not specified, it defaults to the
dtype of a, unless a has an integer dtype with a precision less than
that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
gmean : ndarray
see dtype parameter above
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
hmean : Harmonic mean
Notes
-----
The geometric average is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
Use masked arrays to ignore any non-finite values in the input or that
arise in the calculations such as Not a Number and infinity because masked
arrays automatically mask any non-finite values.
"""
if not isinstance(a, np.ndarray): # if not an ndarray object attempt to convert it
log_a = np.log(np.array(a, dtype=dtype))
elif dtype: # Must change the default dtype allowing array type
if isinstance(a, np.ma.MaskedArray):
log_a = np.log(np.ma.asarray(a, dtype=dtype))
else:
log_a = np.log(np.asarray(a, dtype=dtype))
else:
log_a = np.log(a)
return np.exp(log_a.mean(axis=axis))
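# A minimal worked example: the geometric mean of [1, 4, 16] is (1 * 4 * 16) ** (1/3) = 4,
# computed above as exp(mean(log(a))); the sample values are arbitrary.
def _gmean_example():
    return gmean(np.array([1.0, 4.0, 16.0]))  # ~4.0 up to floating point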
def hmean(a, axis=0, dtype=None):
"""
Calculates the harmonic mean along the specified axis.
That is: n / (1/x1 + 1/x2 + ... + 1/xn)
Parameters
----------
a : array_like
Input array, masked array or object that can be converted to an array.
axis : int or None, optional
Axis along which the harmonic mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults to the
dtype of `a`, unless `a` has an integer `dtype` with a precision less
than that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
hmean : ndarray
see `dtype` parameter above
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
gmean : Geometric mean
Notes
-----
The harmonic mean is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
Use masked arrays to ignore any non-finite values in the input or that
arise in the calculations such as Not a Number and infinity.
"""
if not isinstance(a, np.ndarray):
a = np.array(a, dtype=dtype)
if np.all(a > 0): # Harmonic mean only defined if greater than zero
if isinstance(a, np.ma.MaskedArray):
size = a.count(axis)
else:
if axis is None:
a = a.ravel()
size = a.shape[0]
else:
size = a.shape[axis]
return size / np.sum(1.0/a, axis=axis, dtype=dtype)
else:
raise ValueError("Harmonic mean only defined if all elements greater than zero")
def mode(a, axis=0, nan_policy='propagate'):
"""
Returns an array of the modal (most common) value in the passed array.
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
mode : ndarray
Array of modal values.
count : ndarray
Array of counts for each mode.
Examples
--------
>>> a = np.array([[6, 8, 3, 0],
... [3, 2, 1, 7],
... [8, 1, 8, 4],
... [5, 3, 0, 5],
... [4, 7, 5, 9]])
>>> from scipy import stats
>>> stats.mode(a)
(array([[3, 1, 0, 0]]), array([[1, 1, 1, 1]]))
To get mode of whole array, specify ``axis=None``:
>>> stats.mode(a, axis=None)
(array([3]), array([3]))
"""
a, axis = _chk_asarray(a, axis)
if a.size == 0:
return np.array([]), np.array([])
contains_nan, nan_policy = _contains_nan(a, nan_policy)
ModeResult = namedtuple('ModeResult', ('mode', 'count'))
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.mode(a, axis)
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape, dtype=a.dtype)
oldcounts = np.zeros(testshape, dtype=int)
for score in scores:
template = (a == score)
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
ModeResult = namedtuple('ModeResult', ('mode', 'count'))
return ModeResult(mostfrequent, oldcounts)
def _mask_to_limits(a, limits, inclusive):
"""Mask an array for values outside of given limits.
This is primarily a utility function.
Parameters
----------
a : array
limits : (float or None, float or None)
A tuple consisting of the (lower limit, upper limit). Values in the
input array less than the lower limit or greater than the upper limit
will be masked out. None implies no limit.
inclusive : (bool, bool)
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to lower or upper are allowed.
Returns
-------
A MaskedArray.
Raises
------
A ValueError if there are no values within the given limits.
"""
lower_limit, upper_limit = limits
lower_include, upper_include = inclusive
am = ma.MaskedArray(a)
if lower_limit is not None:
if lower_include:
am = ma.masked_less(am, lower_limit)
else:
am = ma.masked_less_equal(am, lower_limit)
if upper_limit is not None:
if upper_include:
am = ma.masked_greater(am, upper_limit)
else:
am = ma.masked_greater_equal(am, upper_limit)
if am.count() == 0:
raise ValueError("No array values within given limits")
return am
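# Worked example of the masking above (explanatory comment only): with
# a = np.arange(10), limits = (2, 7) and inclusive = (True, False), the values
# 0 and 1 are masked by masked_less(am, 2), and 7, 8, 9 are masked by
# masked_greater_equal(am, 7), leaving {2, 3, 4, 5, 6} unmasked (count() == 5).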
def tmean(a, limits=None, inclusive=(True, True), axis=None):
"""
Compute the trimmed mean.
This function finds the arithmetic mean of given values, ignoring values
outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None (default), then all
values are used. Either of the limit values in the tuple can also be
None representing a half-open interval.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to compute test. Default is None.
Returns
-------
tmean : float
See also
--------
trim_mean : returns mean after trimming a proportion from both tails.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmean(x)
9.5
>>> stats.tmean(x, (3,17))
10.0
"""
a = asarray(a)
if limits is None:
return np.mean(a, None)
am = _mask_to_limits(a.ravel(), limits, inclusive)
return am.mean(axis=axis)
def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed variance
This function computes the sample variance of an array of values,
while ignoring values which are outside of given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tvar : float
Trimmed variance.
Notes
-----
`tvar` computes the unbiased sample variance, i.e. it uses a correction
factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tvar(x)
35.0
>>> stats.tvar(x, (3,17))
20.0
"""
a = asarray(a)
a = a.astype(float).ravel()
if limits is None:
n = len(a)
return a.var() * n/(n-1.)
am = _mask_to_limits(a, limits, inclusive)
return np.ma.var(am, ddof=ddof, axis=axis)
def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""
Compute the trimmed minimum
This function finds the minimum value of an array `a` along the
specified axis, but only considering values greater than a specified
lower limit.
Parameters
----------
a : array_like
array of values
lowerlimit : None or float, optional
Values in the input array less than the given limit will be ignored.
When lowerlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the lower limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
tmin : float, int or ndarray
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmin(x)
0
>>> stats.tmin(x, 13)
13
>>> stats.tmin(x, 13, inclusive=False)
14
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (lowerlimit, None), (inclusive, False))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.minimum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""
Compute the trimmed maximum
This function computes the maximum value of an array along a given axis,
while ignoring values larger than a specified upper limit.
Parameters
----------
a : array_like
array of values
upperlimit : None or float, optional
Values in the input array greater than the given limit will be ignored.
When upperlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the upper limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
tmax : float, int or ndarray
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmax(x)
19
>>> stats.tmax(x, 13)
13
>>> stats.tmax(x, 13, inclusive=False)
12
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (None, upperlimit), (False, inclusive))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.maximum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed sample standard deviation
This function finds the sample standard deviation of given values,
ignoring values outside the given `limits`.
Parameters
----------
a : array_like
array of values
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tstd : float
Notes
-----
`tstd` computes the unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tstd(x)
5.9160797830996161
>>> stats.tstd(x, (3,17))
4.4721359549995796
"""
return np.sqrt(tvar(a, limits, inclusive, axis, ddof))
def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed standard error of the mean.
This function finds the standard error of the mean for given
values, ignoring values outside the given `limits`.
Parameters
----------
a : array_like
array of values
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tsem : float
Notes
-----
`tsem` uses unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tsem(x)
1.3228756555322954
>>> stats.tsem(x, (3,17))
1.1547005383792515
"""
a = np.asarray(a).ravel()
if limits is None:
return a.std(ddof=ddof) / np.sqrt(a.size)
am = _mask_to_limits(a, limits, inclusive)
sd = np.sqrt(np.ma.var(am, ddof=ddof, axis=axis))
return sd / np.sqrt(am.count())
#####################################
# MOMENTS #
#####################################
def moment(a, moment=1, axis=0, nan_policy='propagate'):
"""
Calculates the nth moment about the mean for a sample.
A moment is a specific quantitative measure of the shape of a set of points.
It is often used to calculate coefficients of skewness and kurtosis due
to its close relationship with them.
Parameters
----------
a : array_like
data
moment : int or array_like of ints, optional
order of central moment that is returned. Default is 1.
axis : int or None, optional
Axis along which the central moment is computed. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
n-th central moment : ndarray or float
The appropriate moment along the given axis or over all values if axis
is None. The denominator for the moment calculation is the number of
observations, no degrees of freedom correction is done.
See also
--------
kurtosis, skew, describe
Notes
-----
The k-th central moment of a data sample is:
.. math::
m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - \bar{x})^k
Where n is the number of samples and x-bar is the mean. This function uses
exponentiation by squares [1]_ for efficiency.
References
----------
.. [1] http://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.moment(a, moment, axis)
if contains_nan and nan_policy == 'propagate':
return np.nan
if a.size == 0:
# empty array, return nan(s) with shape matching `moment`
if np.isscalar(moment):
return np.nan
else:
return np.ones(np.asarray(moment).shape, dtype=np.float64) * np.nan
# for array_like moment input, return a value for each.
if not np.isscalar(moment):
mmnt = [_moment(a, i, axis) for i in moment]
return np.array(mmnt)
else:
return _moment(a, moment, axis)
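# Illustrative sketch (added comment, not part of the original module): the
# second central moment equals the biased sample variance, i.e. np.var with
# denominator n.
#
#     >>> from scipy import stats
#     >>> stats.moment([1, 2, 3, 4], moment=2)
#     1.25
#     >>> np.var([1, 2, 3, 4])
#     1.25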
def _moment(a, moment, axis):
if np.abs(moment - np.round(moment)) > 0:
raise ValueError("All moment parameters must be integers")
if moment == 0:
# When moment equals 0, the result is 1, by definition.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.ones(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return 1.0
elif moment == 1:
# By definition the first moment about the mean is 0.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.zeros(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return np.float64(0.0)
else:
# Exponentiation by squares: form exponent sequence
n_list = [moment]
current_n = moment
while current_n > 2:
if current_n % 2:
current_n = (current_n-1)/2
else:
current_n /= 2
n_list.append(current_n)
# Starting point for exponentiation by squares
a_zero_mean = a - np.expand_dims(np.mean(a, axis), axis)
if n_list[-1] == 1:
s = a_zero_mean.copy()
else:
s = a_zero_mean**2
# Perform multiplications
for n in n_list[-2::-1]:
s = s**2
if n % 2:
s *= a_zero_mean
return np.mean(s, axis)
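# How the exponentiation-by-squares loop above plays out for moment=5
# (explanatory comment only): the exponent sequence reduces 5 -> 2, so s starts
# as (x - mean)**2, is squared once to reach the 4th power, and is then
# multiplied by (x - mean) because 5 is odd -- three multiplications instead of
# the four a naive power computation would need.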
def variation(a, axis=0, nan_policy='propagate'):
"""
Computes the coefficient of variation, the ratio of the biased standard
deviation to the mean.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate the coefficient of variation. Default
is 0. If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
variation : ndarray
The calculated variation along the requested axis.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.variation(a, axis)
if contains_nan and nan_policy == 'propagate':
return np.nan
return a.std(axis) / a.mean(axis)
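# Illustrative sketch (added comment, not part of the original module): for
# [2, 4, 4, 4, 5, 5, 7, 9] the mean is 5.0 and the biased (population)
# standard deviation is 2.0, so the coefficient of variation is 2.0 / 5.0 = 0.4.
#
#     >>> from scipy import stats
#     >>> stats.variation([2, 4, 4, 4, 5, 5, 7, 9])  # -> 0.4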
def skew(a, axis=0, bias=True, nan_policy='propagate'):
"""
Computes the skewness of a data set.
For normally distributed data, the skewness should be about 0. A skewness
value > 0 means that there is more weight in the right tail of the
distribution. The function `skewtest` can be used to determine if the
skewness value is close enough to 0, statistically speaking.
Parameters
----------
a : ndarray
data
axis : int or None, optional
Axis along which skewness is calculated. Default is 0.
If None, compute over the whole array `a`.
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
skewness : ndarray
The skewness of values along an axis, returning 0 where all values are
equal.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 2.2.24.1
"""
a, axis = _chk_asarray(a, axis)
n = a.shape[axis]
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skew(a, axis, bias)
if contains_nan and nan_policy == 'propagate':
return np.nan
m2 = moment(a, 2, axis)
m3 = moment(a, 3, axis)
zero = (m2 == 0)
vals = _lazywhere(~zero, (m2, m3),
lambda m2, m3: m3 / m2**1.5,
0.)
if not bias:
can_correct = (n > 2) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m3 = np.extract(can_correct, m3)
nval = np.sqrt((n-1.0)*n) / (n-2.0) * m3/m2**1.5
np.place(vals, can_correct, nval)
if vals.ndim == 0:
return vals.item()
return vals
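# Illustrative sketch (added comment, not part of the original module):
# perfectly symmetric data has zero skewness, while data with a long right
# tail has positive skewness.
#
#     >>> from scipy import stats
#     >>> stats.skew([1, 2, 3, 4, 5])
#     0.0
#     >>> stats.skew([1, 1, 2, 10]) > 0   # heavy right tail
#     True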
def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'):
"""
Computes the kurtosis (Fisher or Pearson) of a dataset.
Kurtosis is the fourth central moment divided by the square of the
variance. If Fisher's definition is used, then 3.0 is subtracted from
the result to give 0.0 for a normal distribution.
If bias is False then the kurtosis is calculated using k statistics to
eliminate bias coming from biased moment estimators
Use `kurtosistest` to see if result is close enough to normal.
Parameters
----------
a : array
data for which the kurtosis is calculated
axis : int or None, optional
Axis along which the kurtosis is calculated. Default is 0.
If None, compute over the whole array `a`.
fisher : bool, optional
If True, Fisher's definition is used (normal ==> 0.0). If False,
Pearson's definition is used (normal ==> 3.0).
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
kurtosis : array
The kurtosis of values along an axis. If all values are equal,
return -3 for Fisher's definition and 0 for Pearson's definition.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosis(a, axis, fisher, bias)
if contains_nan and nan_policy == 'propagate':
return np.nan
n = a.shape[axis]
m2 = moment(a, 2, axis)
m4 = moment(a, 4, axis)
zero = (m2 == 0)
olderr = np.seterr(all='ignore')
try:
vals = np.where(zero, 0, m4 / m2**2.0)
finally:
np.seterr(**olderr)
if not bias:
can_correct = (n > 3) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m4 = np.extract(can_correct, m4)
nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
np.place(vals, can_correct, nval + 3.0)
if vals.ndim == 0:
vals = vals.item() # array scalar
if fisher:
return vals - 3
else:
return vals
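# Illustrative sketch (added comment, not part of the original module): for
# [1, 2, 3, 4, 5] the fourth central moment is 6.8 and the variance is 2.0, so
# Pearson kurtosis is 6.8 / 2.0**2 = 1.7 and Fisher (excess) kurtosis is
# 1.7 - 3 = -1.3.
#
#     >>> from scipy import stats
#     >>> stats.kurtosis([1, 2, 3, 4, 5])
#     -1.3
#     >>> stats.kurtosis([1, 2, 3, 4, 5], fisher=False)
#     1.7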
def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'):
"""
Computes several descriptive statistics of the passed array.
Parameters
----------
a : array_like
Input data.
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
ddof : int, optional
Delta degrees of freedom (only for variance). Default is 1.
bias : bool, optional
If False, then the skewness and kurtosis calculations are corrected for
statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
nobs : int
Number of observations (length of data along `axis`).
minmax: tuple of ndarrays or floats
Minimum and maximum value of data array.
mean : ndarray or float
Arithmetic mean of data along axis.
variance : ndarray or float
Unbiased variance of the data along axis, denominator is number of
observations minus one.
skewness : ndarray or float
Skewness, based on moment calculations with denominator equal to
the number of observations, i.e. no degrees of freedom correction.
kurtosis : ndarray or float
Kurtosis (Fisher). The kurtosis is normalized so that it is
zero for the normal distribution. No degrees of freedom are used.
See Also
--------
skew, kurtosis
Examples
--------
>>> from scipy import stats
>>> a = np.arange(10)
>>> stats.describe(a)
DescribeResult(nobs=10, minmax=(0, 9), mean=4.5, variance=9.1666666666666661,
skewness=0.0, kurtosis=-1.2242424242424244)
>>> b = [[1, 2], [3, 4]]
>>> stats.describe(b)
DescribeResult(nobs=2, minmax=(array([1, 2]), array([3, 4])),
mean=array([ 2., 3.]), variance=array([ 2., 2.]),
skewness=array([ 0., 0.]), kurtosis=array([-2., -2.]))
"""
a, axis = _chk_asarray(a, axis)
# Return namedtuple for clarity
DescribeResult = namedtuple('DescribeResult', ('nobs', 'minmax', 'mean',
'variance', 'skewness',
'kurtosis'))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.describe(a, axis, ddof, bias)
if contains_nan and nan_policy == 'propagate':
res = np.zeros(6) * np.nan
return DescribeResult(*res)
if a.size == 0:
raise ValueError("The input must not be empty.")
n = a.shape[axis]
mm = (np.min(a, axis=axis), np.max(a, axis=axis))
m = np.mean(a, axis=axis)
v = np.var(a, axis=axis, ddof=ddof)
sk = skew(a, axis, bias=bias)
kurt = kurtosis(a, axis, bias=bias)
return DescribeResult(n, mm, m, v, sk, kurt)
#####################################
# NORMALITY TESTS #
#####################################
def skewtest(a, axis=0, nan_policy='propagate'):
"""
Tests whether the skew is different from the normal distribution.
This function tests the null hypothesis that the skewness of
the population that the sample was drawn from is the same
as that of a corresponding normal distribution.
Parameters
----------
a : array
The data to be tested
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
a 2-sided p-value for the hypothesis test
Notes
-----
The sample size must be at least 8.
"""
a, axis = _chk_asarray(a, axis)
SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skewtest(a, axis)
if contains_nan and nan_policy == 'propagate':
return SkewtestResult(np.nan, np.nan)
if axis is None:
a = np.ravel(a)
axis = 0
b2 = skew(a, axis)
n = float(a.shape[axis])
if n < 8:
raise ValueError(
"skewtest is not valid with less than 8 samples; %i samples"
" were given." % int(n))
y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
((n-2.0) * (n+5) * (n+7) * (n+9)))
W2 = -1 + math.sqrt(2 * (beta2 - 1))
delta = 1 / math.sqrt(0.5 * math.log(W2))
alpha = math.sqrt(2.0 / (W2 - 1))
y = np.where(y == 0, 1, y)
Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1))
return SkewtestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
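# Hedged usage sketch (added comment, not part of the original module; output
# not shown because it depends on the random sample): for data drawn from a
# normal distribution the returned p-value is usually well above 0.05, so the
# null hypothesis of zero skewness is typically not rejected.
#
#     >>> from scipy import stats
#     >>> stat, p = stats.skewtest(np.random.normal(size=200))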
def kurtosistest(a, axis=0, nan_policy='propagate'):
"""
Tests whether a dataset has normal kurtosis
This function tests the null hypothesis that the kurtosis
of the population from which the sample was drawn is that
of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``.
Parameters
----------
a : array
array of the sample data
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
The 2-sided p-value for the hypothesis test
Notes
-----
Valid only for n>20. The Z-score is set to 0 for bad entries.
"""
a, axis = _chk_asarray(a, axis)
KurtosistestResult = namedtuple('KurtosistestResult', ('statistic',
'pvalue'))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosistest(a, axis)
if contains_nan and nan_policy == 'propagate':
return KurtosistestResult(np.nan, np.nan)
n = float(a.shape[axis])
if n < 5:
raise ValueError(
"kurtosistest requires at least 5 observations; %i observations"
" were given." % int(n))
if n < 20:
warnings.warn("kurtosistest only valid for n>=20 ... continuing "
"anyway, n=%i" % int(n))
b2 = kurtosis(a, axis, fisher=False)
E = 3.0*(n-1) / (n+1)
varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5))
x = (b2-E) / np.sqrt(varb2)
sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
(n*(n-2)*(n-3)))
A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 - 2/(9.0*A)
denom = 1 + x*np.sqrt(2/(A-4.0))
denom = np.where(denom < 0, 99, denom)
term2 = np.where(denom < 0, term1, np.power((1-2.0/A)/denom, 1/3.0))
Z = (term1 - term2) / np.sqrt(2/(9.0*A))
Z = np.where(denom == 99, 0, Z)
if Z.ndim == 0:
Z = Z[()]
# zprob uses upper tail, so Z needs to be positive
return KurtosistestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
def normaltest(a, axis=0, nan_policy='propagate'):
"""
Tests whether a sample differs from a normal distribution.
This function tests the null hypothesis that a sample comes
from a normal distribution. It is based on D'Agostino and
Pearson's [1]_, [2]_ test that combines skew and kurtosis to
produce an omnibus test of normality.
Parameters
----------
a : array_like
The array containing the data to be tested.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
``k`` is the z-score returned by `kurtosistest`.
pvalue : float or array
A 2-sided chi squared probability for the hypothesis test.
References
----------
.. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for
moderate and large sample size," Biometrika, 58, 341-348
.. [2] D'Agostino, R. and Pearson, E. S. (1973), "Testing for
departures from normality," Biometrika, 60, 613-622
"""
a, axis = _chk_asarray(a, axis)
NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.normaltest(a, axis)
if contains_nan and nan_policy == 'propagate':
return NormaltestResult(np.nan, np.nan)
s, _ = skewtest(a, axis)
k, _ = kurtosistest(a, axis)
k2 = s*s + k*k
return NormaltestResult(k2, distributions.chi2.sf(k2, 2))
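# Hedged usage sketch (added comment, not part of the original module): the
# statistic is s**2 + k**2 and is compared against a chi-squared distribution
# with two degrees of freedom, so small p-values indicate departure from
# normality.
#
#     >>> from scipy import stats
#     >>> stat, p = stats.normaltest(np.random.normal(size=1000))
#     >>> p > 0.05   # usually True for genuinely normal data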
def jarque_bera(x):
"""
Perform the Jarque-Bera goodness of fit test on sample data.
The Jarque-Bera test tests whether the sample data has the skewness and
kurtosis matching a normal distribution.
Note that this test only works for a large enough number of data samples
(>2000) as the test statistic asymptotically has a Chi-squared distribution
with 2 degrees of freedom.
Parameters
----------
x : array_like
Observations of a random variable.
Returns
-------
jb_value : float
The test statistic.
p : float
The p-value for the hypothesis test.
References
----------
.. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality,
homoscedasticity and serial independence of regression residuals",
6 Econometric Letters 255-259.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(987654321)
>>> x = np.random.normal(0, 1, 100000)
>>> y = np.random.rayleigh(1, 100000)
>>> stats.jarque_bera(x)
(4.7165707989581342, 0.09458225503041906)
>>> stats.jarque_bera(y)
(6713.7098548143422, 0.0)
"""
x = np.asarray(x)
n = float(x.size)
if n == 0:
raise ValueError('At least one observation is required.')
mu = x.mean()
diffx = x - mu
skewness = (1 / n * np.sum(diffx**3)) / (1 / n * np.sum(diffx**2))**(3 / 2.)
kurtosis = (1 / n * np.sum(diffx**4)) / (1 / n * np.sum(diffx**2))**2
jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4)
p = 1 - distributions.chi2.cdf(jb_value, 2)
return jb_value, p
#####################################
# FREQUENCY FUNCTIONS #
#####################################
def itemfreq(a):
"""
Returns a 2-D array of item frequencies.
Parameters
----------
a : (N,) array_like
Input array.
Returns
-------
itemfreq : (K, 2) ndarray
A 2-D frequency table. Column 1 contains sorted, unique values from
`a`, column 2 contains their respective counts.
Examples
--------
>>> from scipy import stats
>>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4])
>>> stats.itemfreq(a)
array([[ 0., 2.],
[ 1., 4.],
[ 2., 2.],
[ 4., 1.],
[ 5., 1.]])
>>> np.bincount(a)
array([2, 4, 2, 0, 1, 1])
>>> stats.itemfreq(a/10.)
array([[ 0. , 2. ],
[ 0.1, 4. ],
[ 0.2, 2. ],
[ 0.4, 1. ],
[ 0.5, 1. ]])
"""
items, inv = np.unique(a, return_inverse=True)
freq = np.bincount(inv)
return np.array([items, freq]).T
def scoreatpercentile(a, per, limit=(), interpolation_method='fraction',
axis=None):
"""
Calculate the score at a given percentile of the input sequence.
For example, the score at `per=50` is the median. If the desired quantile
lies between two data points, we interpolate between them, according to
the value of `interpolation_method`. If the parameter `limit` is provided, it
should be a tuple (lower, upper) of two values.
Parameters
----------
a : array_like
A 1-D array of values from which to extract score.
per : array_like
Percentile(s) at which to extract score. Values should be in range
[0,100].
limit : tuple, optional
Tuple of two scalars, the lower and upper limits within which to
compute the percentile. Values of `a` outside
this (closed) interval will be ignored.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`
- fraction: ``i + (j - i) * fraction`` where ``fraction`` is the
fractional part of the index surrounded by ``i`` and ``j``.
- lower: ``i``.
- higher: ``j``.
axis : int, optional
Axis along which the percentiles are computed. Default is None. If
None, compute over the whole array `a`.
Returns
-------
score : float or ndarray
Score at percentile(s).
See Also
--------
percentileofscore, numpy.percentile
Notes
-----
This function will become obsolete in the future.
For Numpy 1.9 and higher, `numpy.percentile` provides all the functionality
that `scoreatpercentile` provides, and it is significantly faster.
Users with numpy >= 1.9 are therefore recommended to use `numpy.percentile`.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
# adapted from NumPy's percentile function. When we require numpy >= 1.8,
# the implementation of this function can be replaced by np.percentile.
a = np.asarray(a)
if a.size == 0:
# empty array, return nan(s) with shape matching `per`
if np.isscalar(per):
return np.nan
else:
return np.ones(np.asarray(per).shape, dtype=np.float64) * np.nan
if limit:
a = a[(limit[0] <= a) & (a <= limit[1])]
sorted = np.sort(a, axis=axis)
if axis is None:
axis = 0
return _compute_qth_percentile(sorted, per, interpolation_method, axis)
# handle sequence of per's without calling sort multiple times
def _compute_qth_percentile(sorted, per, interpolation_method, axis):
if not np.isscalar(per):
score = [_compute_qth_percentile(sorted, i, interpolation_method, axis)
for i in per]
return np.array(score)
if (per < 0) or (per > 100):
raise ValueError("percentile must be in the range [0, 100]")
indexer = [slice(None)] * sorted.ndim
idx = per / 100. * (sorted.shape[axis] - 1)
if int(idx) != idx:
# round fractional indices according to interpolation method
if interpolation_method == 'lower':
idx = int(np.floor(idx))
elif interpolation_method == 'higher':
idx = int(np.ceil(idx))
elif interpolation_method == 'fraction':
pass # keep idx as fraction and interpolate
else:
raise ValueError("interpolation_method can only be 'fraction', "
"'lower' or 'higher'")
i = int(idx)
if i == idx:
indexer[axis] = slice(i, i + 1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i + 2)
j = i + 1
weights = array([(j - idx), (idx - i)], float)
wshape = [1] * sorted.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use np.add.reduce (== np.sum but a little faster) to coerce data type
return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval
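# Worked example of the fractional interpolation above (explanatory comment
# only): scoreatpercentile([1, 2, 3, 4], 50) sorts the data, computes
# idx = 0.5 * (4 - 1) = 1.5, so i = 1, weights = [0.5, 0.5] and the result is
# (2 * 0.5 + 3 * 0.5) / 1.0 = 2.5.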
def percentileofscore(a, score, kind='rank'):
"""
The percentile rank of a score relative to a list of scores.
A `percentileofscore` of, for example, 80% means that 80% of the
scores in `a` are below the given score. In the case of gaps or
ties, the exact definition depends on the optional keyword, `kind`.
Parameters
----------
a : array_like
Array of scores to which `score` is compared.
score : int or float
Score that is compared to the elements in `a`.
kind : {'rank', 'weak', 'strict', 'mean'}, optional
This optional parameter specifies the interpretation of the
resulting score:
- "rank": Average percentage ranking of score. In case of
multiple matches, average the percentage rankings of
all matching scores.
- "weak": This kind corresponds to the definition of a cumulative
distribution function. A percentileofscore of 80%
means that 80% of values are less than or equal
to the provided score.
- "strict": Similar to "weak", except that only values that are
strictly less than the given score are counted.
- "mean": The average of the "weak" and "strict" scores, often used in
testing. See
http://en.wikipedia.org/wiki/Percentile_rank
Returns
-------
pcos : float
Percentile-position of score (0-100) relative to `a`.
See Also
--------
numpy.percentile
Examples
--------
Three-quarters of the given values lie below a given score:
>>> from scipy import stats
>>> stats.percentileofscore([1, 2, 3, 4], 3)
75.0
With multiple matches, note how the scores of the two matches, 0.6
and 0.8 respectively, are averaged:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3)
70.0
Only 2/5 values are strictly less than 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')
40.0
But 4/5 values are less than or equal to 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')
80.0
The average between the weak and the strict scores is
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean')
60.0
"""
a = np.array(a)
n = len(a)
if kind == 'rank':
if not np.any(a == score):
a = np.append(a, score)
a_len = np.array(list(range(len(a))))
else:
a_len = np.array(list(range(len(a)))) + 1.0
a = np.sort(a)
idx = [a == score]
pct = (np.mean(a_len[idx]) / n) * 100.0
return pct
elif kind == 'strict':
return np.sum(a < score) / float(n) * 100
elif kind == 'weak':
return np.sum(a <= score) / float(n) * 100
elif kind == 'mean':
return (np.sum(a < score) + np.sum(a <= score)) * 50 / float(n)
else:
raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'")
@np.deprecate(message=("scipy.stats.histogram2 is deprecated in scipy 0.16.0; "
"use np.histogram2d instead"))
def histogram2(a, bins):
"""
Compute histogram using divisions in bins.
Count the number of times values from array `a` fall into
numerical ranges defined by `bins`. Range x is given by
bins[x] <= range_x < bins[x+1] where x =0,N and N is the
length of the `bins` array. The last range is given by
bins[N] <= range_N < infinity. Values less than bins[0] are
not included in the histogram.
Parameters
----------
a : array_like of rank 1
The array of values to be assigned into bins
bins : array_like of rank 1
Defines the ranges of values to use during histogramming.
Returns
-------
histogram2 : ndarray of rank 1
Each value represents the occurrences for a given bin (range) of
values.
"""
# comment: probably obsoleted by numpy.histogram()
n = np.searchsorted(np.sort(a), bins)
n = np.concatenate([n, [len(a)]])
return n[1:] - n[:-1]
def histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
"""
Separates the range into several bins and returns the number of instances
in each bin.
Parameters
----------
a : array_like
Array of scores which will be put into bins.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultlimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
printextras : bool, optional
If True, if there are extra points (i.e. the points that fall outside
the bin limits) a warning is raised saying how many of those points
there are. Default is False.
Returns
-------
count : ndarray
Number of points (or sum of weights) in each bin.
lowerlimit : float
Lowest value of histogram, the lower limit of the first bin.
binsize : float
The size of the bins (all bins have the same size).
extrapoints : int
The number of points outside the range of the histogram.
See Also
--------
numpy.histogram
Notes
-----
This histogram is based on numpy's histogram but has a larger range by
default if default limits is not set.
"""
a = np.ravel(a)
if defaultlimits is None:
if a.size == 0:
# handle empty arrays. Undetermined range, so use 0-1.
defaultlimits = (0, 1)
else:
# no range given, so use values in `a`
data_min = a.min()
data_max = a.max()
# Have bins extend past min and max values slightly
s = (data_max - data_min) / (2. * (numbins - 1.))
defaultlimits = (data_min - s, data_max + s)
# use numpy's histogram method to compute bins
hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,
weights=weights)
# hist is not always float; convert to keep the old output format
hist = np.array(hist, dtype=float)
# fixed-width bins are assumed, since numpy's histogram returns
# fixed-width bins when 'bins' is an int
binsize = bin_edges[1] - bin_edges[0]
# calculate number of extra points
extrapoints = len([v for v in a
if defaultlimits[0] > v or v > defaultlimits[1]])
if extrapoints > 0 and printextras:
warnings.warn("Points outside given histogram range = %s"
% extrapoints)
HistogramResult = namedtuple('HistogramResult', ('count', 'lowerlimit',
'binsize', 'extrapoints'))
return HistogramResult(hist, defaultlimits[0], binsize, extrapoints)
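# Worked example (added comment, not part of the original module):
# histogram(np.arange(10), numbins=5) pads the range by
# s = (9 - 0) / (2 * 4) = 1.125 to (-1.125, 10.125), giving
# binsize = 11.25 / 5 = 2.25, counts of [2, 2, 2, 2, 2] and extrapoints = 0.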
def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""
Returns a cumulative frequency histogram, using the histogram function.
A cumulative histogram is a mapping that counts the cumulative number of
observations in all of the bins up to the specified bin.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
cumcount : ndarray
Binned values of cumulative frequency.
lowerlimit : float
Lower real limit
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> x = [1, 4, 2, 1, 3, 1]
>>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))
>>> res.cumcount
array([ 1., 2., 3., 3.])
>>> res.extrapoints
3
Create a normal distribution with 1000 random values
>>> rng = np.random.RandomState(seed=12345)
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate cumulative frequencies
>>> res = stats.cumfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.cumcount.size,
... res.cumcount.size)
Plot histogram and cumulative histogram
>>> fig = plt.figure(figsize=(10, 4))
>>> ax1 = fig.add_subplot(1, 2, 1)
>>> ax2 = fig.add_subplot(1, 2, 2)
>>> ax1.hist(samples, bins=25)
>>> ax1.set_title('Histogram')
>>> ax2.bar(x, res.cumcount, width=res.binsize)
>>> ax2.set_title('Cumulative histogram')
>>> ax2.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
h, l, b, e = histogram(a, numbins, defaultreallimits, weights=weights)
cumhist = np.cumsum(h * 1, axis=0)
CumfreqResult = namedtuple('CumfreqResult', ('cumcount', 'lowerlimit',
'binsize', 'extrapoints'))
return CumfreqResult(cumhist, l, b, e)
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""
Returns a relative frequency histogram, using the histogram function.
A relative frequency histogram is a mapping of the number of
observations in each of the bins relative to the total of observations.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
frequency : ndarray
Binned values of relative frequency.
lowerlimit : float
Lower real limit
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> a = np.array([2, 4, 1, 2, 3, 2])
>>> res = stats.relfreq(a, numbins=4)
>>> res.frequency
array([ 0.16666667, 0.5 , 0.16666667, 0.16666667])
>>> np.sum(res.frequency) # relative frequencies should add up to 1
1.0
Create a normal distribution with 1000 random values
>>> rng = np.random.RandomState(seed=12345)
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate relative frequencies
>>> res = stats.relfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.frequency.size,
... res.frequency.size)
Plot relative frequency histogram
>>> fig = plt.figure(figsize=(5, 4))
>>> ax = fig.add_subplot(1, 1, 1)
>>> ax.bar(x, res.frequency, width=res.binsize)
>>> ax.set_title('Relative frequency histogram')
>>> ax.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
a = np.asanyarray(a)
h, l, b, e = histogram(a, numbins, defaultreallimits, weights=weights)
h = h / float(a.shape[0])
RelfreqResult = namedtuple('RelfreqResult', ('frequency', 'lowerlimit',
'binsize', 'extrapoints'))
return RelfreqResult(h, l, b, e)
#####################################
# VARIABILITY FUNCTIONS #
#####################################
def obrientransform(*args):
"""
Computes the O'Brien transform on input data (any number of arrays).
Used to test for homogeneity of variance prior to running one-way stats.
Each array in ``*args`` is one level of a factor.
If `f_oneway` is run on the transformed data and found significant,
the variances are unequal. From Maxwell and Delaney [1]_, p.112.
Parameters
----------
args : tuple of array_like
Any number of arrays.
Returns
-------
obrientransform : ndarray
Transformed data for use in an ANOVA. The first dimension
of the result corresponds to the sequence of transformed
arrays. If the arrays given are all 1-D of the same length,
the return value is a 2-D array; otherwise it is a 1-D array
of type object, with each element being an ndarray.
References
----------
.. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and
Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990.
Examples
--------
We'll test the following data sets for differences in their variance.
>>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10]
>>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15]
Apply the O'Brien transform to the data.
>>> from scipy.stats import obrientransform
>>> tx, ty = obrientransform(x, y)
Use `scipy.stats.f_oneway` to apply a one-way ANOVA test to the
transformed data.
>>> from scipy.stats import f_oneway
>>> F, p = f_oneway(tx, ty)
>>> p
0.1314139477040335
If we require that ``p < 0.05`` for significance, we cannot conclude
that the variances are different.
"""
TINY = np.sqrt(np.finfo(float).eps)
# `arrays` will hold the transformed arguments.
arrays = []
for arg in args:
a = np.asarray(arg)
n = len(a)
mu = np.mean(a)
sq = (a - mu)**2
sumsq = sq.sum()
# The O'Brien transform.
t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2))
# Check that the mean of the transformed data is equal to the
# original variance.
var = sumsq / (n - 1)
if abs(var - np.mean(t)) > TINY:
raise ValueError('Lack of convergence in obrientransform.')
arrays.append(t)
# If the arrays are not all the same shape, calling np.array(arrays)
# creates a 1-D array with dtype `object` in numpy 1.6+. In numpy
# 1.5.x, it raises an exception. To work around this, we explicitly
# set the dtype to `object` when the arrays are not all the same shape.
if len(arrays) < 2 or all(x.shape == arrays[0].shape for x in arrays[1:]):
dt = None
else:
dt = object
return np.array(arrays, dtype=dt)
@np.deprecate(message="scipy.stats.signaltonoise is deprecated in scipy 0.16.0")
def signaltonoise(a, axis=0, ddof=0):
"""
The signal-to-noise ratio of the input data.
Returns the signal-to-noise ratio of `a`, here defined as the mean
divided by the standard deviation.
Parameters
----------
a : array_like
An array_like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction for standard deviation. Default is 0.
Returns
-------
s2n : ndarray
The mean to standard deviation ratio(s) along `axis`, or 0 where the
standard deviation is 0.
"""
a = np.asanyarray(a)
m = a.mean(axis)
sd = a.std(axis=axis, ddof=ddof)
return np.where(sd == 0, 0, m/sd)
def sem(a, axis=0, ddof=1, nan_policy='propagate'):
"""
Calculates the standard error of the mean (or standard error of
measurement) of the values in the input array.
Parameters
----------
a : array_like
An array containing the values for which the standard error is
returned.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Delta degrees-of-freedom. How many degrees of freedom to adjust
for bias in limited samples relative to the population estimate
of variance. Defaults to 1.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
s : ndarray or float
The standard error of the mean in the sample(s), along the input axis.
Notes
-----
The default value for `ddof` is different from the default (0) used by other
ddof-containing routines, such as np.std and stats.nanstd.
Examples
--------
Find standard error along the first axis:
>>> from scipy import stats
>>> a = np.arange(20).reshape(5,4)
>>> stats.sem(a)
array([ 2.8284, 2.8284, 2.8284, 2.8284])
Find standard error across the whole array, using n degrees of freedom:
>>> stats.sem(a, axis=None, ddof=0)
1.2893796958227628
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.sem(a, axis, ddof)
if contains_nan and nan_policy == 'propagate':
return np.nan
n = a.shape[axis]
s = np.std(a, axis=axis, ddof=ddof) / np.sqrt(n)
return s
def zscore(a, axis=0, ddof=0):
"""
Calculates the z score of each value in the sample, relative to the sample
mean and standard deviation.
Parameters
----------
a : array_like
An array like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
Returns
-------
zscore : array_like
The z-scores, standardized by mean and standard deviation of input
array `a`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of `asarray`
for parameters).
Examples
--------
>>> a = np.array([ 0.7972, 0.0767, 0.4383, 0.7866, 0.8091, 0.1954,
... 0.6307, 0.6599, 0.1065, 0.0508])
>>> from scipy import stats
>>> stats.zscore(a)
array([ 1.1273, -1.247 , -0.0552, 1.0923, 1.1664, -0.8559, 0.5786,
0.6748, -1.1488, -1.3324])
Computing along a specified axis, using n-1 degrees of freedom (``ddof=1``)
to calculate the standard deviation:
>>> b = np.array([[ 0.3148, 0.0478, 0.6243, 0.4608],
... [ 0.7149, 0.0775, 0.6072, 0.9656],
... [ 0.6341, 0.1403, 0.9759, 0.4064],
... [ 0.5918, 0.6948, 0.904 , 0.3721],
... [ 0.0921, 0.2481, 0.1188, 0.1366]])
>>> stats.zscore(b, axis=1, ddof=1)
array([[-0.19264823, -1.28415119, 1.07259584, 0.40420358],
[ 0.33048416, -1.37380874, 0.04251374, 1.00081084],
[ 0.26796377, -1.12598418, 1.23283094, -0.37481053],
[-0.22095197, 0.24468594, 1.19042819, -1.21416216],
[-0.82780366, 1.4457416 , -0.43867764, -0.1792603 ]])
"""
a = np.asanyarray(a)
mns = a.mean(axis=axis)
sstd = a.std(axis=axis, ddof=ddof)
if axis and mns.ndim < a.ndim:
return ((a - np.expand_dims(mns, axis=axis)) /
np.expand_dims(sstd, axis=axis))
else:
return (a - mns) / sstd
def zmap(scores, compare, axis=0, ddof=0):
"""
Calculates the relative z-scores.
Returns an array of z-scores, i.e., scores that are standardized to zero
mean and unit variance, where mean and variance are calculated from the
comparison array.
Parameters
----------
scores : array_like
The input for which z-scores are calculated.
compare : array_like
The input from which the mean and standard deviation of the
normalization are taken; assumed to have the same dimension as
`scores`.
axis : int or None, optional
Axis over which mean and variance of `compare` are calculated.
Default is 0. If None, compute over the whole array `scores`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
Returns
-------
zscore : array_like
Z-scores, in the same shape as `scores`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of `asarray`
for parameters).
Examples
--------
>>> from scipy.stats import zmap
>>> a = [0.5, 2.0, 2.5, 3]
>>> b = [0, 1, 2, 3, 4]
>>> zmap(a, b)
array([-1.06066017, 0. , 0.35355339, 0.70710678])
"""
scores, compare = map(np.asanyarray, [scores, compare])
mns = compare.mean(axis=axis)
sstd = compare.std(axis=axis, ddof=ddof)
if axis and mns.ndim < compare.ndim:
return ((scores - np.expand_dims(mns, axis=axis)) /
np.expand_dims(sstd, axis=axis))
else:
return (scores - mns) / sstd
#####################################
# TRIMMING FUNCTIONS #
#####################################
@np.deprecate(message="stats.threshold is deprecated in scipy 0.17.0")
def threshold(a, threshmin=None, threshmax=None, newval=0):
"""
Clip array to a given value.
Similar to numpy.clip(), except that values less than `threshmin` or
greater than `threshmax` are replaced by `newval`, instead of by
`threshmin` and `threshmax` respectively.
Parameters
----------
a : array_like
Data to threshold.
threshmin : float, int or None, optional
Minimum threshold, defaults to None.
threshmax : float, int or None, optional
Maximum threshold, defaults to None.
newval : float or int, optional
Value to put in place of values in `a` outside of bounds.
Defaults to 0.
Returns
-------
out : ndarray
The clipped input array, with values less than `threshmin` or
greater than `threshmax` replaced with `newval`.
Examples
--------
>>> a = np.array([9, 9, 6, 3, 1, 6, 1, 0, 0, 8])
>>> from scipy import stats
>>> stats.threshold(a, threshmin=2, threshmax=8, newval=-1)
array([-1, -1, 6, 3, -1, 6, -1, -1, -1, 8])
"""
a = asarray(a).copy()
mask = zeros(a.shape, dtype=bool)
if threshmin is not None:
mask |= (a < threshmin)
if threshmax is not None:
mask |= (a > threshmax)
a[mask] = newval
return a
def sigmaclip(a, low=4., high=4.):
"""
Iterative sigma-clipping of array elements.
The output array contains only those elements of the input array `a`
that satisfy the conditions ::
mean(c) - std(c)*low < c < mean(c) + std(c)*high
Starting from the full sample, all elements outside the critical range are
removed. The iteration continues with a new critical range until no
elements are outside the range.
Parameters
----------
a : array_like
Data array, will be raveled if not 1-D.
low : float, optional
Lower bound factor of sigma clipping. Default is 4.
high : float, optional
Upper bound factor of sigma clipping. Default is 4.
Returns
-------
clipped : ndarray
Input array with clipped elements removed.
lower : float
Lower threshold value used for clipping.
upper : float
Upper threshold value used for clipping.
Examples
--------
>>> from scipy.stats import sigmaclip
>>> a = np.concatenate((np.linspace(9.5, 10.5, 31),
... np.linspace(0, 20, 5)))
>>> fact = 1.5
>>> c, low, upp = sigmaclip(a, fact, fact)
>>> c
array([ 9.96666667, 10. , 10.03333333, 10. ])
>>> c.var(), c.std()
(0.00055555555555555165, 0.023570226039551501)
>>> low, c.mean() - fact*c.std(), c.min()
(9.9646446609406727, 9.9646446609406727, 9.9666666666666668)
>>> upp, c.mean() + fact*c.std(), c.max()
(10.035355339059327, 10.035355339059327, 10.033333333333333)
>>> a = np.concatenate((np.linspace(9.5, 10.5, 11),
... np.linspace(-100, -50, 3)))
>>> c, low, upp = sigmaclip(a, 1.8, 1.8)
>>> (c == np.linspace(9.5, 10.5, 11)).all()
True
"""
c = np.asarray(a).ravel()
delta = 1
while delta:
c_std = c.std()
c_mean = c.mean()
size = c.size
critlower = c_mean - c_std*low
critupper = c_mean + c_std*high
c = c[(c > critlower) & (c < critupper)]
delta = size - c.size
SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower',
'upper'))
return SigmaclipResult(c, critlower, critupper)
def trimboth(a, proportiontocut, axis=0):
"""
Slices off a proportion of items from both ends of an array.
Slices off the passed proportion of items from both ends of the passed
array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and**
rightmost 10% of scores). The trimmed values are the lowest and
highest ones.
Slices off less if proportion results in a non-integer slice index (i.e.,
conservatively slices off `proportiontocut`).
Parameters
----------
a : array_like
Data to trim.
proportiontocut : float
Proportion (in range 0-1) of total data set to trim of each end.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
out : ndarray
Trimmed version of array `a`. The order of the trimmed content
is undefined.
See Also
--------
trim_mean
Examples
--------
>>> from scipy import stats
>>> a = np.arange(20)
>>> b = stats.trimboth(a, 0.1)
>>> b.shape
(16,)
"""
a = np.asarray(a)
if a.size == 0:
return a
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut >= uppercut):
raise ValueError("Proportion too big.")
# np.partition is preferred, but it only exists in numpy 1.8.0 and higher;
# on older versions we fall back to np.sort
try:
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
except AttributeError:
atmp = np.sort(a, axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return atmp[sl]
def trim1(a, proportiontocut, tail='right', axis=0):
"""
Slices off a proportion from ONE end of the passed array distribution.
If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'
10% of scores. The lowest or highest values are trimmed (depending on
the tail).
Slices off less if proportion results in a non-integer slice index
(i.e., conservatively slices off `proportiontocut` ).
Parameters
----------
a : array_like
Input array
proportiontocut : float
Fraction to cut off of 'left' or 'right' of distribution
tail : {'left', 'right'}, optional
Defaults to 'right'.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
trim1 : ndarray
Trimmed version of array `a`. The order of the trimmed content is
undefined.
"""
a = np.asarray(a)
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
# avoid possible corner case
if proportiontocut >= 1:
return []
if tail.lower() == 'right':
lowercut = 0
uppercut = nobs - int(proportiontocut * nobs)
elif tail.lower() == 'left':
lowercut = int(proportiontocut * nobs)
uppercut = nobs
# np.partition is preferred but it only exist in numpy 1.8.0 and higher,
# in those cases we use np.sort
try:
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
except AttributeError:
atmp = np.sort(a, axis)
    # Slice along the requested axis rather than always along axis 0.
    sl = [slice(None)] * atmp.ndim
    sl[axis] = slice(lowercut, uppercut)
    return atmp[tuple(sl)]
def trim_mean(a, proportiontocut, axis=0):
"""
Return mean of array after trimming distribution from both tails.
If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of
scores. The input is sorted before slicing. Slices off less if proportion
results in a non-integer slice index (i.e., conservatively slices off
    `proportiontocut`).
Parameters
----------
a : array_like
Input array
proportiontocut : float
Fraction to cut off of both tails of the distribution
axis : int or None, optional
Axis along which the trimmed means are computed. Default is 0.
If None, compute over the whole array `a`.
Returns
-------
trim_mean : ndarray
Mean of trimmed array.
See Also
--------
trimboth
tmean : compute the trimmed mean ignoring values outside given `limits`.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.trim_mean(x, 0.1)
9.5
>>> x2 = x.reshape(5, 4)
>>> x2
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15],
[16, 17, 18, 19]])
>>> stats.trim_mean(x2, 0.25)
array([ 8., 9., 10., 11.])
>>> stats.trim_mean(x2, 0.25, axis=1)
array([ 1.5, 5.5, 9.5, 13.5, 17.5])
"""
a = np.asarray(a)
if a.size == 0:
return np.nan
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut > uppercut):
raise ValueError("Proportion too big.")
# np.partition is preferred but it only exist in numpy 1.8.0 and higher,
# in those cases we use np.sort
try:
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
except AttributeError:
atmp = np.sort(a, axis)
    sl = [slice(None)] * atmp.ndim
    sl[axis] = slice(lowercut, uppercut)
    return np.mean(atmp[tuple(sl)], axis=axis)
def f_oneway(*args):
"""
Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that two or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements for each group.
Returns
-------
statistic : float
The computed F-value of the test.
pvalue : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent.
2. Each sample is from a normally distributed population.
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
    .. [3] McDonald, J. H. "Handbook of Biological Statistics", One-way ANOVA.
           http://www.biostathandbook.com/onewayanova.html
Examples
--------
>>> import scipy.stats as stats
[3]_ Here are some data on a shell measurement (the length of the anterior
adductor muscle scar, standardized by dividing by length) in the mussel
Mytilus trossulus from five locations: Tillamook, Oregon; Newport, Oregon;
Petersburg, Alaska; Magadan, Russia; and Tvarminne, Finland, taken from a
much larger data set used in McDonald et al. (1991).
>>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735,
... 0.0659, 0.0923, 0.0836]
>>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835,
... 0.0725]
>>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105]
>>> magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764,
... 0.0689]
>>> tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045]
>>> stats.f_oneway(tillamook, newport, petersburg, magadan, tvarminne)
F_onewayResult(statistic=7.1210194716424473, pvalue=0.00028122423145345439)
"""
args = [np.asarray(arg, dtype=float) for arg in args]
# ANOVA on N groups, each in its own array
num_groups = len(args)
alldata = np.concatenate(args)
bign = len(alldata)
# Determine the mean of the data, and subtract that from all inputs to a
# variance (via sum_of_sq / sq_of_sum) calculation. Variance is invariance
# to a shift in location, and centering all data around zero vastly
# improves numerical stability.
offset = alldata.mean()
alldata -= offset
sstot = _sum_of_squares(alldata) - (_square_of_sums(alldata) / float(bign))
ssbn = 0
for a in args:
ssbn += _square_of_sums(a - offset) / float(len(a))
# Naming: variables ending in bn/b are for "between treatments", wn/w are
# for "within treatments"
ssbn -= (_square_of_sums(alldata) / float(bign))
sswn = sstot - ssbn
dfbn = num_groups - 1
dfwn = bign - num_groups
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
f = msb / msw
prob = special.fdtrc(dfbn, dfwn, f) # equivalent to stats.f.sf
F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
return F_onewayResult(f, prob)
def pearsonr(x, y):
"""
Calculates a Pearson correlation coefficient and the p-value for testing
non-correlation.
The Pearson correlation coefficient measures the linear relationship
between two datasets. Strictly speaking, Pearson's correlation requires
that each dataset be normally distributed. Like other correlation
coefficients, this one varies between -1 and +1 with 0 implying no
correlation. Correlations of -1 or +1 imply an exact linear
relationship. Positive correlations imply that as x increases, so does
y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
x : (N,) array_like
Input
y : (N,) array_like
Input
Returns
-------
    r : float
        Pearson's correlation coefficient.
    p-value : float
        2-tailed p-value.
References
----------
http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation
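    Examples
    --------
    A minimal illustrative example: a perfect linear relationship gives a
    correlation of exactly +1 and a p-value of 0.
    >>> from scipy import stats
    >>> stats.pearsonr([1, 2, 3, 4, 5], [2, 4, 6, 8, 10])
    (1.0, 0.0)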
"""
# x and y should have same length.
x = np.asarray(x)
y = np.asarray(y)
n = len(x)
mx = x.mean()
my = y.mean()
xm, ym = x - mx, y - my
r_num = np.add.reduce(xm * ym)
r_den = np.sqrt(_sum_of_squares(xm) * _sum_of_squares(ym))
r = r_num / r_den
# Presumably, if abs(r) > 1, then it is only some small artifact of floating
# point arithmetic.
r = max(min(r, 1.0), -1.0)
df = n - 2
if abs(r) == 1.0:
prob = 0.0
else:
t_squared = r**2 * (df / ((1.0 - r) * (1.0 + r)))
prob = _betai(0.5*df, 0.5, df/(df+t_squared))
return r, prob
def fisher_exact(table, alternative='two-sided'):
"""Performs a Fisher exact test on a 2x2 contingency table.
Parameters
----------
table : array_like of ints
A 2x2 contingency table. Elements should be non-negative integers.
alternative : {'two-sided', 'less', 'greater'}, optional
Which alternative hypothesis to the null hypothesis the test uses.
Default is 'two-sided'.
Returns
-------
oddsratio : float
        This is the prior odds ratio, not a posterior estimate.
p_value : float
P-value, the probability of obtaining a distribution at least as
extreme as the one that was actually observed, assuming that the
null hypothesis is true.
See Also
--------
chi2_contingency : Chi-square test of independence of variables in a
contingency table.
Notes
-----
The calculated odds ratio is different from the one R uses. This scipy
implementation returns the (more common) "unconditional Maximum
Likelihood Estimate", while R uses the "conditional Maximum Likelihood
Estimate".
For tables with large numbers, the (inexact) chi-square test implemented
in the function `chi2_contingency` can also be used.
Examples
--------
Say we spend a few days counting whales and sharks in the Atlantic and
Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the
Indian ocean 2 whales and 5 sharks. Then our contingency table is::
Atlantic Indian
whales 8 2
sharks 1 5
We use this table to find the p-value:
>>> import scipy.stats as stats
>>> oddsratio, pvalue = stats.fisher_exact([[8, 2], [1, 5]])
>>> pvalue
0.0349...
The probability that we would observe this or an even more imbalanced ratio
by chance is about 3.5%. A commonly used significance level is 5%--if we
adopt that, we can therefore conclude that our observed imbalance is
statistically significant; whales prefer the Atlantic while sharks prefer
the Indian ocean.
"""
hypergeom = distributions.hypergeom
c = np.asarray(table, dtype=np.int64) # int32 is not enough for the algorithm
if not c.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(c < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
# If both values in a row or column are zero, the p-value is 1 and
# the odds ratio is NaN.
return np.nan, 1.0
if c[1,0] > 0 and c[0,1] > 0:
oddsratio = c[0,0] * c[1,1] / float(c[1,0] * c[0,1])
else:
oddsratio = np.inf
n1 = c[0,0] + c[0,1]
n2 = c[1,0] + c[1,1]
n = c[0,0] + c[1,0]
def binary_search(n, n1, n2, side):
"""Binary search for where to begin lower/upper halves in two-sided
test.
"""
if side == "upper":
minval = mode
maxval = n
else:
minval = 0
maxval = mode
guess = -1
while maxval - minval > 1:
if maxval == minval + 1 and guess == minval:
guess = maxval
else:
guess = (maxval + minval) // 2
pguess = hypergeom.pmf(guess, n1 + n2, n1, n)
if side == "upper":
ng = guess - 1
else:
ng = guess + 1
if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n):
break
elif pguess < pexact:
maxval = guess
else:
minval = guess
if guess == -1:
guess = minval
if side == "upper":
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess -= 1
while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess += 1
else:
while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess += 1
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess -= 1
return guess
if alternative == 'less':
pvalue = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
elif alternative == 'greater':
# Same formula as the 'less' case, but with the second column.
pvalue = hypergeom.cdf(c[0,1], n1 + n2, n1, c[0,1] + c[1,1])
elif alternative == 'two-sided':
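        # Two-sided p-value: sum the probabilities of all tables (with the
        # same margins) that are no more likely than the observed one under
        # the null hypergeometric distribution; binary_search locates the
        # cutoff on the opposite tail.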
mode = int(float((n + 1) * (n1 + 1)) / (n1 + n2 + 2))
pexact = hypergeom.pmf(c[0,0], n1 + n2, n1, n)
pmode = hypergeom.pmf(mode, n1 + n2, n1, n)
epsilon = 1 - 1e-4
if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
return oddsratio, 1.
elif c[0,0] < mode:
plower = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, plower
guess = binary_search(n, n1, n2, "upper")
pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n)
else:
pupper = hypergeom.sf(c[0,0] - 1, n1 + n2, n1, n)
if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, pupper
guess = binary_search(n, n1, n2, "lower")
pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
else:
msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
raise ValueError(msg)
if pvalue > 1.0:
pvalue = 1.0
return oddsratio, pvalue
def spearmanr(a, b=None, axis=0, nan_policy='propagate'):
"""
Calculates a Spearman rank-order correlation coefficient and the p-value
to test for non-correlation.
The Spearman correlation is a nonparametric measure of the monotonicity
of the relationship between two datasets. Unlike the Pearson correlation,
the Spearman correlation does not assume that both datasets are normally
distributed. Like other correlation coefficients, this one varies
between -1 and +1 with 0 implying no correlation. Correlations of -1 or
+1 imply an exact monotonic relationship. Positive correlations imply that
as x increases, so does y. Negative correlations imply that as x
increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
a, b : 1D or 2D array_like, b is optional
One or two 1-D or 2-D arrays containing multiple variables and
observations. When these are 1-D, each represents a vector of
observations of a single variable. For the behavior in the 2-D case,
see under ``axis``, below.
Both arrays need to have the same length in the ``axis`` dimension.
axis : int or None, optional
If axis=0 (default), then each column represents a variable, with
observations in the rows. If axis=1, the relationship is transposed:
each row represents a variable, while the columns contain observations.
If axis=None, then both arrays will be raveled.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
correlation : float or ndarray (2-D square)
Spearman correlation matrix or correlation coefficient (if only 2
        variables are given as parameters).  Correlation matrix is square with
length equal to total number of variables (columns or rows) in a and b
combined.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
        that two sets of data are uncorrelated.  It has the same dimension
        as ``correlation``.
Notes
-----
Changes in scipy 0.8.0: rewrite to add tie-handling, and axis.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 14.7
Examples
--------
>>> from scipy import stats
>>> stats.spearmanr([1,2,3,4,5], [5,6,7,8,7])
(0.82078268166812329, 0.088587005313543798)
>>> np.random.seed(1234321)
>>> x2n = np.random.randn(100, 2)
>>> y2n = np.random.randn(100, 2)
>>> stats.spearmanr(x2n)
(0.059969996999699973, 0.55338590803773591)
>>> stats.spearmanr(x2n[:,0], x2n[:,1])
(0.059969996999699973, 0.55338590803773591)
>>> rho, pval = stats.spearmanr(x2n, y2n)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> pval
array([[ 0. , 0.55338591, 0.06435364, 0.53617935],
[ 0.55338591, 0. , 0.27592895, 0.80234077],
[ 0.06435364, 0.27592895, 0. , 0.73039992],
[ 0.53617935, 0.80234077, 0.73039992, 0. ]])
>>> rho, pval = stats.spearmanr(x2n.T, y2n.T, axis=1)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> stats.spearmanr(x2n, y2n, axis=None)
(0.10816770419260482, 0.1273562188027364)
>>> stats.spearmanr(x2n.ravel(), y2n.ravel())
(0.10816770419260482, 0.1273562188027364)
>>> xint = np.random.randint(10, size=(100, 2))
>>> stats.spearmanr(xint)
(0.052760927029710199, 0.60213045837062351)
"""
a, axisout = _chk_asarray(a, axis)
SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue'))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
return mstats_basic.spearmanr(a, b, axis)
if contains_nan and nan_policy == 'propagate':
return SpearmanrResult(np.nan, np.nan)
if a.size <= 1:
return SpearmanrResult(np.nan, np.nan)
ar = np.apply_along_axis(rankdata, axisout, a)
br = None
if b is not None:
b, axisout = _chk_asarray(b, axis)
contains_nan, nan_policy = _contains_nan(b, nan_policy)
if contains_nan and nan_policy == 'omit':
b = ma.masked_invalid(b)
return mstats_basic.spearmanr(a, b, axis)
if contains_nan and nan_policy == 'propagate':
return SpearmanrResult(np.nan, np.nan)
br = np.apply_along_axis(rankdata, axisout, b)
n = a.shape[axisout]
rs = np.corrcoef(ar, br, rowvar=axisout)
olderr = np.seterr(divide='ignore') # rs can have elements equal to 1
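    # Transform the correlation(s) into a t statistic with n - 2 degrees of
    # freedom; the two-sided p-value below comes from the t distribution.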
try:
t = rs * np.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))
finally:
np.seterr(**olderr)
prob = 2 * distributions.t.sf(np.abs(t), n-2)
if rs.shape == (2, 2):
return SpearmanrResult(rs[1, 0], prob[1, 0])
else:
return SpearmanrResult(rs, prob)
def pointbiserialr(x, y):
"""
Calculates a point biserial correlation coefficient and its p-value.
The point biserial correlation is used to measure the relationship
between a binary variable, x, and a continuous variable, y. Like other
correlation coefficients, this one varies between -1 and +1 with 0
implying no correlation. Correlations of -1 or +1 imply a determinative
relationship.
This function uses a shortcut formula but produces the same result as
`pearsonr`.
Parameters
----------
x : array_like of bools
Input array.
y : array_like
Input array.
Returns
-------
correlation : float
R value
pvalue : float
2-tailed p-value
Notes
-----
    `pointbiserialr` uses a t-test with ``n-2`` degrees of freedom.
    It is equivalent to `pearsonr`.
The value of the point-biserial correlation can be calculated from:
.. math::
r_{pb} = \frac{\overline{Y_{1}} -
                 \overline{Y_{0}}}{s_{y}}\sqrt{\frac{N_{0} N_{1}}{N (N - 1)}}
Where :math:`Y_{0}` and :math:`Y_{1}` are means of the metric
observations coded 0 and 1 respectively; :math:`N_{0}` and :math:`N_{1}`
are number of observations coded 0 and 1 respectively; :math:`N` is the
total number of observations and :math:`s_{y}` is the standard
deviation of all the metric observations.
A value of :math:`r_{pb}` that is significantly different from zero is
completely equivalent to a significant difference in means between the two
groups. Thus, an independent groups t Test with :math:`N-2` degrees of
freedom may be used to test whether :math:`r_{pb}` is nonzero. The
relation between the t-statistic for comparing two independent groups and
:math:`r_{pb}` is given by:
.. math::
t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}}
References
----------
.. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math.
Statist., Vol. 20, no.1, pp. 125-126, 1949.
.. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous
Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol. 25,
           no. 3, pp. 603-607, 1954.
.. [3] http://onlinelibrary.wiley.com/doi/10.1002/9781118445112.stat06227/full
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 0, 0, 1, 1, 1, 1])
>>> b = np.arange(7)
>>> stats.pointbiserialr(a, b)
(0.8660254037844386, 0.011724811003954652)
>>> stats.pearsonr(a, b)
(0.86602540378443871, 0.011724811003954626)
>>> np.corrcoef(a, b)
array([[ 1. , 0.8660254],
[ 0.8660254, 1. ]])
"""
PointbiserialrResult = namedtuple('PointbiserialrResult', ('correlation',
'pvalue'))
rpb, prob = pearsonr(x, y)
return PointbiserialrResult(rpb, prob)
def kendalltau(x, y, initial_lexsort=True, nan_policy='propagate'):
"""
Calculates Kendall's tau, a correlation measure for ordinal data.
Kendall's tau is a measure of the correspondence between two rankings.
Values close to 1 indicate strong agreement, values close to -1 indicate
strong disagreement. This is the tau-b version of Kendall's tau which
accounts for ties.
Parameters
----------
x, y : array_like
Arrays of rankings, of the same shape. If arrays are not 1-D, they will
be flattened to 1-D.
initial_lexsort : bool, optional
Whether to use lexsort or quicksort as the sorting method for the
initial sort of the inputs. Default is lexsort (True), for which
`kendalltau` is of complexity O(n log(n)). If False, the complexity is
O(n^2), but with a smaller pre-factor (so quicksort may be faster for
small arrays).
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
correlation : float
The tau statistic.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
an absence of association, tau = 0.
See also
--------
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
Notes
-----
The definition of Kendall's tau that is used is::
tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U))
where P is the number of concordant pairs, Q the number of discordant
pairs, T the number of ties only in `x`, and U the number of ties only in
`y`. If a tie occurs for the same pair in both `x` and `y`, it is not
added to either T or U.
References
----------
W.R. Knight, "A Computer Method for Calculating Kendall's Tau with
Ungrouped Data", Journal of the American Statistical Association, Vol. 61,
No. 314, Part 1, pp. 436-439, 1966.
Examples
--------
>>> from scipy import stats
>>> x1 = [12, 2, 1, 12, 2]
>>> x2 = [1, 4, 7, 1, 0]
>>> tau, p_value = stats.kendalltau(x1, x2)
>>> tau
-0.47140452079103173
>>> p_value
0.24821309157521476
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue'))
if x.size != y.size:
raise ValueError("All inputs to `kendalltau` must be of the same size, "
"found x-size %s and y-size %s" % (x.size, y.size))
elif not x.size or not y.size:
return KendalltauResult(np.nan, np.nan) # Return NaN if arrays are empty
    # Check x and y for NaNs separately: ``or``-ing the result tuples would
    # always evaluate to the first one (non-empty tuples are truthy), so the
    # check on y would otherwise be skipped.
    contains_nan_x, nan_policy = _contains_nan(x, nan_policy)
    contains_nan_y, nan_policy = _contains_nan(y, nan_policy)
    contains_nan = contains_nan_x or contains_nan_y
if contains_nan and nan_policy == 'propagate':
return KendalltauResult(np.nan, np.nan)
elif contains_nan and nan_policy == 'omit':
x = ma.masked_invalid(x)
y = ma.masked_invalid(y)
return mstats_basic.kendalltau(x, y)
n = np.int64(len(x))
temp = list(range(n)) # support structure used by mergesort
# this closure recursively sorts sections of perm[] by comparing
# elements of y[perm[]] using temp[] as support
# returns the number of swaps required by an equivalent bubble sort
def mergesort(offs, length):
exchcnt = 0
if length == 1:
return 0
if length == 2:
if y[perm[offs]] <= y[perm[offs+1]]:
return 0
t = perm[offs]
perm[offs] = perm[offs+1]
perm[offs+1] = t
return 1
length0 = length // 2
length1 = length - length0
middle = offs + length0
exchcnt += mergesort(offs, length0)
exchcnt += mergesort(middle, length1)
if y[perm[middle - 1]] < y[perm[middle]]:
return exchcnt
# merging
i = j = k = 0
while j < length0 or k < length1:
if k >= length1 or (j < length0 and y[perm[offs + j]] <=
y[perm[middle + k]]):
temp[i] = perm[offs + j]
d = i - j
j += 1
else:
temp[i] = perm[middle + k]
d = (offs + i) - (middle + k)
k += 1
if d > 0:
exchcnt += d
i += 1
perm[offs:offs+length] = temp[0:length]
return exchcnt
# initial sort on values of x and, if tied, on values of y
if initial_lexsort:
# sort implemented as mergesort, worst case: O(n log(n))
perm = np.lexsort((y, x))
else:
# sort implemented as quicksort, 30% faster but with worst case: O(n^2)
perm = list(range(n))
perm.sort(key=lambda a: (x[a], y[a]))
# compute joint ties
first = 0
t = 0
for i in xrange(1, n):
if x[perm[first]] != x[perm[i]] or y[perm[first]] != y[perm[i]]:
t += ((i - first) * (i - first - 1)) // 2
first = i
t += ((n - first) * (n - first - 1)) // 2
# compute ties in x
first = 0
u = 0
for i in xrange(1, n):
if x[perm[first]] != x[perm[i]]:
u += ((i - first) * (i - first - 1)) // 2
first = i
u += ((n - first) * (n - first - 1)) // 2
# count exchanges
exchanges = mergesort(0, n)
# compute ties in y after mergesort with counting
first = 0
v = 0
for i in xrange(1, n):
if y[perm[first]] != y[perm[i]]:
v += ((i - first) * (i - first - 1)) // 2
first = i
v += ((n - first) * (n - first - 1)) // 2
tot = (n * (n - 1)) // 2
if tot == u or tot == v:
# Special case for all ties in both ranks
return KendalltauResult(np.nan, np.nan)
# Prevent overflow; equal to np.sqrt((tot - u) * (tot - v))
denom = np.exp(0.5 * (np.log(tot - u) + np.log(tot - v)))
tau = ((tot - (v + u - t)) - 2.0 * exchanges) / denom
# what follows reproduces the ending of Gary Strangman's original
# stats.kendalltau() in SciPy
svar = (4.0 * n + 10.0) / (9.0 * n * (n - 1))
z = tau / np.sqrt(svar)
prob = special.erfc(np.abs(z) / 1.4142136)
return KendalltauResult(tau, prob)
#####################################
# INFERENTIAL STATISTICS #
#####################################
def ttest_1samp(a, popmean, axis=0, nan_policy='propagate'):
"""
Calculates the T-test for the mean of ONE group of scores.
This is a two-sided test for the null hypothesis that the expected value
(mean) of a sample of independent observations `a` is equal to the given
population mean, `popmean`.
Parameters
----------
a : array_like
sample observation
popmean : float or array_like
        expected value in null hypothesis, if array_like then it must have the
same shape as `a` excluding the axis dimension
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Examples
--------
>>> from scipy import stats
>>> np.random.seed(7654567) # fix seed to get the same result
>>> rvs = stats.norm.rvs(loc=5, scale=10, size=(50,2))
Test if mean of random sample is equal to true mean, and different mean.
We reject the null hypothesis in the second case and don't reject it in
the first case.
>>> stats.ttest_1samp(rvs,5.0)
(array([-0.68014479, -0.04323899]), array([ 0.49961383, 0.96568674]))
>>> stats.ttest_1samp(rvs,0.0)
(array([ 2.77025808, 4.11038784]), array([ 0.00789095, 0.00014999]))
Examples using axis and non-scalar dimension for population mean.
>>> stats.ttest_1samp(rvs,[5.0,0.0])
(array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
>>> stats.ttest_1samp(rvs.T,[5.0,0.0],axis=1)
(array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
>>> stats.ttest_1samp(rvs,[[5.0],[0.0]])
(array([[-0.68014479, -0.04323899],
[ 2.77025808, 4.11038784]]), array([[ 4.99613833e-01, 9.65686743e-01],
[ 7.89094663e-03, 1.49986458e-04]]))
"""
a, axis = _chk_asarray(a, axis)
Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.ttest_1samp(a, popmean, axis)
n = a.shape[axis]
df = n - 1
d = np.mean(a, axis) - popmean
v = np.var(a, axis, ddof=1)
denom = np.sqrt(v / float(n))
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return Ttest_1sampResult(t, prob)
def _ttest_finish(df, t):
"""Common code between all 3 t-test functions."""
prob = distributions.t.sf(np.abs(t), df) * 2 # use np.abs to get upper tail
if t.ndim == 0:
t = t[()]
return t, prob
def _ttest_ind_from_stats(mean1, mean2, denom, df):
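    # Shared core of the independent-sample t-tests: t = (mean1 - mean2) / denom,
    # with the p-value taken from a t distribution with `df` degrees of freedom.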
d = mean1 - mean2
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return (t, prob)
def _unequal_var_ttest_denom(v1, n1, v2, n2):
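    # Welch's t-test denominator: the degrees of freedom follow the
    # Welch-Satterthwaite approximation based on the per-sample variances.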
vn1 = v1 / n1
vn2 = v2 / n2
df = ((vn1 + vn2)**2) / ((vn1**2) / (n1 - 1) + (vn2**2) / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = np.where(np.isnan(df), 1, df)
denom = np.sqrt(vn1 + vn2)
return df, denom
def _equal_var_ttest_denom(v1, n1, v2, n2):
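    # Standard (Student) t-test denominator: pool the two sample variances,
    # weighting each by its degrees of freedom.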
df = n1 + n2 - 2
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / float(df)
denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
return df, denom
def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
equal_var=True):
"""
T-test for means of two independent samples from descriptive statistics.
This is a two-sided test for the null hypothesis that 2 independent samples
have identical average (expected) values.
Parameters
----------
mean1 : array_like
The mean(s) of sample 1.
std1 : array_like
The standard deviation(s) of sample 1.
nobs1 : array_like
The number(s) of observations of sample 1.
mean2 : array_like
        The mean(s) of sample 2.
    std2 : array_like
        The standard deviation(s) of sample 2.
nobs2 : array_like
The number(s) of observations of sample 2.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
Returns
-------
statistic : float or array
The calculated t-statistics
pvalue : float or array
The two-tailed p-value.
See also
--------
scipy.stats.ttest_ind
Notes
-----
.. versionadded:: 0.16.0
References
----------
.. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
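    Examples
    --------
    A minimal illustrative sketch (the summary statistics below are invented
    for the example, so no expected output is shown):
    >>> from scipy import stats
    >>> t, p = stats.ttest_ind_from_stats(mean1=15.0, std1=2.0, nobs1=30,
    ...                                   mean2=14.5, std2=2.5, nobs2=40)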
"""
if equal_var:
df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2)
else:
df, denom = _unequal_var_ttest_denom(std1**2, nobs1,
std2**2, nobs2)
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
res = _ttest_ind_from_stats(mean1, mean2, denom, df)
return Ttest_indResult(*res)
def ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate'):
"""
Calculates the T-test for the means of TWO INDEPENDENT samples of scores.
This is a two-sided test for the null hypothesis that 2 independent samples
have identical average (expected) values. This test assumes that the
populations have identical variances by default.
Parameters
----------
a, b : array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
.. versionadded:: 0.11.0
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
The calculated t-statistic.
pvalue : float or array
The two-tailed p-value.
Notes
-----
We can use this test, if we observe two independent samples from
the same or different population, e.g. exam scores of boys and
girls or of two ethnic groups. The test measures whether the
average (expected) value differs significantly across samples. If
we observe a large p-value, for example larger than 0.05 or 0.1,
then we cannot reject the null hypothesis of identical average scores.
If the p-value is smaller than the threshold, e.g. 1%, 5% or 10%,
then we reject the null hypothesis of equal averages.
References
----------
.. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678)
Test with sample with identical means:
>>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> rvs2 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> stats.ttest_ind(rvs1,rvs2)
(0.26833823296239279, 0.78849443369564776)
>>> stats.ttest_ind(rvs1,rvs2, equal_var = False)
(0.26833823296239279, 0.78849452749500748)
`ttest_ind` underestimates p for unequal variances:
>>> rvs3 = stats.norm.rvs(loc=5, scale=20, size=500)
>>> stats.ttest_ind(rvs1, rvs3)
(-0.46580283298287162, 0.64145827413436174)
>>> stats.ttest_ind(rvs1, rvs3, equal_var = False)
(-0.46580283298287162, 0.64149646246569292)
When n1 != n2, the equal variance t-statistic is no longer equal to the
unequal variance t-statistic:
>>> rvs4 = stats.norm.rvs(loc=5, scale=20, size=100)
>>> stats.ttest_ind(rvs1, rvs4)
(-0.99882539442782481, 0.3182832709103896)
>>> stats.ttest_ind(rvs1, rvs4, equal_var = False)
(-0.69712570584654099, 0.48716927725402048)
T-test with different means, variance, and n:
>>> rvs5 = stats.norm.rvs(loc=8, scale=20, size=100)
>>> stats.ttest_ind(rvs1, rvs5)
(-1.4679669854490653, 0.14263895620529152)
>>> stats.ttest_ind(rvs1, rvs5, equal_var = False)
(-0.94365973617132992, 0.34744170334794122)
"""
a, b, axis = _chk2_asarray(a, b, axis)
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
    # Check a and b for NaNs separately: ``or``-ing the result tuples would
    # always evaluate to the first one, so the check on b would be skipped.
    contains_nan_a, nan_policy = _contains_nan(a, nan_policy)
    contains_nan_b, nan_policy = _contains_nan(b, nan_policy)
    contains_nan = contains_nan_a or contains_nan_b
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
return mstats_basic.ttest_ind(a, b, axis, equal_var)
if a.size == 0 or b.size == 0:
return Ttest_indResult(np.nan, np.nan)
v1 = np.var(a, axis, ddof=1)
v2 = np.var(b, axis, ddof=1)
n1 = a.shape[axis]
n2 = b.shape[axis]
if equal_var:
df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)
else:
df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)
res = _ttest_ind_from_stats(np.mean(a, axis), np.mean(b, axis), denom, df)
return Ttest_indResult(*res)
def ttest_rel(a, b, axis=0, nan_policy='propagate'):
"""
Calculates the T-test on TWO RELATED samples of scores, a and b.
This is a two-sided test for the null hypothesis that 2 related or
repeated samples have identical average (expected) values.
Parameters
----------
a, b : array_like
The arrays must have the same shape.
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Notes
-----
    Examples for use are scores of the same set of students in
different exams, or repeated sampling from the same units. The
test measures whether the average score differs significantly
across samples (e.g. exams). If we observe a large p-value, for
example greater than 0.05 or 0.1 then we cannot reject the null
hypothesis of identical average scores. If the p-value is smaller
than the threshold, e.g. 1%, 5% or 10%, then we reject the null
hypothesis of equal averages. Small p-values are associated with
large t-statistics.
References
----------
http://en.wikipedia.org/wiki/T-test#Dependent_t-test
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) # fix random seed to get same numbers
>>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> rvs2 = (stats.norm.rvs(loc=5,scale=10,size=500) +
... stats.norm.rvs(scale=0.2,size=500))
>>> stats.ttest_rel(rvs1,rvs2)
(0.24101764965300962, 0.80964043445811562)
>>> rvs3 = (stats.norm.rvs(loc=8,scale=10,size=500) +
... stats.norm.rvs(scale=0.2,size=500))
>>> stats.ttest_rel(rvs1,rvs3)
(-3.9995108708727933, 7.3082402191726459e-005)
"""
a, b, axis = _chk2_asarray(a, b, axis)
Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))
    # Check a and b for NaNs separately: ``or``-ing the result tuples would
    # always evaluate to the first one, so the check on b would be skipped.
    contains_nan_a, nan_policy = _contains_nan(a, nan_policy)
    contains_nan_b, nan_policy = _contains_nan(b, nan_policy)
    contains_nan = contains_nan_a or contains_nan_b
    if contains_nan and nan_policy == 'omit':
        a = ma.masked_invalid(a)
        b = ma.masked_invalid(b)
        return mstats_basic.ttest_rel(a, b, axis)
if a.shape[axis] != b.shape[axis]:
raise ValueError('unequal length arrays')
if a.size == 0 or b.size == 0:
        return Ttest_relResult(np.nan, np.nan)
n = a.shape[axis]
df = float(n - 1)
d = (a - b).astype(np.float64)
v = np.var(d, axis, ddof=1)
dm = np.mean(d, axis)
denom = np.sqrt(v / float(n))
t = np.divide(dm, denom)
t, prob = _ttest_finish(df, t)
return Ttest_relResult(t, prob)
def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='approx'):
"""
Perform the Kolmogorov-Smirnov test for goodness of fit.
This performs a test of the distribution G(x) of an observed
random variable against a given distribution F(x). Under the null
hypothesis the two distributions are identical, G(x)=F(x). The
alternative hypothesis can be either 'two-sided' (default), 'less'
or 'greater'. The KS test is only valid for continuous distributions.
Parameters
----------
rvs : str, array or callable
If a string, it should be the name of a distribution in `scipy.stats`.
If an array, it should be a 1-D array of observations of random
variables.
If a callable, it should be a function to generate random variables;
it is required to have a keyword argument `size`.
cdf : str or callable
If a string, it should be the name of a distribution in `scipy.stats`.
If `rvs` is a string then `cdf` can be False or the same as `rvs`.
If a callable, that callable is used to calculate the cdf.
args : tuple, sequence, optional
Distribution parameters, used if `rvs` or `cdf` are strings.
N : int, optional
Sample size if `rvs` is string or callable. Default is 20.
alternative : {'two-sided', 'less','greater'}, optional
Defines the alternative hypothesis (see explanation above).
Default is 'two-sided'.
mode : 'approx' (default) or 'asymp', optional
Defines the distribution used for calculating the p-value.
- 'approx' : use approximation to exact distribution of test statistic
- 'asymp' : use asymptotic distribution of test statistic
Returns
-------
statistic : float
KS test statistic, either D, D+ or D-.
pvalue : float
One-tailed or two-tailed p-value.
Notes
-----
In the one-sided test, the alternative is that the empirical
cumulative distribution function of the random variable is "less"
or "greater" than the cumulative distribution function F(x) of the
hypothesis, ``G(x)<=F(x)``, resp. ``G(x)>=F(x)``.
Examples
--------
>>> from scipy import stats
>>> x = np.linspace(-15, 15, 9)
>>> stats.kstest(x, 'norm')
(0.44435602715924361, 0.038850142705171065)
>>> np.random.seed(987654321) # set random seed to get the same result
>>> stats.kstest('norm', False, N=100)
(0.058352892479417884, 0.88531190944151261)
The above lines are equivalent to:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.norm.rvs(size=100), 'norm')
(0.058352892479417884, 0.88531190944151261)
*Test against one-sided alternative hypothesis*
Shift distribution to larger values, so that ``cdf_dgp(x) < norm.cdf(x)``:
>>> np.random.seed(987654321)
>>> x = stats.norm.rvs(loc=0.2, size=100)
>>> stats.kstest(x,'norm', alternative = 'less')
(0.12464329735846891, 0.040989164077641749)
Reject equal distribution against alternative hypothesis: less
>>> stats.kstest(x,'norm', alternative = 'greater')
(0.0072115233216311081, 0.98531158590396395)
Don't reject equal distribution against alternative hypothesis: greater
>>> stats.kstest(x,'norm', mode='asymp')
(0.12464329735846891, 0.08944488871182088)
*Testing t distributed random variables against normal distribution*
With 100 degrees of freedom the t distribution looks close to the normal
distribution, and the K-S test does not reject the hypothesis that the
sample came from the normal distribution:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(100,size=100),'norm')
(0.072018929165471257, 0.67630062862479168)
With 3 degrees of freedom the t distribution looks sufficiently different
from the normal distribution, that we can reject the hypothesis that the
sample came from the normal distribution at the 10% level:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(3,size=100),'norm')
(0.131016895759829, 0.058826222555312224)
"""
if isinstance(rvs, string_types):
if (not cdf) or (cdf == rvs):
cdf = getattr(distributions, rvs).cdf
rvs = getattr(distributions, rvs).rvs
else:
raise AttributeError("if rvs is string, cdf has to be the "
"same distribution")
if isinstance(cdf, string_types):
cdf = getattr(distributions, cdf).cdf
if callable(rvs):
kwds = {'size': N}
vals = np.sort(rvs(*args, **kwds))
else:
vals = np.sort(rvs)
N = len(vals)
cdfvals = cdf(vals, *args)
# to not break compatibility with existing code
if alternative == 'two_sided':
alternative = 'two-sided'
KstestResult = namedtuple('KstestResult', ('statistic', 'pvalue'))
if alternative in ['two-sided', 'greater']:
Dplus = (np.arange(1.0, N + 1)/N - cdfvals).max()
if alternative == 'greater':
return KstestResult(Dplus, distributions.ksone.sf(Dplus, N))
if alternative in ['two-sided', 'less']:
Dmin = (cdfvals - np.arange(0.0, N)/N).max()
if alternative == 'less':
return KstestResult(Dmin, distributions.ksone.sf(Dmin, N))
if alternative == 'two-sided':
D = np.max([Dplus, Dmin])
if mode == 'asymp':
return KstestResult(D, distributions.kstwobign.sf(D * np.sqrt(N)))
if mode == 'approx':
pval_two = distributions.kstwobign.sf(D * np.sqrt(N))
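            # Heuristic: for large samples (or clearly large p-values) the
            # asymptotic two-sided distribution is accurate enough; otherwise
            # fall back to twice the one-sided exact distribution.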
if N > 2666 or pval_two > 0.80 - N*0.3/1000:
return KstestResult(D,
distributions.kstwobign.sf(D * np.sqrt(N)))
else:
return KstestResult(D, 2 * distributions.ksone.sf(D, N))
# Map from names to lambda_ values used in power_divergence().
_power_div_lambda_names = {
"pearson": 1,
"log-likelihood": 0,
"freeman-tukey": -0.5,
"mod-log-likelihood": -1,
"neyman": -2,
"cressie-read": 2/3,
}
def _count(a, axis=None):
"""
Count the number of non-masked elements of an array.
This function behaves like np.ma.count(), but is much faster
for ndarrays.
"""
if hasattr(a, 'count'):
num = a.count(axis=axis)
if isinstance(num, np.ndarray) and num.ndim == 0:
# In some cases, the `count` method returns a scalar array (e.g.
# np.array(3)), but we want a plain integer.
num = int(num)
else:
if axis is None:
num = a.size
else:
num = a.shape[axis]
return num
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
"""
Cressie-Read power divergence statistic and goodness of fit test.
This function tests the null hypothesis that the categorical data
has the given frequencies, using the Cressie-Read power divergence
statistic.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
lambda_ : float or str, optional
`lambda_` gives the power in the Cressie-Read power divergence
statistic. The default is 1. For convenience, `lambda_` may be
assigned one of the following strings, in which case the
corresponding numerical value is used::
String Value Description
"pearson" 1 Pearson's chi-squared statistic.
In this case, the function is
equivalent to `stats.chisquare`.
"log-likelihood" 0 Log-likelihood ratio. Also known as
the G-test [3]_.
"freeman-tukey" -1/2 Freeman-Tukey statistic.
"mod-log-likelihood" -1 Modified log-likelihood ratio.
"neyman" -2 Neyman's statistic.
"cressie-read" 2/3 The power recommended in [5]_.
Returns
-------
statistic : float or ndarray
The Cressie-Read power divergence test statistic. The value is
        a float if `axis` is None or if `f_obs` and `f_exp` are 1-D.
pvalue : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `stat` are scalars.
See Also
--------
chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
When `lambda_` is less than zero, the formula for the statistic involves
dividing by `f_obs`, so a warning or error may be generated if any value
in `f_obs` is 0.
Similarly, a warning or error may be generated if any value in `f_exp` is
zero when `lambda_` >= 0.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
This function handles masked arrays. If an element of `f_obs` or `f_exp`
is masked, then data at that position is ignored, and does not count
towards the size of the data set.
.. versionadded:: 0.13.0
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test
.. [3] "G-test", http://en.wikipedia.org/wiki/G-test
.. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and
practice of statistics in biological research", New York: Freeman
(1981)
.. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
pp. 440-464.
Examples
--------
(See `chisquare` for more examples.)
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies. Here we
perform a G-test (i.e. use the log-likelihood ratio statistic):
>>> from scipy.stats import power_divergence
>>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood')
(2.006573162632538, 0.84823476779463769)
The expected frequencies can be given with the `f_exp` argument:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[16, 16, 16, 16, 16, 8],
... lambda_='log-likelihood')
(3.3281031458963746, 0.6495419288047497)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> power_divergence(obs, lambda_="log-likelihood")
(array([ 2.00657316, 6.77634498]), array([ 0.84823477, 0.23781225]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> power_divergence(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> power_divergence(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
test statistic with `ddof`.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we must use ``axis=1``:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8],
... [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
# Convert the input argument `lambda_` to a numerical value.
if isinstance(lambda_, string_types):
if lambda_ not in _power_div_lambda_names:
names = repr(list(_power_div_lambda_names.keys()))[1:-1]
raise ValueError("invalid string for lambda_: {0!r}. Valid strings "
"are {1}".format(lambda_, names))
lambda_ = _power_div_lambda_names[lambda_]
elif lambda_ is None:
lambda_ = 1
f_obs = np.asanyarray(f_obs)
if f_exp is not None:
f_exp = np.atleast_1d(np.asanyarray(f_exp))
else:
# Compute the equivalent of
# f_exp = f_obs.mean(axis=axis, keepdims=True)
# Older versions of numpy do not have the 'keepdims' argument, so
# we have to do a little work to achieve the same result.
# Ignore 'invalid' errors so the edge case of a data set with length 0
# is handled without spurious warnings.
with np.errstate(invalid='ignore'):
f_exp = np.atleast_1d(f_obs.mean(axis=axis))
if axis is not None:
reduced_shape = list(f_obs.shape)
reduced_shape[axis] = 1
f_exp.shape = reduced_shape
# `terms` is the array of terms that are summed along `axis` to create
# the test statistic. We use some specialized code for a few special
# cases of lambda_.
if lambda_ == 1:
# Pearson's chi-squared statistic
terms = (f_obs - f_exp)**2 / f_exp
elif lambda_ == 0:
# Log-likelihood ratio (i.e. G-test)
terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
elif lambda_ == -1:
# Modified log-likelihood ratio
terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
else:
# General Cressie-Read power divergence.
terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
terms /= 0.5 * lambda_ * (lambda_ + 1)
stat = terms.sum(axis=axis)
num_obs = _count(terms, axis=axis)
    ddof = np.asarray(ddof)
p = distributions.chi2.sf(stat, num_obs - 1 - ddof)
Power_divergenceResult = namedtuple('Power_divergenceResult', ('statistic',
'pvalue'))
return Power_divergenceResult(stat, p)
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
"""
Calculates a one-way chi square test.
The chi square test tests the null hypothesis that the categorical data
has the given frequencies.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
Returns
-------
chisq : float or ndarray
The chi-squared test statistic. The value is a float if `axis` is
None or `f_obs` and `f_exp` are 1-D.
p : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `chisq` are scalars.
See Also
--------
power_divergence
mstats.chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test
Examples
--------
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies.
>>> from scipy.stats import chisquare
>>> chisquare([16, 18, 16, 14, 12, 12])
(2.0, 0.84914503608460956)
With `f_exp` the expected frequencies can be given.
>>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])
(3.5, 0.62338762774958223)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> chisquare(obs)
(array([ 2. , 6.66666667]), array([ 0.84914504, 0.24663415]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> chisquare(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> chisquare(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
chi-squared statistic with `ddof`.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we use ``axis=1``:
>>> chisquare([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8], [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis,
lambda_="pearson")
def ks_2samp(data1, data2):
"""
Computes the Kolmogorov-Smirnov statistic on 2 samples.
This is a two-sided test for the null hypothesis that 2 independent samples
are drawn from the same continuous distribution.
Parameters
----------
data1, data2 : sequence of 1-D ndarrays
two arrays of sample observations assumed to be drawn from a continuous
distribution, sample sizes can be different
Returns
-------
statistic : float
KS statistic
pvalue : float
two-tailed p-value
Notes
-----
This tests whether 2 samples are drawn from the same distribution. Note
that, like in the case of the one-sample K-S test, the distribution is
assumed to be continuous.
This is the two-sided test, one-sided tests are not implemented.
The test uses the two-sided asymptotic Kolmogorov-Smirnov distribution.
If the K-S statistic is small or the p-value is high, then we cannot
reject the hypothesis that the distributions of the two samples
are the same.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) #fix random seed to get the same result
>>> n1 = 200 # size of first sample
>>> n2 = 300 # size of second sample
For a different distribution, we can reject the null hypothesis since the
pvalue is below 1%:
>>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1)
>>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5)
>>> stats.ks_2samp(rvs1, rvs2)
(0.20833333333333337, 4.6674975515806989e-005)
For a slightly different distribution, we cannot reject the null hypothesis
at a 10% or lower alpha since the p-value at 0.144 is higher than 10%
>>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs3)
(0.10333333333333333, 0.14498781825751686)
For an identical distribution, we cannot reject the null hypothesis since
the p-value is high, 41%:
>>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs4)
(0.07999999999999996, 0.41126949729859719)
"""
data1 = np.sort(data1)
data2 = np.sort(data2)
n1 = data1.shape[0]
n2 = data2.shape[0]
data_all = np.concatenate([data1, data2])
cdf1 = np.searchsorted(data1, data_all, side='right') / (1.0*n1)
cdf2 = np.searchsorted(data2, data_all, side='right') / (1.0*n2)
d = np.max(np.absolute(cdf1 - cdf2))
# Note: d absolute not signed distance
en = np.sqrt(n1 * n2 / float(n1 + n2))
try:
prob = distributions.kstwobign.sf((en + 0.12 + 0.11 / en) * d)
    except Exception:
prob = 1.0
Ks_2sampResult = namedtuple('Ks_2sampResult', ('statistic', 'pvalue'))
return Ks_2sampResult(d, prob)
def mannwhitneyu(x, y, use_continuity=True, alternative='two-sided'):
"""
Computes the Mann-Whitney rank test on samples x and y.
Parameters
----------
x, y : array_like
Array of samples, should be one-dimensional.
    use_continuity : bool, optional
        Whether a continuity correction (1/2.) should be taken into
        account. Default is True.
    alternative : {'less', 'greater', 'two-sided'}, optional
        Defines the alternative hypothesis. Default is 'two-sided'.
Returns
-------
    statistic : float
        The Mann-Whitney U statistic.
    pvalue : float
        p-value under the asymptotic normal approximation; one-sided for
        ``alternative`` of 'less' or 'greater', two-sided for 'two-sided'.
Notes
-----
    Use only when the number of observations in each sample is > 20 and
you have 2 independent samples of ranks. Mann-Whitney U is
significant if the u-obtained is LESS THAN or equal to the critical
value of U.
This test corrects for ties and by default uses a continuity correction.
    The reported p-value corresponds to the chosen ``alternative``: it is
    one-sided for 'less' or 'greater' and two-sided for the default
    'two-sided'.
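    Examples
    --------
    A minimal illustrative sketch (no expected output is shown, since the
    values follow from the normal approximation):
    >>> from scipy import stats
    >>> x = [1, 4, 2, 7, 5]
    >>> y = [6, 8, 3, 9, 10]
    >>> u, p = stats.mannwhitneyu(x, y)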
"""
x = np.asarray(x)
y = np.asarray(y)
n1 = len(x)
n2 = len(y)
ranked = rankdata(np.concatenate((x, y)))
rankx = ranked[0:n1] # get the x-ranks
u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx, axis=0) # calc U for x
u2 = n1*n2 - u1 # remainder is U for y
T = tiecorrect(ranked)
if T == 0:
        raise ValueError('All numbers are identical in mannwhitneyu')
sd = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0)
fact2 = 1
meanrank = n1*n2/2.0 + 0.5 * use_continuity
if alternative == 'less':
z = u1 - meanrank
elif alternative == 'greater':
z = u2 - meanrank
elif alternative == 'two-sided':
bigu = max(u1, u2)
z = np.abs(bigu - meanrank)
fact2 = 2.
else:
raise ValueError("alternative should be 'less', 'greater'"
"or 'two-sided'")
z = z / sd
MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic',
'pvalue'))
return MannwhitneyuResult(u2, distributions.norm.sf(z) * fact2)
def ranksums(x, y):
"""
Compute the Wilcoxon rank-sum statistic for two samples.
The Wilcoxon rank-sum test tests the null hypothesis that two sets
of measurements are drawn from the same distribution. The alternative
hypothesis is that values in one sample are more likely to be
larger than the values in the other sample.
This test should be used to compare two samples from continuous
distributions. It does not handle ties between measurements
in x and y. For tie-handling and an optional continuity correction
see `scipy.stats.mannwhitneyu`.
Parameters
----------
    x, y : array_like
The data from the two samples
Returns
-------
statistic : float
The test statistic under the large-sample approximation that the
rank sum statistic is normally distributed
pvalue : float
The two-sided p-value of the test
References
----------
.. [1] http://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
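    Examples
    --------
    A minimal sketch (illustrative data only; doctest skipped):
    >>> import numpy as np
    >>> from scipy import stats
    >>> np.random.seed(0)
    >>> a = np.random.normal(loc=0.0, scale=1.0, size=50)
    >>> b = np.random.normal(loc=0.5, scale=1.0, size=50)
    >>> z, p = stats.ranksums(a, b)  # doctest: +SKIP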
"""
x, y = map(np.asarray, (x, y))
n1 = len(x)
n2 = len(y)
alldata = np.concatenate((x, y))
ranked = rankdata(alldata)
x = ranked[:n1]
s = np.sum(x, axis=0)
expected = n1 * (n1+n2+1) / 2.0
z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0)
prob = 2 * distributions.norm.sf(abs(z))
RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue'))
return RanksumsResult(z, prob)
def kruskal(*args, **kwargs):
"""
Compute the Kruskal-Wallis H-test for independent samples
    The Kruskal-Wallis H-test tests the null hypothesis that the population
    medians of all of the groups are equal. It is a non-parametric version of
ANOVA. The test works on 2 or more independent samples, which may have
different sizes. Note that rejecting the null hypothesis does not
indicate which of the groups differs. Post-hoc comparisons between
groups are required to determine which groups are different.
Parameters
----------
sample1, sample2, ... : array_like
Two or more arrays with the sample measurements can be given as
arguments.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The Kruskal-Wallis H statistic, corrected for ties
pvalue : float
The p-value for the test using the assumption that H has a chi
square distribution
See Also
--------
f_oneway : 1-way ANOVA
mannwhitneyu : Mann-Whitney rank test on two samples.
friedmanchisquare : Friedman test for repeated measurements
Notes
-----
Due to the assumption that H has a chi square distribution, the number
of samples in each group must not be too small. A typical rule is
that each sample must have at least 5 measurements.
References
----------
.. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in
One-Criterion Variance Analysis", Journal of the American Statistical
Association, Vol. 47, Issue 260, pp. 583-621, 1952.
.. [2] http://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance
Examples
--------
>>> from scipy import stats
>>> x = [1, 3, 5, 7, 9]
>>> y = [2, 4, 6, 8, 10]
>>> stats.kruskal(x, y)
KruskalResult(statistic=0.27272727272727337, pvalue=0.60150813444058948)
>>> x = [1, 1, 1]
>>> y = [2, 2, 2]
>>> z = [2, 2]
>>> stats.kruskal(x, y, z)
KruskalResult(statistic=7.0, pvalue=0.030197383422318501)
"""
args = list(map(np.asarray, args))
num_groups = len(args)
if num_groups < 2:
raise ValueError("Need at least two groups in stats.kruskal()")
KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
for arg in args:
if arg.size == 0:
return KruskalResult(np.nan, np.nan)
n = np.asarray(list(map(len, args)))
    if 'nan_policy' in kwargs:
        if kwargs['nan_policy'] not in ('propagate', 'raise', 'omit'):
            raise ValueError("nan_policy must be 'propagate', "
                             "'raise' or 'omit'")
        else:
            nan_policy = kwargs['nan_policy']
    else:
        nan_policy = 'propagate'
contains_nan = False
for arg in args:
cn = _contains_nan(arg, nan_policy)
if cn[0]:
contains_nan = True
break
if contains_nan and nan_policy == 'omit':
        # Mask invalid (NaN) entries so the masked-statistics implementation
        # can ignore them.
        args = [ma.masked_invalid(a) for a in args]
        return mstats_basic.kruskal(*args)
if contains_nan and nan_policy == 'propagate':
return KruskalResult(np.nan, np.nan)
alldata = np.concatenate(args)
ranked = rankdata(alldata)
ties = tiecorrect(ranked)
if ties == 0:
raise ValueError('All numbers are identical in kruskal')
# Compute sum^2/n for each group and sum
j = np.insert(np.cumsum(n), 0, 0)
ssbn = 0
for i in range(num_groups):
ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / float(n[i])
totaln = np.sum(n)
h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
df = num_groups - 1
h /= ties
return KruskalResult(h, distributions.chi2.sf(h, df))
def friedmanchisquare(*args):
"""
Computes the Friedman test for repeated measurements
The Friedman test tests the null hypothesis that repeated measurements of
the same individuals have the same distribution. It is often used
to test for consistency among measurements obtained in different ways.
For example, if two measurement techniques are used on the same set of
individuals, the Friedman test can be used to determine if the two
measurement techniques are consistent.
Parameters
----------
measurements1, measurements2, measurements3... : array_like
Arrays of measurements. All of the arrays must have the same number
of elements. At least 3 sets of measurements must be given.
Returns
-------
statistic : float
the test statistic, correcting for ties
pvalue : float
the associated p-value assuming that the test statistic has a chi
squared distribution
Notes
-----
Due to the assumption that the test statistic has a chi squared
distribution, the p-value is only reliable for n > 10 and more than
6 repeated measurements.
References
----------
.. [1] http://en.wikipedia.org/wiki/Friedman_test
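    Examples
    --------
    Illustrative only (made-up repeated measurements; doctest skipped):
    >>> from scipy import stats
    >>> m1 = [7, 9, 8, 6, 5, 9, 8, 7, 8, 6, 7]
    >>> m2 = [5, 7, 6, 5, 4, 7, 6, 5, 6, 4, 5]
    >>> m3 = [8, 9, 9, 7, 6, 9, 9, 8, 8, 7, 8]
    >>> chi2, p = stats.friedmanchisquare(m1, m2, m3)  # doctest: +SKIP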
"""
k = len(args)
if k < 3:
        raise ValueError('Less than 3 levels. Friedman test not appropriate.')
n = len(args[0])
for i in range(1, k):
if len(args[i]) != n:
raise ValueError('Unequal N in friedmanchisquare. Aborting.')
# Rank data
data = np.vstack(args).T
data = data.astype(float)
for i in range(len(data)):
data[i] = rankdata(data[i])
# Handle ties
ties = 0
for i in range(len(data)):
replist, repnum = find_repeats(array(data[i]))
for t in repnum:
ties += t * (t*t - 1)
c = 1 - ties / float(k*(k*k - 1)*n)
ssbn = np.sum(data.sum(axis=0)**2)
chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c
FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
('statistic', 'pvalue'))
return FriedmanchisquareResult(chisq, distributions.chi2.sf(chisq, k - 1))
def combine_pvalues(pvalues, method='fisher', weights=None):
"""
Methods for combining the p-values of independent tests bearing upon the
same hypothesis.
Parameters
----------
pvalues : array_like, 1-D
Array of p-values assumed to come from independent tests.
method : {'fisher', 'stouffer'}, optional
Name of method to use to combine p-values. The following methods are
available:
- "fisher": Fisher's method (Fisher's combined probability test),
the default.
- "stouffer": Stouffer's Z-score method.
weights : array_like, 1-D, optional
Optional array of weights used only for Stouffer's Z-score method.
Returns
-------
    statistic : float
        The statistic calculated by the specified method:
        - "fisher": The chi-squared statistic
        - "stouffer": The Z-score
    pval : float
        The combined p-value.
Notes
-----
Fisher's method (also known as Fisher's combined probability test) [1]_ uses
a chi-squared statistic to compute a combined p-value. The closely related
Stouffer's Z-score method [2]_ uses Z-scores rather than p-values. The
advantage of Stouffer's method is that it is straightforward to introduce
weights, which can make Stouffer's method more powerful than Fisher's
method when the p-values are from studies of different size [3]_ [4]_.
Fisher's method may be extended to combine p-values from dependent tests
[5]_. Extensions such as Brown's method and Kost's method are not currently
implemented.
.. versionadded:: 0.15.0
References
----------
.. [1] https://en.wikipedia.org/wiki/Fisher%27s_method
.. [2] http://en.wikipedia.org/wiki/Fisher's_method#Relation_to_Stouffer.27s_Z-score_method
.. [3] Whitlock, M. C. "Combining probability from independent tests: the
weighted Z-method is superior to Fisher's approach." Journal of
Evolutionary Biology 18, no. 5 (2005): 1368-1373.
.. [4] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
for combining probabilities in meta-analysis." Journal of
Evolutionary Biology 24, no. 8 (2011): 1836-1841.
.. [5] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method
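    Examples
    --------
    A minimal sketch of both methods (arbitrary p-values and weights;
    doctest skipped):
    >>> from scipy import stats
    >>> pvals = [0.01, 0.20, 0.30]
    >>> stat, p = stats.combine_pvalues(pvals, method='fisher')  # doctest: +SKIP
    >>> z, p = stats.combine_pvalues(pvals, method='stouffer', weights=[1, 2, 3])  # doctest: +SKIP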
"""
pvalues = np.asarray(pvalues)
if pvalues.ndim != 1:
raise ValueError("pvalues is not 1-D")
if method == 'fisher':
Xsq = -2 * np.sum(np.log(pvalues))
pval = distributions.chi2.sf(Xsq, 2 * len(pvalues))
return (Xsq, pval)
elif method == 'stouffer':
if weights is None:
weights = np.ones_like(pvalues)
elif len(weights) != len(pvalues):
raise ValueError("pvalues and weights must be of the same size.")
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("weights is not 1-D")
Zi = distributions.norm.isf(pvalues)
Z = np.dot(weights, Zi) / np.linalg.norm(weights)
pval = distributions.norm.sf(Z)
return (Z, pval)
else:
        raise ValueError(
            "Invalid method '%s'. Options are 'fisher' or 'stouffer'" % method)
#####################################
# PROBABILITY CALCULATIONS #
#####################################
@np.deprecate(message="stats.chisqprob is deprecated in scipy 0.17.0; "
"use stats.distributions.chi2.sf instead.")
def chisqprob(chisq, df):
"""
Probability value (1-tail) for the Chi^2 probability distribution.
Broadcasting rules apply.
Parameters
----------
    chisq : array_like or float > 0
        The chi-squared statistic(s).
    df : array_like or float
        The degrees of freedom, typically an integer >= 1.
Returns
-------
chisqprob : ndarray
The area from `chisq` to infinity under the Chi^2 probability
distribution with degrees of freedom `df`.
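    Examples
    --------
    The non-deprecated equivalent call (illustrative values; doctest skipped):
    >>> from scipy.stats import distributions
    >>> distributions.chi2.sf(3.84, 1)  # doctest: +SKIP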
"""
return distributions.chi2.sf(chisq, df)
@np.deprecate(message="stats.betai is deprecated in scipy 0.17.0; "
"use special.betainc instead")
def betai(a, b, x):
"""
Returns the incomplete beta function.
I_x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)
where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
function of a.
The standard broadcasting rules apply to a, b, and x.
Parameters
----------
a : array_like or float > 0
b : array_like or float > 0
x : array_like or float
        x will be clipped to be no greater than 1.0.
Returns
-------
betai : ndarray
Incomplete beta function.
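    Examples
    --------
    The recommended replacement (illustrative values; doctest skipped):
    >>> from scipy import special
    >>> special.betainc(2.0, 3.0, 0.5)  # doctest: +SKIP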
"""
return _betai(a, b, x)
def _betai(a, b, x):
x = np.asarray(x)
x = np.where(x < 1.0, x, 1.0) # if x > 1 then return 1.0
return special.betainc(a, b, x)
#####################################
# ANOVA CALCULATIONS #
#####################################
@np.deprecate(message="stats.f_value_wilks_lambda deprecated in scipy 0.17.0")
def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):
"""Calculation of Wilks lambda F-statistic for multivarite data, per
Maxwell & Delaney p.657.
"""
if isinstance(ER, (int, float)):
ER = array([[ER]])
if isinstance(EF, (int, float)):
EF = array([[EF]])
lmbda = linalg.det(EF) / linalg.det(ER)
if (a-1)**2 + (b-1)**2 == 5:
q = 1
else:
q = np.sqrt(((a-1)**2*(b-1)**2 - 2) / ((a-1)**2 + (b-1)**2 - 5))
n_um = (1 - lmbda**(1.0/q))*(a-1)*(b-1)
d_en = lmbda**(1.0/q) / (n_um*q - 0.5*(a-1)*(b-1) + 1)
return n_um / d_en
@np.deprecate(message="stats.f_value deprecated in scipy 0.17.0")
def f_value(ER, EF, dfR, dfF):
"""
Returns an F-statistic for a restricted vs. unrestricted model.
Parameters
----------
ER : float
`ER` is the sum of squared residuals for the restricted model
or null hypothesis
EF : float
`EF` is the sum of squared residuals for the unrestricted model
or alternate hypothesis
dfR : int
`dfR` is the degrees of freedom in the restricted model
dfF : int
`dfF` is the degrees of freedom in the unrestricted model
Returns
-------
F-statistic : float
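    Examples
    --------
    A made-up restricted vs. unrestricted comparison (doctest skipped):
    >>> f_value(ER=120.0, EF=100.0, dfR=12, dfF=10)  # doctest: +SKIP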
"""
return (ER - EF) / float(dfR - dfF) / (EF / float(dfF))
@np.deprecate(message="stats.f_value_multivariate deprecated in scipy 0.17.0")
def f_value_multivariate(ER, EF, dfnum, dfden):
"""
Returns a multivariate F-statistic.
Parameters
----------
ER : ndarray
Error associated with the null hypothesis (the Restricted model).
From a multivariate F calculation.
EF : ndarray
Error associated with the alternate hypothesis (the Full model)
From a multivariate F calculation.
dfnum : int
        Degrees of freedom of the Restricted model.
    dfden : int
        Degrees of freedom of the Full model.
Returns
-------
fstat : float
The computed F-statistic.
"""
if isinstance(ER, (int, float)):
ER = array([[ER]])
if isinstance(EF, (int, float)):
EF = array([[EF]])
n_um = (linalg.det(ER) - linalg.det(EF)) / float(dfnum)
d_en = linalg.det(EF) / float(dfden)
return n_um / d_en
#####################################
# SUPPORT FUNCTIONS #
#####################################
@np.deprecate(message="scipy.stats.ss is deprecated in scipy 0.17.0")
def ss(a, axis=0):
return _sum_of_squares(a, axis)
def _sum_of_squares(a, axis=0):
"""
Squares each element of the input array, and returns the sum(s) of that.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
sum_of_squares : ndarray
The sum along the given axis for (a**2).
See also
--------
_square_of_sums : The square(s) of the sum(s) (the opposite of
`_sum_of_squares`).
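    Examples
    --------
    Illustrative (private helper; doctest skipped):
    >>> import numpy as np
    >>> _sum_of_squares(np.array([1., 2., 3.]))  # doctest: +SKIP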
"""
a, axis = _chk_asarray(a, axis)
return np.sum(a*a, axis)
@np.deprecate(message="scipy.stats.square_of_sums is deprecated "
"in scipy 0.17.0")
def square_of_sums(a, axis=0):
return _square_of_sums(a, axis)
def _square_of_sums(a, axis=0):
"""
Sums elements of the input array, and returns the square(s) of that sum.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
square_of_sums : float or ndarray
The square of the sum over `axis`.
See also
--------
_sum_of_squares : The sum of squares (the opposite of `square_of_sums`).
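    Examples
    --------
    Illustrative (private helper; doctest skipped):
    >>> import numpy as np
    >>> _square_of_sums(np.array([1., 2., 3.]))  # doctest: +SKIP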
"""
a, axis = _chk_asarray(a, axis)
s = np.sum(a, axis)
if not np.isscalar(s):
return s.astype(float) * s
else:
return float(s) * s
@np.deprecate(message="scipy.stats.fastsort is deprecated in scipy 0.16.0")
def fastsort(a):
"""
Sort an array and provide the argsort.
Parameters
----------
a : array_like
Input array.
Returns
-------
    sorted : ndarray
        The values of `a` in sorted order.
    indices : ndarray of int
        The indices that sort the original array (``np.argsort(a)``).
    """
it = np.argsort(a)
as_ = a[it]
return as_, it
|
bsd-3-clause
|
jbarnoud/PBxplore
|
pbxplore/analysis/visualization.py
|
1
|
7539
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
# Standard modules
import os
import math
# Third-party modules
import numpy
# Conditional imports
try:
import matplotlib
import matplotlib.pyplot as plt
except ImportError:
IS_MATPLOTLIB = False
else:
IS_MATPLOTLIB = True
try:
import weblogolib
except ImportError:
IS_WEBLOGO = False
else:
IS_WEBLOGO = True
# Local modules
from .. import PB
from . import utils
# Python2/Python3 compatibility
# In Python 3, range returns a lazy sequence rather than a list, while in
# Python 2 range returns a list and xrange is the lazy variant. Aliasing
# range to xrange below makes range lazy in both versions; use list(range(...))
# when an actual list is needed.
try:
range = xrange
except NameError:
pass
# Create the __all__ keyword according to the conditional imports
__all__ = []
if IS_MATPLOTLIB:
__all__ += ['plot_neq', 'plot_map']
if IS_WEBLOGO:
__all__ += ['generate_weblogo']
def plot_neq(fname, neq_array, residue_min=1, residue_max=None):
"""
Generate the Neq plot along the protein sequence
Parameters
----------
neq_array : numpy array
an array containing the neq value associated to the residue number
fname : str
The path to the file to write in
residue_min: int
the lower bound of the protein sequence
residue_max: int
the upper bound of the protein sequence
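    Examples
    --------
    Hypothetical call, assuming ``neq`` is a 1-D numpy array of Neq values
    and ``"neq.png"`` is the desired output file (doctest skipped):
    >>> plot_neq("neq.png", neq)  # doctest: +SKIP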
"""
neq = utils._slice_matrix(neq_array, residue_min, residue_max)
nb_residues = neq.shape[0]
# Residue number with good offset given the slice
x = numpy.arange(residue_min, residue_min + nb_residues)
fig = plt.figure(figsize=(2.0*math.log(nb_residues), 5))
ax = fig.add_subplot(1, 1, 1)
ax.set_ylim([0, round(max(neq), 0)+1])
ax.plot(x, neq)
ax.set_xlabel('Residue number', fontsize=18)
ax.set_ylabel('Neq', fontsize=18, style='italic')
fig.savefig(fname)
def plot_map(fname, count_mat, residue_min=1, residue_max=None):
"""
Generate a map of the distribution of PBs along protein sequence from
an occurence matrix.
Parameters
----------
fname : str
The path to the file to write in
count_mat : numpy array
an occurence matrix returned by `count_matrix`.
residue_min: int
the lower bound of the protein sequence
residue_max: int
the upper bound of the protein sequence
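    Examples
    --------
    Hypothetical call, assuming ``counts`` is an occurrence matrix returned
    by ``count_matrix`` (doctest skipped):
    >>> plot_map("map.png", counts)  # doctest: +SKIP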
"""
# Get the frequency matrix
freq_mat = utils.compute_freq_matrix(count_mat)
# Slice it
freq = utils._slice_matrix(freq_mat, residue_min, residue_max)
nb_residues = freq.shape[0]
# Residue number with good offset given the slice
x = numpy.arange(residue_min, residue_min + nb_residues)
# define ticks for x-axis
x_step = 5
xticks = x[::x_step]
# trying to round ticks: 5, 10, 15 instead of 6, 11, 16...
if xticks[0] == 1:
xticks = xticks-1
xticks[0] += 1
yticks = ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p')
fig = plt.figure(figsize=(2.0*math.log(nb_residues), 4))
ax = fig.add_axes([0.1, 0.1, 0.75, 0.8])
# Color scheme inspired from ColorBrewer
# http://colorbrewer2.org/?type=diverging&scheme=RdYlBu&n=5
# This color scheme is colorblind safe
colors = [(44.0 / 255, 123.0 / 255, 182.0 / 255),
(171.0 / 255, 217.0 / 255, 233.0 / 255),
(255.0 / 255, 255.0 / 255, 191.0 / 255),
(253.0 / 255, 174.0 / 255, 97.0 / 255),
(215.0 / 255, 25.0 / 255, 28.0 / 255)]
cmap = matplotlib.colors.LinearSegmentedColormap.from_list('ColBrewerRdYlBu', colors)
img = ax.imshow(numpy.transpose(freq[:, :]), interpolation='none', vmin=0, vmax=1,
origin='lower', aspect='auto', cmap=cmap)
ax.set_xticks(xticks - numpy.min(xticks))
ax.set_xticklabels(xticks)
ax.set_yticks(range(len(yticks)))
ax.set_yticklabels(yticks, style='italic', weight='bold')
colorbar_ax = fig.add_axes([0.87, 0.1, 0.03, 0.8])
fig.colorbar(img, cax=colorbar_ax)
# print "beta-strand", "coil" and "alpha-helix" text
# only if there is more than 20 residues
if nb_residues >= 20:
# center alpha-helix: PB m (13th PB out of 16 PBs)
# center coil: PB h and i (8th and 9th PBs out of 16 PBs)
# center beta-sheet: PB d (4th PB out of 16 PBs)
fig.text(0.05, 4.0/16*0.8+0.075, r"$\beta$-strand", rotation=90,
va='center', transform=ax.transAxes)
fig.text(0.05, 8.5/16*0.8+0.075, r"coil", rotation=90,
va='center')
fig.text(0.05, 13.0/16*0.8+0.075, r"$\alpha$-helix", rotation=90,
va='center', transform=ax.transAxes)
fig.text(0.01, 0.5, "PBs", rotation=90, weight="bold",
size='larger', transform=ax.transAxes)
fig.text(0.4, 0.01, "Residue number", weight="bold")
fig.text(0.96, 0.6, "Intensity", rotation=90, weight="bold")
fig.savefig(fname, dpi=300)
def generate_weblogo(fname, count_mat, residue_min=1, residue_max=None, title=""):
"""
Generates logo representation of PBs frequency along protein sequence through
the weblogo library.
The weblogo reference:
G. E. Crooks, G. Hon, J.-M. Chandonia, and S. E. Brenner.
'WebLogo: A Sequence Logo Generator.'
Genome Research 14:1188–90 (2004)
doi:10.1101/gr.849004.
http://weblogo.threeplusone.com/
Parameters
----------
fname : str
The path to the file to write in
count_mat : numpy array
an occurence matrix returned by `count_matrix`.
residue_min: int
the lower bound of residue frame
residue_max: int
the upper bound of residue frame
title: str
the title of the weblogo. Default is empty.
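    Examples
    --------
    Hypothetical call, assuming ``counts`` is an occurrence matrix returned
    by ``count_matrix`` (doctest skipped):
    >>> generate_weblogo("logo.png", counts, title="PB frequencies")  # doctest: +SKIP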
"""
# Slice the matrix
count = utils._slice_matrix(count_mat, residue_min, residue_max)
# Create a custom color scheme for PB
colors = weblogolib.ColorScheme([weblogolib.ColorGroup("d", "#1240AB", "strand main"),
weblogolib.ColorGroup("abcdef", "#1240AB", "strand others"),
weblogolib.ColorGroup("ghij", "#0BD500", "coil"),
weblogolib.ColorGroup("m", "#FD0006", "helix main"),
weblogolib.ColorGroup("klnop", "#FD0006", "helix others")])
# Load data from an occurence matrix
data = weblogolib.LogoData.from_counts(PB.NAMES, count)
# Create options
options = weblogolib.LogoOptions(fineprint=False, logo_title=title, color_scheme=colors,
stack_width=weblogolib.std_sizes["large"],
first_residue=residue_min)
# Generate weblogo
logo = weblogolib.LogoFormat(data, options)
# Retrieve image format
image_format = os.path.splitext(fname)[1][1:]
# Retrieve the right function given the image format
try:
if image_format == 'jpg':
image_format = 'jpeg'
formatter = weblogolib.formatters[image_format]
except KeyError:
raise ValueError("Invalid format image '{0}'."
" Valid ones are : eps, png, pdf, jpg/jpeg, svg".format(image_format))
# Format the logo
image = formatter(data, logo)
# Write it
with open(fname, "w") as f:
print(image, file=f)
|
mit
|
benoyantony/zeppelin
|
python/src/main/resources/python/bootstrap_sql.py
|
60
|
1189
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Setup SQL over Pandas DataFrames
# It requires next dependencies to be installed:
# - pandas
# - pandasql
from __future__ import print_function
try:
from pandasql import sqldf
pysqldf = lambda q: sqldf(q, globals())
except ImportError:
    pysqldf = lambda q: print("Can not run SQL over Pandas DataFrame. "
                              "Make sure 'pandas' and 'pandasql' libraries are installed")
|
apache-2.0
|
russel1237/scikit-learn
|
examples/model_selection/plot_roc.py
|
96
|
4487
|
"""
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
##############################################################################
# Plot of a ROC curve for a specific class
plt.figure()
plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
##############################################################################
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area
fpr["macro"] = np.mean([fpr[i] for i in range(n_classes)], axis=0)
tpr["macro"] = np.mean([tpr[i] for i in range(n_classes)], axis=0)
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
linewidth=2)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
linewidth=2)
for i in range(n_classes):
plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
|
bsd-3-clause
|
openbiosd/PrecisionFda-Test-App
|
variant_plot.py
|
1
|
1034
|
#%matplotlib inline
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
print(sys.argv)
if len(sys.argv) > 1:
df = pd.read_csv(sys.argv[1])
else:
df = pd.read_csv("MAPT_ExACScores.csv")
high_frequency = df[df["ALLELE FREQUENCY"]>0.05]
x = list(high_frequency["AA_POS"])
y = list(high_frequency["ALLELE FREQUENCY"])
mutation = list(high_frequency["MUTATION"])
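# Collapse entries that share the same amino-acid position so each position is
# annotated once with a combined mutation label.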
x_dup = [x[0]]
y_dup = [y[0]]
mutation_dup = [mutation[0]]
for aa in range(1, len(x)):
if x[aa] == x[aa-1]:
mutation_dup[-1] = mutation_dup[-1] + ', ' + mutation[aa]
else:
x_dup.append(x[aa])
y_dup.append(y[aa])
mutation_dup.append(mutation[aa])
fig = plt.figure()
ax = fig.add_subplot(111)
x = list(df.AA_POS)
y = list(df["ALLELE FREQUENCY"])
plt.plot(x, y)
plt.axhline(y=0.05, color='r')
for i in range(len(x_dup)):
ax.annotate(mutation_dup[i], xy=[x_dup[i],y_dup[i]], textcoords='data', rotation=70)
plt.grid()
plt.savefig('variant_plot.tiff')
#plt.show()
|
apache-2.0
|
jorik041/scikit-learn
|
sklearn/linear_model/tests/test_perceptron.py
|
378
|
1815
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
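# Minimal reference implementation of the perceptron update rule, used below to
# cross-check sklearn's Perceptron in test_perceptron_correctness.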
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
|
bsd-3-clause
|
patrick-nicholson/spark
|
python/pyspark/sql/session.py
|
1
|
25922
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
else:
from itertools import imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.catalog import Catalog
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, _verify_type, \
_infer_schema, _has_nulltype, _merge_type, _create_converter, _parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
def toDF(self, schema=None, sampleRatio=None):
"""
Converts current :class:`RDD` into a :class:`DataFrame`
This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``
:param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
        :param sampleRatio: the sample ratio of rows used for inferring
:return: a DataFrame
>>> rdd.toDF().collect()
[Row(name=u'Alice', age=1)]
"""
return sparkSession.createDataFrame(self, schema, sampleRatio)
RDD.toDF = toDF
class SparkSession(object):
"""The entry point to programming Spark with the Dataset and DataFrame API.
    A SparkSession can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
"""
class Builder(object):
"""Builder for :class:`SparkSession`.
"""
_lock = RLock()
_options = {}
@since(2.0)
def config(self, key=None, value=None, conf=None):
"""Sets a config option. Options set using this method are automatically propagated to
both :class:`SparkConf` and :class:`SparkSession`'s own configuration.
For an existing SparkConf, use `conf` parameter.
>>> from pyspark.conf import SparkConf
>>> SparkSession.builder.config(conf=SparkConf())
<pyspark.sql.session...
For a (key, value) pair, you can omit parameter names.
>>> SparkSession.builder.config("spark.some.config.option", "some-value")
<pyspark.sql.session...
:param key: a key name string for configuration property
:param value: a value for configuration property
:param conf: an instance of :class:`SparkConf`
"""
with self._lock:
if conf is None:
self._options[key] = str(value)
else:
for (k, v) in conf.getAll():
self._options[k] = v
return self
@since(2.0)
def master(self, master):
"""Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
cluster.
:param master: a url for spark master
"""
return self.config("spark.master", master)
@since(2.0)
def appName(self, name):
"""Sets a name for the application, which will be shown in the Spark web UI.
If no application name is set, a randomly generated name will be used.
:param name: an application name
"""
return self.config("spark.app.name", name)
@since(2.0)
def enableHiveSupport(self):
"""Enables Hive support, including connectivity to a persistent Hive metastore, support
for Hive serdes, and Hive user-defined functions.
"""
return self.config("spark.sql.catalogImplementation", "hive")
@since(2.0)
def getOrCreate(self):
"""Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
new one based on the options set in this builder.
This method first checks whether there is a valid global default SparkSession, and if
            yes, returns that one. If no valid global default SparkSession exists, the method
creates a new SparkSession and assigns the newly created SparkSession as the global
default.
>>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
>>> s1.conf.get("k1") == s1.sparkContext.getConf().get("k1") == "v1"
True
In case an existing SparkSession is returned, the config options specified
in this builder will be applied to the existing SparkSession.
>>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
>>> s1.conf.get("k1") == s2.conf.get("k1")
True
>>> s1.conf.get("k2") == s2.conf.get("k2")
True
"""
with self._lock:
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
session = SparkSession._instantiatedSession
if session is None or session._sc._jsc is None:
sparkConf = SparkConf()
for key, value in self._options.items():
sparkConf.set(key, value)
sc = SparkContext.getOrCreate(sparkConf)
# This SparkContext may be an existing one.
for key, value in self._options.items():
# we need to propagate the confs
# before we create the SparkSession. Otherwise, confs like
# warehouse path and metastore url will not be set correctly (
# these confs cannot be changed once the SparkSession is created).
sc._conf.set(key, value)
session = SparkSession(sc)
for key, value in self._options.items():
session._jsparkSession.sessionState().conf().setConfString(key, value)
for key, value in self._options.items():
session.sparkContext._conf.set(key, value)
return session
builder = Builder()
_instantiatedSession = None
@ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
"""Creates a new SparkSession.
>>> from datetime import datetime
>>> spark = SparkSession(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
from pyspark.sql.context import SQLContext
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if jsparkSession is None:
jsparkSession = self._jvm.SparkSession(self._jsc.sc())
self._jsparkSession = jsparkSession
self._jwrapped = self._jsparkSession.sqlContext()
self._wrapped = SQLContext(self._sc, self, self._jwrapped)
_monkey_patch_RDD(self)
install_exception_handler()
# If we had an instantiated SparkSession attached with a SparkContext
# which is stopped now, we need to renew the instantiated SparkSession.
# Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
if SparkSession._instantiatedSession is None \
or SparkSession._instantiatedSession._sc._jsc is None:
SparkSession._instantiatedSession = self
def _repr_html_(self):
return """
<div>
<p><b>SparkSession - {catalogImplementation}</b></p>
{sc_HTML}
</div>
""".format(
catalogImplementation=self.conf.get("spark.sql.catalogImplementation"),
sc_HTML=self.sparkContext._repr_html_()
)
@since(2.0)
def newSession(self):
"""
Returns a new SparkSession as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self._jsparkSession.newSession())
@property
@since(2.0)
def sparkContext(self):
"""Returns the underlying :class:`SparkContext`."""
return self._sc
@property
@since(2.0)
def version(self):
"""The version of Spark on which this application is running."""
return self._jsparkSession.version()
@property
@since(2.0)
def conf(self):
"""Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any.
"""
if not hasattr(self, "_conf"):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf
@property
@since(2.0)
def catalog(self):
"""Interface through which the user may create, drop, alter or query underlying
databases, tables, functions etc.
"""
if not hasattr(self, "_catalog"):
self._catalog = Catalog(self)
return self._catalog
@property
@since(2.0)
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
from pyspark.sql.context import UDFRegistration
return UDFRegistration(self._wrapped)
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped)
def _inferSchemaFromList(self, data):
"""
Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:return: :class:`pyspark.sql.types.StructType`
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
warnings.warn("inferring schema from dict is deprecated,"
"please use pyspark.sql.Row instead")
schema = reduce(_merge_type, map(_infer_schema, data))
if _has_nulltype(schema):
raise ValueError("Some of types cannot be determined after inferring")
return schema
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
first = rdd.first()
if not first:
raise ValueError("The first row in RDD is empty, "
"can not infer schema")
if type(first) is dict:
warnings.warn("Using RDD of dict to inferSchema is deprecated. "
"Use pyspark.sql.Row instead")
if samplingRatio is None:
schema = _infer_schema(first)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined by the "
"first 100 rows, please try again with sampling")
else:
if samplingRatio < 0.99:
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(_infer_schema).reduce(_merge_type)
return schema
def _createFromRDD(self, rdd, schema, samplingRatio):
"""
Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
"""
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchema(rdd, samplingRatio)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
rdd = rdd.map(schema.toInternal)
return rdd, schema
def _createFromLocal(self, data, schema, numSlices=None):
"""
Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema.
"""
# make sure data could consumed multiple times
if not isinstance(data, list):
data = list(data)
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchemaFromList(data)
converter = _create_converter(struct)
data = map(converter, data)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
data = [schema.toInternal(row) for row in data]
return self._sc.parallelize(data, numSlices=numSlices), schema
@since(2.0)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True,
numSlices=None):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
        If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean,
etc.), or :class:`list`, or :class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is ``None``. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
``int`` as a short name for ``IntegerType``.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:param numSlices: specify as :class:`int` the number of slices (partitions) to distribute
``data`` across. Applies to ``data`` of :class:`list` or :class:`pandas.DataFrame`.
Defaults to `self.sparkContext.defaultParallelism`.
:return: :class:`DataFrame`
.. versionchanged:: 2.1
Added verifySchema.
>>> l = [('Alice', 1)]
>>> spark.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> spark.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> spark.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> spark.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = spark.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = spark.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = spark.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> spark.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> spark.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
if isinstance(data, DataFrame):
raise TypeError("data is already a DataFrame")
if isinstance(schema, basestring):
schema = _parse_datatype_string(schema)
try:
import pandas
has_pandas = True
except Exception:
has_pandas = False
if has_pandas and isinstance(data, pandas.DataFrame):
if schema is None:
schema = [str(x) for x in data.columns]
data = [r.tolist() for r in data.to_records(index=False)]
verify_func = _verify_type if verifySchema else lambda _, t: True
if isinstance(schema, StructType):
def prepare(obj):
verify_func(obj, schema)
return obj
elif isinstance(schema, DataType):
dataType = schema
schema = StructType().add("value", schema)
def prepare(obj):
verify_func(obj, dataType)
return obj,
else:
if isinstance(schema, list):
schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
prepare = lambda obj: obj
if isinstance(data, RDD):
rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
else:
rdd, schema = self._createFromLocal(map(prepare, data), schema, numSlices=numSlices)
jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
@since(2.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
@property
@since(2.0)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self._wrapped)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Experimental.
:return: :class:`DataStreamReader`
"""
return DataStreamReader(self._wrapped)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Experimental.
:return: :class:`StreamingQueryManager`
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._jsparkSession.streams())
@since(2.0)
def stop(self):
"""Stop the underlying :class:`SparkContext`.
"""
self._sc.stop()
SparkSession._instantiatedSession = None
@since(2.0)
def __enter__(self):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
"""
return self
@since(2.0)
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
Specifically stop the SparkSession on exit of the with block.
"""
self.stop()
def _test():
import os
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row
import pyspark.sql.session
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.session.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['spark'] = SparkSession(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")])
globs['df'] = rdd.toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.session, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
apache-2.0
|
jpautom/scikit-learn
|
sklearn/ensemble/__init__.py
|
153
|
1382
|
"""
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification, regression and anomaly detection.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .iforest import IsolationForest
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "IsolationForest", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
|
bsd-3-clause
|
Titan-C/scikit-learn
|
examples/text/hashing_vs_dict_vectorizer.py
|
93
|
3243
|
"""
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
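    Illustrative (doctest skipped):
    >>> list(tokens("Hello, hashing world!"))  # doctest: +SKIP
    ['hello', 'hashing', 'world']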
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
|
bsd-3-clause
|
AnasGhrab/scikit-learn
|
sklearn/tree/tree.py
|
113
|
34767
|
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checks. Don't use this parameter
unless you know what you are doing.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
# reshape is necessary to preserve data contiguity, which indexing
# with [:, np.newaxis] would not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checks. Don't use this parameter
unless you know what you are doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checks. Don't use this parameter
unless you know what you are doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
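# --- Illustrative note (added; not part of the original module) ---
# feature_importances_ above is the normalized total impurity reduction that
# each feature contributes over all of its splits, so the values are
# non-negative and sum to one. A hedged usage sketch (indicative only):
#
#     from sklearn.datasets import load_iris
#     from sklearn.tree import DecisionTreeClassifier
#     iris = load_iris()
#     clf = DecisionTreeClassifier(random_state=0).fit(iris.data, iris.target)
#     print(clf.feature_importances_)        # one value per feature
#     print(clf.feature_importances_.sum())  # approximately 1.0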
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checks. Don't use this parameter
unless you know what you are doing.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
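# --- Illustrative note (added; not part of the original module) ---
# predict_proba above returns, for each sample, the fraction of training
# samples of each class in the leaf that the sample reaches, so each row sums
# to one. A hedged usage sketch (indicative only):
#
#     from sklearn.datasets import load_iris
#     from sklearn.tree import DecisionTreeClassifier
#     iris = load_iris()
#     clf = DecisionTreeClassifier(max_depth=2, random_state=0)
#     clf.fit(iris.data, iris.target)
#     proba = clf.predict_proba(iris.data[:3])  # shape (3, n_classes)
#     # For iris, classes_ is [0, 1, 2], so argmax reproduces clf.predict():
#     print(proba.argmax(axis=1))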
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
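# --- Illustrative note (added; not part of the original module) ---
# As the ExtraTree* docstrings warn, extremely randomized trees are meant to
# be used inside ensembles rather than on their own. A hedged sketch of the
# usual pattern (assumes sklearn.ensemble from the same release):
#
#     from sklearn.datasets import load_iris
#     from sklearn.ensemble import BaggingClassifier
#     from sklearn.tree import ExtraTreeClassifier
#     iris = load_iris()
#     bagging = BaggingClassifier(ExtraTreeClassifier(random_state=0),
#                                 n_estimators=10, random_state=0)
#     bagging.fit(iris.data, iris.target)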
| bsd-3-clause | downpat/mit-6000 | problem-set-6/ps6_pkgtest.py | 3 | 11790 |
##import matplotlib
import numpy
import pylab
data = [(4.9,'2011/03/12 23:57:34'),
(4.9,'2011/03/12 23:53:45'),
(5.0,'2011/03/12 23:51:24'),
(5.2,'2011/03/12 23:40:49'),
(5.1,'2011/03/12 23:37:24'),
(6.1,'2011/03/12 23:24:50'),
(5.4,'2011/03/12 23:20:42'),
(3.0,'2011/03/12 23:12:18'),
(4.7,'2011/03/12 22:53:35'),
(4.8,'2011/03/12 22:42:39'),
(5.6,'2011/03/12 22:31:27'),
(6.3,'2011/03/12 22:12:46'),
(4.9,'2011/03/12 22:05:17'),
(4.5,'2011/03/12 21:58:40'),
(5.3,'2011/03/12 21:58:17'),
(5.5,'2011/03/12 21:48:09'),
(5.0,'2011/03/12 21:40:58'),
(5.0,'2011/03/12 21:38:35'),
(4.8,'2011/03/12 20:48:48'),
(4.9,'2011/03/12 20:41:25'),
(4.3,'2011/03/12 20:16:59'),
(5.2,'2011/03/12 20:09:55'),
(5.0,'2011/03/12 20:08:25'),
(4.8,'2011/03/12 19:59:01'),
(4.6,'2011/03/12 19:55:28'),
(5.0,'2011/03/12 19:45:19'),
(4.7,'2011/03/12 19:43:47'),
(4.8,'2011/03/12 19:38:08'),
(4.9,'2011/03/12 19:22:47'),
(5.1,'2011/03/12 19:11:59'),
(4.8,'2011/03/12 19:01:05'),
(4.5,'2011/03/12 18:51:37'),
(4.8,'2011/03/12 18:38:38'),
(5.0,'2011/03/12 18:28:39'),
(4.7,'2011/03/12 18:09:15'),
(4.7,'2011/03/12 17:59:57'),
(4.7,'2011/03/12 17:54:19'),
(4.8,'2011/03/12 17:51:27'),
(4.8,'2011/03/12 17:48:13'),
(4.8,'2011/03/12 17:40:56'),
(6.0,'2011/03/12 17:19:24'),
(5.1,'2011/03/12 17:13:02'),
(5.2,'2011/03/12 17:11:09'),
(5.1,'2011/03/12 17:01:22'),
(4.8,'2011/03/12 16:55:41'),
(4.7,'2011/03/12 16:48:14'),
(5.0,'2011/03/12 16:38:45'),
(4.9,'2011/03/12 16:36:41'),
(5.0,'2011/03/12 16:22:15'),
(4.8,'2011/03/12 16:19:05'),
(4.7,'2011/03/12 16:07:39'),
(4.8,'2011/03/12 16:03:56'),
(4.9,'2011/03/12 14:53:21'),
(5.7,'2011/03/12 14:43:09'),
(5.6,'2011/03/12 14:35:00'),
(4.9,'2011/03/12 14:14:56'),
(5.3,'2011/03/12 14:11:05'),
(5.8,'2011/03/12 14:03:30'),
(5.2,'2011/03/12 13:57:12'),
(5.3,'2011/03/12 13:26:56'),
(6.4,'2011/03/12 13:15:42'),
(5.8,'2011/03/12 12:53:50'),
(4.9,'2011/03/12 12:50:28'),
(4.9,'2011/03/12 12:43:13'),
(4.8,'2011/03/12 12:27:17'),
(4.9,'2011/03/12 12:15:31'),
(4.9,'2011/03/12 12:06:57'),
(4.3,'2011/03/12 12:06:16'),
(4.4,'2011/03/12 12:03:43'),
(5.7,'2011/03/12 11:46:01'),
(4.9,'2011/03/12 11:39:05'),
(4.9,'2011/03/12 11:20:16'),
(4.9,'2011/03/12 11:05:00'),
(6.1,'2011/03/12 10:53:31'),
(4.7,'2011/03/12 10:49:24'),
(5.0,'2011/03/12 10:39:12'),
(5.3,'2011/03/12 10:34:49'),
(5.5,'2011/03/12 10:20:22'),
(4.4,'2011/03/12 10:17:09'),
(4.9,'2011/03/12 10:06:12'),
(5.0,'2011/03/12 10:00:26'),
(5.2,'2011/03/12 09:51:35'),
(4.9,'2011/03/12 09:47:59'),
(5.1,'2011/03/12 09:40:44'),
(5.0,'2011/03/12 09:27:12'),
(5.4,'2011/03/12 09:18:56'),
(4.7,'2011/03/12 09:05:33'),
(5.2,'2011/03/12 09:00:03'),
(4.6,'2011/03/12 08:58:24'),
(5.0,'2011/03/12 08:52:50'),
(5.0,'2011/03/12 08:45:30'),
(5.0,'2011/03/12 08:38:40'),
(4.8,'2011/03/12 08:38:04'),
(4.9,'2011/03/12 08:30:22'),
(4.6,'2011/03/12 08:22:07'),
(5.0,'2011/03/12 08:13:42'),
(5.2,'2011/03/12 07:54:10'),
(4.8,'2011/03/12 07:50:54'),
(4.7,'2011/03/12 07:46:42'),
(4.9,'2011/03/12 07:38:06'),
(4.7,'2011/03/12 07:30:18'),
(4.9,'2011/03/12 07:21:00'),
(5.1,'2011/03/12 07:18:53'),
(5.1,'2011/03/12 07:13:35'),
(5.0,'2011/03/12 07:07:32'),
(4.7,'2011/03/12 07:02:21'),
(4.5,'2011/03/12 06:53:53'),
(4.8,'2011/03/12 06:49:12'),
(4.8,'2011/03/12 06:44:01'),
(4.8,'2011/03/12 06:39:26'),
(5.0,'2011/03/12 06:36:00'),
(4.9,'2011/03/12 06:29:10'),
(2.9,'2011/03/12 06:20:47'),
(5.5,'2011/03/12 06:18:43'),
(5.5,'2011/03/12 06:10:44'),
(5.1,'2011/03/12 06:10:23'),
(5.2,'2011/03/12 06:00:25'),
(5.1,'2011/03/12 05:58:59'),
(5.0,'2011/03/12 05:14:51'),
(5.3,'2011/03/12 04:52:58'),
(5.1,'2011/03/12 04:47:19'),
(5.0,'2011/03/12 04:43:04'),
(4.7,'2011/03/12 04:37:21'),
(5.2,'2011/03/12 04:06:09'),
(5.5,'2011/03/12 04:04:49'),
(5.1,'2011/03/12 03:54:48'),
(5.3,'2011/03/12 03:34:46'),
(5.3,'2011/03/12 03:29:28'),
(4.8,'2011/03/12 03:21:44'),
(5.7,'2011/03/12 03:11:59'),
(5.8,'2011/03/12 03:01:49'),
(5.6,'2011/03/12 02:47:36'),
(2.6,'2011/03/12 02:43:49'),
(5.0,'2011/03/12 02:43:11'),
(5.2,'2011/03/12 02:34:05'),
(4.8,'2011/03/12 02:27:50'),
(4.9,'2011/03/12 02:13:51'),
(4.9,'2011/03/12 02:07:21'),
(4.8,'2011/03/12 02:04:55'),
(5.2,'2011/03/12 01:59:44'),
(6.8,'2011/03/12 01:47:16'),
(6.2,'2011/03/12 01:46:21'),
(5.2,'2011/03/12 01:43:20'),
(6.0,'2011/03/12 01:34:10'),
(5.1,'2011/03/12 01:25:04'),
(6.1,'2011/03/12 01:19:07'),
(5.7,'2011/03/12 01:17:41'),
(5.4,'2011/03/12 01:17:02'),
(4.8,'2011/03/12 01:12:16'),
(5.1,'2011/03/12 01:03:59'),
(5.5,'2011/03/12 00:45:10'),
(5.0,'2011/03/12 00:39:37'),
(5.0,'2011/03/12 00:25:08'),
(5.0,'2011/03/12 00:21:25'),
(3.0,'2011/03/12 00:04:09'),
(5.4,'2011/03/11 23:59:21'),
(5.3,'2011/03/11 23:58:04'),
(5.1,'2011/03/11 23:53:29'),
(5.1,'2011/03/11 23:40:12'),
(4.9,'2011/03/11 23:31:23'),
(5.3,'2011/03/11 23:26:51'),
(5.0,'2011/03/11 23:21:22'),
(4.9,'2011/03/11 23:05:15'),
(5.4,'2011/03/11 22:54:28'),
(5.8,'2011/03/11 22:51:18'),
(5.3,'2011/03/11 22:42:59'),
(5.0,'2011/03/11 22:36:57'),
(4.6,'2011/03/11 22:29:42'),
(4.9,'2011/03/11 22:22:36'),
(4.7,'2011/03/11 22:08:14'),
(2.9,'2011/03/11 22:01:26'),
(5.2,'2011/03/11 21:41:58'),
(4.8,'2011/03/11 21:34:25'),
(5.4,'2011/03/11 21:00:46'),
(5.1,'2011/03/11 20:41:24'),
(5.5,'2011/03/11 20:36:10'),
(5.1,'2011/03/11 20:34:40'),
(5.5,'2011/03/11 20:23:44'),
(6.3,'2011/03/11 20:11:23'),
(6.6,'2011/03/11 19:46:49'),
(3.2,'2011/03/11 19:46:27'),
(5.2,'2011/03/11 19:45:24'),
(5.5,'2011/03/11 19:31:56'),
(5.5,'2011/03/11 19:24:29'),
(6.1,'2011/03/11 19:02:59'),
(6.2,'2011/03/11 18:59:15'),
(4.9,'2011/03/11 18:55:15'),
(5.1,'2011/03/11 18:44:06'),
(4.9,'2011/03/11 18:43:14'),
(4.9,'2011/03/11 18:39:34'),
(5.9,'2011/03/11 18:17:06'),
(5.7,'2011/03/11 18:11:24'),
(4.7,'2011/03/11 18:02:39'),
(5.0,'2011/03/11 17:50:01'),
(5.4,'2011/03/11 17:32:14'),
(5.1,'2011/03/11 17:30:48'),
(5.0,'2011/03/11 17:23:57'),
(5.5,'2011/03/11 17:17:00'),
(4.8,'2011/03/11 17:15:00'),
(5.0,'2011/03/11 17:12:41'),
(5.0,'2011/03/11 16:55:53'),
(4.8,'2011/03/11 16:54:53'),
(5.0,'2011/03/11 16:34:22'),
(5.0,'2011/03/11 16:20:52'),
(5.5,'2011/03/11 16:11:27'),
(5.3,'2011/03/11 16:04:53'),
(5.0,'2011/03/11 15:55:23'),
(5.0,'2011/03/11 15:50:59'),
(5.0,'2011/03/11 15:46:02'),
(5.4,'2011/03/11 15:42:05'),
(4.9,'2011/03/11 15:36:16'),
(5.2,'2011/03/11 15:32:34'),
(5.6,'2011/03/11 15:19:38'),
(6.2,'2011/03/11 15:13:15'),
(5.0,'2011/03/11 15:01:39'),
(5.8,'2011/03/11 14:56:16'),
(5.4,'2011/03/11 14:54:04'),
(5.1,'2011/03/11 14:44:08'),
(5.4,'2011/03/11 14:26:31'),
(5.1,'2011/03/11 14:20:20'),
(2.6,'2011/03/11 14:18:29'),
(5.2,'2011/03/11 14:10:39'),
(5.5,'2011/03/11 14:00:38'),
(4.9,'2011/03/11 13:58:50'),
(5.2,'2011/03/11 13:55:28'),
(5.3,'2011/03/11 13:48:38'),
(5.6,'2011/03/11 13:43:10'),
(4.9,'2011/03/11 13:42:27'),
(5.6,'2011/03/11 13:34:36'),
(5.1,'2011/03/11 13:31:55'),
(2.5,'2011/03/11 13:21:37'),
(5.8,'2011/03/11 13:16:50'),
(5.1,'2011/03/11 13:15:45'),
(5.3,'2011/03/11 13:02:43'),
(5.3,'2011/03/11 12:59:21'),
(5.4,'2011/03/11 12:54:52'),
(5.6,'2011/03/11 12:49:01'),
(5.3,'2011/03/11 12:34:22'),
(5.2,'2011/03/11 12:33:19'),
(5.2,'2011/03/11 12:28:45'),
(5.3,'2011/03/11 12:24:37'),
(3.3,'2011/03/11 12:15:07'),
(5.9,'2011/03/11 12:12:53'),
(5.1,'2011/03/11 12:04:16'),
(5.5,'2011/03/11 11:56:16'),
(5.1,'2011/03/11 11:54:02'),
(5.8,'2011/03/11 11:46:47'),
(5.8,'2011/03/11 11:44:28'),
(6.5,'2011/03/11 11:36:39'),
(5.7,'2011/03/11 11:21:02'),
(5.5,'2011/03/11 11:16:51'),
(5.5,'2011/03/11 11:13:12'),
(5.5,'2011/03/11 11:10:58'),
(5.6,'2011/03/11 11:00:51'),
(5.1,'2011/03/11 10:58:06'),
(3.2,'2011/03/11 10:56:23'),
(5.0,'2011/03/11 10:52:08'),
(5.5,'2011/03/11 10:45:46'),
(5.3,'2011/03/11 10:35:36'),
(5.9,'2011/03/11 10:28:44'),
(5.6,'2011/03/11 10:20:27'),
(6.0,'2011/03/11 10:10:35'),
(5.2,'2011/03/11 09:59:57'),
(5.5,'2011/03/11 09:47:02'),
(2.5,'2011/03/11 09:45:08'),
(5.2,'2011/03/11 09:42:22'),
(5.4,'2011/03/11 09:37:08'),
(3.0,'2011/03/11 09:33:58'),
(2.9,'2011/03/11 09:24:53'),
(2.6,'2011/03/11 09:14:36'),
(2.6,'2011/03/11 09:10:26'),
(5.5,'2011/03/11 09:09:15'),
(2.8,'2011/03/11 09:05:22'),
(5.4,'2011/03/11 09:04:10'),
(2.5,'2011/03/11 09:03:56'),
(2.5,'2011/03/11 09:03:44'),
(3.3,'2011/03/11 09:03:38'),
(5.2,'2011/03/11 09:00:20'),
(4.6,'2011/03/11 08:58:26'),
(5.4,'2011/03/11 08:52:26'),
(5.5,'2011/03/11 08:46:48'),
(5.9,'2011/03/11 08:40:56'),
(6.1,'2011/03/11 08:31:08'),
(6.5,'2011/03/11 08:19:24'),
(6.2,'2011/03/11 08:15:41'),
(6.2,'2011/03/11 08:12:05'),
(5.5,'2011/03/11 08:10:31'),
(5.9,'2011/03/11 08:01:59'),
(5.6,'2011/03/11 07:56:16'),
(5.7,'2011/03/11 07:54:45'),
(5.8,'2011/03/11 07:42:55'),
(5.9,'2011/03/11 07:38:27'),
(4.4,'2011/03/11 07:36:12'),
(6.1,'2011/03/11 07:28:12'),
(6.1,'2011/03/11 07:25:33'),
(6.3,'2011/03/11 07:14:59'),
(5.9,'2011/03/11 07:13:47'),
(5.8,'2011/03/11 07:11:00'),
(6.3,'2011/03/11 06:57:15'),
(6.3,'2011/03/11 06:48:47'),
(7.1,'2011/03/11 06:25:51'),
(3.3,'2011/03/11 06:18:04'),
(6.8,'2011/03/11 06:15:40'),
(6.4,'2011/03/11 06:07:22'),
(6.4,'2011/03/11 06:06:11'),
(8.9,'2011/03/11 05:46:24'),
(3.4,'2011/03/11 04:51:25'),
(4.8,'2011/03/11 04:28:21'),
(4.5,'2011/03/11 04:05:41'),
(2.6,'2011/03/11 02:55:42'),
(2.9,'2011/03/11 02:52:08'),
(2.5,'2011/03/11 02:32:09'),
(2.8,'2011/03/11 01:02:00'),
(2.5,'2011/03/11 00:53:59'),
(4.0,'2011/03/11 00:25:29'),
(5.3,'2011/03/11 00:14:51'),
(4.9,'2011/03/10 22:44:26'),
(4.7,'2011/03/10 21:49:47'),
(2.8,'2011/03/10 20:49:08'),
(2.5,'2011/03/10 19:44:35'),
(5.0,'2011/03/10 19:06:11'),
(3.4,'2011/03/10 18:10:05'),
(6.5,'2011/03/10 17:08:37'),
(5.2,'2011/03/10 16:54:45'),
(3.2,'2011/03/10 15:56:25'),
(4.7,'2011/03/10 15:22:52'),
(4.7,'2011/03/10 14:30:34'),
(4.6,'2011/03/10 14:24:46'),
(2.7,'2011/03/10 13:45:34'),
(2.8,'2011/03/10 11:52:58'),
(5.1,'2011/03/10 11:21:08'),
(2.7,'2011/03/10 10:58:37'),
(5.2,'2011/03/10 09:02:22'),
(4.8,'2011/03/10 08:59:19'),
(5.7,'2011/03/10 08:08:21'),
(3.1,'2011/03/10 07:41:31'),
(4.8,'2011/03/10 07:33:04'),
(4.2,'2011/03/10 07:07:09'),
(3.2,'2011/03/10 06:19:01'),
(2.5,'2011/03/10 05:57:39'),
(2.5,'2011/03/10 05:01:11'),
(5.4,'2011/03/10 04:58:18'),
(4.6,'2011/03/10 04:26:48'),
(4.6,'2011/03/10 04:14:00'),
(2.8,'2011/03/10 02:34:17'),
(2.9,'2011/03/10 01:38:14'),
(5.0,'2011/03/10 01:20:24'),
(2.9,'2011/03/10 00:14:41'),
(4.8,'2011/03/09 23:57:42'),
(5.4,'2011/03/09 23:37:01'),
(2.6,'2011/03/09 22:41:30'),
(2.6,'2011/03/09 22:17:41'),
(6.5,'2011/03/09 21:24:52'),
(6.1,'2011/03/09 21:22:18'),
(4.9,'2011/03/09 21:00:58'),
(3.5,'2011/03/09 20:48:31'),
(6.0,'2011/03/09 18:44:35'),
(2.5,'2011/03/09 18:28:33'),
(3.7,'2011/03/09 18:25:26'),
(6.1,'2011/03/09 18:16:15'),
(4.9,'2011/03/09 17:57:27'),
(2.5,'2011/03/09 17:02:06'),
(2.5,'2011/03/09 16:12:03'),
(2.8,'2011/03/09 14:30:37'),
(4.8,'2011/03/09 14:24:06'),
(5.3,'2011/03/09 13:57:28'),
(3.2,'2011/03/09 13:55:24'),
(5.1,'2011/03/09 13:51:42'),
(5.0,'2011/03/09 13:24:08'),
(2.8,'2011/03/09 12:56:35'),
(2.6,'2011/03/09 12:14:14'),
(4.7,'2011/03/09 12:03:18'),
(5.1,'2011/03/09 11:27:52'),
(3.5,'2011/03/09 11:05:09'),
(4.7,'2011/03/09 10:13:40'),
(2.6,'2011/03/09 09:45:14'),
(4.8,'2011/03/09 08:55:38'),
(3.3,'2011/03/09 08:37:30'),
(5.3,'2011/03/09 08:02:36'),
(5.1,'2011/03/09 07:56:28'),
(5.0,'2011/03/09 07:13:48'),
(5.1,'2011/03/09 06:25:12'),
(4.9,'2011/03/09 06:12:13'),
(2.9,'2011/03/09 05:33:50'),
(4.7,'2011/03/09 05:27:06'),
(5.3,'2011/03/09 04:45:54'),
(5.7,'2011/03/09 04:37:04'),
(5.2,'2011/03/09 04:32:10'),
(3.0,'2011/03/09 04:17:17'),
(4.8,'2011/03/09 04:15:39'),
(5.2,'2011/03/09 04:05:54'),
(2.5,'2011/03/09 03:51:21'),
(5.0,'2011/03/09 03:19:00'),
(5.2,'2011/03/09 03:08:36'),
(5.6,'2011/03/09 02:57:17'),
(7.2,'2011/03/09 02:45:20'),
(4.6,'2011/03/09 01:47:47'),
(4.7,'2011/03/09 01:30:27')]
ydata = []
for t in data:
ydata.append(t[0])
pylab.plot(ydata)
pylab.title('Earthquake Magnitude in Japan from 3/9-3/12')
pylab.xlabel('Reading index (most recent first)')
pylab.ylabel('Magnitude')
pylab.show()
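# --- Hedged extension (added for illustration; not called automatically) ---
# The plot above uses the list index on the x axis, and the data are ordered
# newest first. The sketch below parses the timestamps so magnitude can be
# plotted against actual time, oldest to newest; call it manually to try it.
from datetime import datetime

def plot_against_time(quakes=data):
    # each tuple is (magnitude, 'YYYY/MM/DD HH:MM:SS'), most recent first
    times = [datetime.strptime(t[1], '%Y/%m/%d %H:%M:%S') for t in quakes]
    mags = [t[0] for t in quakes]
    pylab.figure()
    pylab.plot(times[::-1], mags[::-1])
    pylab.title('Earthquake Magnitude in Japan from 3/9-3/12')
    pylab.xlabel('Date/time of reading')
    pylab.ylabel('Magnitude')
    pylab.show()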
| mit | BiaDarkia/scikit-learn | sklearn/utils/tests/test_extmath.py | 22 | 24642 |
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.fixes import np_version
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _incremental_mean_and_var
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.extmath import softmax
from sklearn.utils.extmath import stable_cumsum
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_array_equal(mode, mode2)
assert_array_equal(score, score2)
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
@ignore_warnings # Test deprecated backport to be removed in 0.21
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def check_randomized_svd_low_rank(dtype):
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
decimal = 5 if dtype == np.float32 else 7
dtype = np.dtype(dtype)
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0).astype(dtype, copy=False)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
# Convert the singular values to the specific dtype
U = U.astype(dtype, copy=False)
s = s.astype(dtype, copy=False)
V = V.astype(dtype, copy=False)
for normalizer in ['auto', 'LU', 'QR']: # 'none' would not be stable
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(
X, k, power_iteration_normalizer=normalizer, random_state=0)
# If the input dtype is float, then the output dtype is float of the
# same bit size (f32 is not upcast to f64)
# But if the input dtype is int, the output dtype is float64
if dtype.kind == 'f':
assert Ua.dtype == dtype
assert sa.dtype == dtype
assert Va.dtype == dtype
else:
assert Ua.dtype == np.float64
assert sa.dtype == np.float64
assert Va.dtype == np.float64
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the
# real rank of the matrix
assert_almost_equal(s[:k], sa, decimal=decimal)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va),
decimal=decimal)
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = \
randomized_svd(X, k, power_iteration_normalizer=normalizer,
random_state=0)
if dtype.kind == 'f':
assert Ua.dtype == dtype
assert sa.dtype == dtype
assert Va.dtype == dtype
else:
assert Ua.dtype.kind == 'f'
assert sa.dtype.kind == 'f'
assert Va.dtype.kind == 'f'
assert_almost_equal(s[:rank], sa[:rank], decimal=decimal)
def test_randomized_svd_low_rank_all_dtypes():
for dtype in (np.int32, np.int64, np.float32, np.float64):
yield check_randomized_svd_low_rank, dtype
@ignore_warnings # extmath.norm is deprecated to be removed in 0.21
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
# Check the warning with an int array and np.dot potential overflow
assert_warns_message(
UserWarning, 'Array type is integer, np.dot may '
'overflow. Data should be float type to avoid this issue',
squared_norm, X.astype(int))
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
for dtype in (np.float32, np.float64):
if dtype is np.float32:
precision = 4
else:
precision = 5
X = X.astype(dtype)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True),
precision)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X), precision)
for csr_index_dtype in [np.int32, np.int64]:
Xcsr = sparse.csr_matrix(X, dtype=dtype)
# csr_matrix will use int32 indices by default,
# up-casting those to int64 when necessary
if csr_index_dtype is np.int64:
Xcsr.indptr = Xcsr.indptr.astype(csr_index_dtype)
Xcsr.indices = Xcsr.indices.astype(csr_index_dtype)
assert Xcsr.indices.dtype == csr_index_dtype
assert Xcsr.indptr.dtype == csr_index_dtype
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True),
precision)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr),
precision)
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X with approximate effective rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.1,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate
# method without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer,
random_state=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.01)
# compute the singular values of X using the fast approximate
# method with iterated power method
_, sap, _ = randomized_svd(X, k,
power_iteration_normalizer=normalizer,
random_state=0)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without a low-rank component: just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method
# with iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5,
power_iteration_normalizer=normalizer)
# the iterated power method is still managing to get most of the
# structure at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
# Check that transposing the design matrix has limited impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_randomized_svd_power_iteration_normalizer():
# randomized_svd with power_iteration_normalized='none' diverges for
# large number of power iterations on this dataset
rng = np.random.RandomState(42)
X = make_low_rank_matrix(100, 500, effective_rank=50, random_state=rng)
X += 3 * rng.randint(0, 2, size=X.shape)
n_components = 50
# Check that it diverges with many (non-normalized) power iterations
U, s, V = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer='none')
A = X - U.dot(np.diag(s).dot(V))
error_2 = linalg.norm(A, ord='fro')
U, s, V = randomized_svd(X, n_components, n_iter=20,
power_iteration_normalizer='none')
A = X - U.dot(np.diag(s).dot(V))
error_20 = linalg.norm(A, ord='fro')
assert_greater(np.abs(error_2 - error_20), 100)
for normalizer in ['LU', 'QR', 'auto']:
U, s, V = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer=normalizer,
random_state=0)
A = X - U.dot(np.diag(s).dot(V))
error_2 = linalg.norm(A, ord='fro')
for i in [5, 10, 50]:
U, s, V = randomized_svd(X, n_components, n_iter=i,
power_iteration_normalizer=normalizer,
random_state=0)
A = X - U.dot(np.diag(s).dot(V))
error = linalg.norm(A, ord='fro')
assert_greater(15, np.abs(error_2 - error))
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_randomized_svd_sign_flip_with_transpose():
# Check if the randomized_svd sign flipping is always done based on u
# irrespective of transpose.
# See https://github.com/scikit-learn/scikit-learn/issues/5608
# for more details.
def max_loading_is_positive(u, v):
"""
returns bool tuple indicating if the values maximising np.abs
are positive across all rows for u and across all columns for v.
"""
u_based = (np.abs(u).max(axis=0) == u.max(axis=0)).all()
v_based = (np.abs(v).max(axis=1) == v.max(axis=1)).all()
return u_based, v_based
mat = np.arange(10 * 8).reshape(10, -1)
# Without transpose
u_flipped, _, v_flipped = randomized_svd(mat, 3, flip_sign=True)
u_based, v_based = max_loading_is_positive(u_flipped, v_flipped)
assert_true(u_based)
assert_false(v_based)
# With transpose
u_flipped_with_transpose, _, v_flipped_with_transpose = randomized_svd(
mat, 3, flip_sign=True, transpose=True)
u_based, v_based = max_loading_is_positive(
u_flipped_with_transpose, v_flipped_with_transpose)
assert_true(u_based)
assert_false(v_based)
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
def naive_log_logistic(x):
return np.log(1 / (1 + np.exp(-x)))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = X1.shape[0]
final_means, final_variances, final_count = \
_incremental_mean_and_var(X2, old_means, old_variances,
old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
@skip_if_32bit
def test_incremental_variance_numerical_stability():
# Test Youngs and Cramer incremental variance formulas.
def np_var(A):
return A.var(axis=0)
# Naive one pass variance computation - not numerically stable
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
def one_pass_var(X):
n = X.shape[0]
exp_x2 = (X ** 2).sum(axis=0) / n
expx_2 = (X.sum(axis=0) / n) ** 2
return exp_x2 - expx_2
# Two-pass algorithm, stable.
# We use it as a benchmark. It is not an online algorithm
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
def two_pass_var(X):
mean = X.mean(axis=0)
Y = X.copy()
return np.mean((Y - mean)**2, axis=0)
# Naive online implementation
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
# This works only for chunks of size 1
def naive_mean_variance_update(x, last_mean, last_variance,
last_sample_count):
updated_sample_count = (last_sample_count + 1)
samples_ratio = last_sample_count / float(updated_sample_count)
updated_mean = x / updated_sample_count + last_mean * samples_ratio
updated_variance = last_variance * samples_ratio + \
(x - last_mean) * (x - updated_mean) / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
# We want to show a case when one_pass_var has error > 1e-3 while
# _incremental_mean_and_var has less.
tol = 200
n_features = 2
n_samples = 10000
x1 = np.array(1e8, dtype=np.float64)
x2 = np.log(1e-5, dtype=np.float64)
A0 = x1 * np.ones((n_samples // 2, n_features), dtype=np.float64)
A1 = x2 * np.ones((n_samples // 2, n_features), dtype=np.float64)
A = np.vstack((A0, A1))
# Older versions of numpy have different precision
# In some old version, np.var is not stable
if np.abs(np_var(A) - two_pass_var(A)).max() < 1e-6:
stable_var = np_var
else:
stable_var = two_pass_var
# Naive one pass var: >tol (=1063)
assert_greater(np.abs(stable_var(A) - one_pass_var(A)).max(), tol)
# Starting point for online algorithms: after A0
# Naive implementation: >tol (436)
mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
for i in range(A1.shape[0]):
mean, var, n = \
naive_mean_variance_update(A1[i, :], mean, var, n)
assert_equal(n, A.shape[0])
# the mean is also slightly unstable
assert_greater(np.abs(A.mean(axis=0) - mean).max(), 1e-6)
assert_greater(np.abs(stable_var(A) - var).max(), tol)
# Robust implementation: <tol (177)
mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
for i in range(A1.shape[0]):
mean, var, n = \
_incremental_mean_and_var(A1[i, :].reshape((1, A1.shape[1])),
mean, var, n)
assert_equal(n, A.shape[0])
assert_array_almost_equal(A.mean(axis=0), mean)
assert_greater(tol, np.abs(stable_var(A) - var).max())
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = batch.shape[0]
else:
result = _incremental_mean_and_var(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
def test_softmax():
rng = np.random.RandomState(0)
X = rng.randn(3, 5)
exp_X = np.exp(X)
sum_exp_X = np.sum(exp_X, axis=1).reshape((-1, 1))
assert_array_almost_equal(softmax(X), exp_X / sum_exp_X)
def test_stable_cumsum():
if np_version < (1, 9):
raise SkipTest("Sum is as unstable as cumsum for numpy < 1.9")
assert_array_equal(stable_cumsum([1, 2, 3]), np.cumsum([1, 2, 3]))
r = np.random.RandomState(0).rand(100000)
assert_warns(RuntimeWarning, stable_cumsum, r, rtol=0, atol=0)
# test axis parameter
A = np.random.RandomState(36).randint(1000, size=(5, 5, 5))
assert_array_equal(stable_cumsum(A, axis=0), np.cumsum(A, axis=0))
assert_array_equal(stable_cumsum(A, axis=1), np.cumsum(A, axis=1))
assert_array_equal(stable_cumsum(A, axis=2), np.cumsum(A, axis=2))
|
bsd-3-clause
|
legacysurvey/rapala
|
bokpipe/bokmkimage.py
|
2
|
2863
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib import rc
from astropy.stats import sigma_clip
import astropy.visualization as vis
from astropy.visualization.mpl_normalize import ImageNormalize
import fitsio
from . import bokutil
def make_fov_image(fov,pngfn=None,**kwargs):
stretch = kwargs.get('stretch','linear')
interval = kwargs.get('interval','zscale')
imrange = kwargs.get('imrange')
contrast = kwargs.get('contrast',0.25)
ccdplotorder = ['CCD2','CCD4','CCD1','CCD3']
if interval == 'rms':
try:
losig,hisig = imrange
except:
losig,hisig = (2.5,5.0)
#
cmap = kwargs.get('cmap','viridis')
cmap = plt.get_cmap(cmap)
cmap.set_bad('w',1.0)
w = 0.4575
h = 0.455
rc('text',usetex=False)
fig = plt.figure(figsize=(6,6.5))
cax = fig.add_axes([0.1,0.04,0.8,0.01])
ims = [ fov[ccd]['im'] for ccd in ccdplotorder ]
allpix = np.ma.array(ims).flatten()
stretch = {
'linear':vis.LinearStretch(),
'histeq':vis.HistEqStretch(allpix),
'asinh':vis.AsinhStretch(),
}[stretch]
if interval=='zscale':
iv = vis.ZScaleInterval(contrast=contrast)
vmin,vmax = iv.get_limits(allpix)
elif interval=='rms':
		# NOTE: 'nbin' is not defined in this function's scope; assume the binning
		# factor is passed through kwargs (default 1) so this branch does not
		# raise a NameError.
		nsample = 1000 // kwargs.get('nbin',1)
background = sigma_clip(allpix[::nsample],iters=3,sigma=2.2)
m,s = background.mean(),background.std()
vmin,vmax = m-losig*s,m+hisig*s
elif interval=='fixed':
vmin,vmax = imrange
else:
raise ValueError
norm = ImageNormalize(vmin=vmin,vmax=vmax,stretch=stretch)
for n,(im,ccd) in enumerate(zip(ims,ccdplotorder)):
if im.ndim == 3:
im = im.mean(axis=-1)
x = fov[ccd]['x']
y = fov[ccd]['y']
i = n % 2
j = n // 2
pos = [ 0.0225 + i*w + i*0.04, 0.05 + j*h + j*0.005, w, h ]
ax = fig.add_axes(pos)
_im = ax.imshow(im,origin='lower',
extent=[x[0,0],x[0,-1],y[0,0],y[-1,0]],
norm=norm,cmap=cmap,
interpolation=kwargs.get('interpolation','nearest'))
if fov['coordsys']=='sky':
ax.set_xlim(x.max(),x.min())
else:
ax.set_xlim(x.min(),x.max())
ax.set_ylim(y.min(),y.max())
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
if n == 0:
cb = fig.colorbar(_im,cax,orientation='horizontal')
cb.ax.tick_params(labelsize=9)
tstr = fov.get('file','')+' '+fov.get('objname','')
title = kwargs.get('title',tstr)
title = title[-60:]
fig.text(0.5,0.99,title,ha='center',va='top',size=12)
if pngfn is not None:
plt.savefig(pngfn)
plt.close(fig)
def make_fov_image_fromfile(fileName,pngfn,nbin=1,coordsys='sky',**kwargs):
fits = bokutil.BokMefImage(fileName,
mask_file=kwargs.get('mask'),
mask_type=kwargs.get('mask_type'),
read_only=True)
fov = fits.make_fov_image(nbin,coordsys)
fov['file'] = fileName
return make_fov_image(fov,pngfn,**kwargs)
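# A minimal usage sketch, assuming a Bok mosaic FITS image is available on
# disk; the file names below are placeholders, not files shipped with bokpipe.
if __name__ == '__main__':
	make_fov_image_fromfile('example_bok_frame.fits', 'example_fov.png', nbin=4,
		stretch='asinh', interval='zscale')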
|
bsd-3-clause
|
google/makani
|
lib/python/batch_sim/parameter_tables.py
|
1
|
62232
|
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for writing batch simulations like hover disturbances."""
import copy
import csv
import datetime
import json
import os
import re
import shutil
import tempfile
import textwrap
import gflags
import makani
from makani.config import mconfig
from makani.config import overrides_util
from makani.control import system_types
from makani.lib.python import build_info
from makani.lib.python import dict_util
from makani.lib.python import gsutil
from makani.lib.python.batch_sim import client as client_base
import makani.lib.python.batch_sim.batch_sim_util as batch_sim_util
from makani.lib.python.batch_sim.scoring_functions import plot
import matplotlib
import numpy
gflags.DEFINE_integer('parameter_seed', 12345,
'Seed for use in parameter table generation.')
gflags.DEFINE_boolean('randomize_parameter_seed', False,
'Select a random value for parameter table seed.')
gflags.DEFINE_boolean('upload_overview_data', False,
'Upload overview_data.json to batch sim folder on GCS.')
FLAGS = gflags.FLAGS
class _Random(object):
"""Container for a PRNG that can be seeded using --parameter_seed."""
def __init__(self):
self.seed = None
self._prng = None
def Generator(self):
if self.seed is None:
if FLAGS.randomize_parameter_seed:
self.seed = numpy.random.randint(numpy.iinfo(numpy.uint32).max)
else:
self.seed = FLAGS.parameter_seed
self._prng = numpy.random.RandomState(self.seed)
return self._prng
_random = _Random()
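# A minimal sketch of how the module-level generator above might be used,
# assuming the gflags defined above have already been parsed; this helper is
# purely illustrative and is not called elsewhere in the module.
def _example_random_draws(num_draws=3):
  """Draws a few uniform samples from the seeded module-level PRNG."""
  prng = _random.Generator()
  return [prng.uniform(0.0, 1.0) for _ in range(num_draws)]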
class OverridesTable(object):
"""Abstract class for a table of simulations with parameter overrides.
Attributes:
name: The title of this table.
x_label: The x-axis label.
x_values: Numerical labels for the columns of this table.
y_label: The y-axis label.
y_values: Numerical labels for the rows of this table.
base_overrides: Overrides to apply to all table entries.
base_params: Parameter structure with base overrides.
"""
def __init__(self, name, x_label, x_values, y_label, y_values,
base_overrides=None):
if base_overrides is None:
base_overrides = {}
self.name = name
self.base_overrides = copy.deepcopy(base_overrides)
self.base_params = mconfig.MakeParams(
'common.all_params', overrides=copy.deepcopy(base_overrides),
override_method='derived')
self.x_label = x_label
self.x_values = tuple(x_values)
self.y_label = y_label
self.y_values = tuple(y_values)
def GetDimensions(self):
"""Return the dimensions of this table."""
return (len(self.x_values), len(self.y_values))
def GetOverrides(self, x_idx, y_idx):
"""Abstract method for getting the overrides for this simulation."""
raise NotImplementedError
def GetRangeValues(self, x_idx, y_idx):
"""Abstract method for getting the parameter ranges for this simulation."""
raise NotImplementedError
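# A minimal sketch of a concrete OverridesTable, assuming each cell only needs
# a wind-speed override equal to its column value; the subclass is illustrative
# and is not used elsewhere in the module.
class _ExampleConstantWindTable(OverridesTable):
  """Fills every cell with a wind-speed override taken from its x value."""
  def GetOverrides(self, x_idx, y_idx):
    return {'sim': {'phys_sim': {'wind_speed': self.x_values[x_idx]}}}
  def GetRangeValues(self, x_idx, y_idx):
    return [(min(self.x_values), max(self.x_values))]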
class OverridesTableSimClient(client_base.BatchSimClient):
"""Abstract client for generating a table of tables."""
def __init__(self, output_dir, tables, scoring_functions,
columns=3, title=None, **kwargs):
"""Constructor for a disturbance sim.
Args:
output_dir: Directory in which outputs are to be written.
tables: A list of OverridesTables.
scoring_functions: A list of ScoringFunctions.
columns: Number of columns to use in displaying tables.
title: Title for the generated HTML.
**kwargs: See client_base.BatchSimClient.
"""
super(OverridesTableSimClient, self).__init__(**kwargs)
self._output_dir = output_dir
self._tables = tables
self._scoring_functions = scoring_functions
self._title = title
self._columns = columns
    # These index arrays are populated below so that each linear index maps to
    # the table entry whose configuration is returned by _GetConfig.
self._linear_indices = [None for _ in range(self._GetNumTables())]
self._linear_index_to_table_index = []
self._overrides = []
for table_idx in range(self._GetNumTables()):
table_dim = self._tables[table_idx].GetDimensions()
self._linear_indices[table_idx] = [[None for _ in range(table_dim[1])]
for _ in range(table_dim[0])]
for x_idx, y_idx in numpy.ndindex(table_dim):
self._linear_indices[table_idx][x_idx][y_idx] = len(self._overrides)
self._linear_index_to_table_index.append(table_idx)
self._overrides += [
self._tables[table_idx].GetOverrides(x_idx, y_idx)
]
def _GetNumTables(self):
"""Method returning the number of tables.
Returns:
The number of parameter tables to be swept.
"""
return len(self._tables)
def _GetLinearIndex(self, idx):
"""Convert a 3-D index into a linear index."""
return self._linear_indices[idx[0]][idx[1]][idx[2]]
def _GetConfig(self, idx):
table_idx = self._linear_index_to_table_index[idx]
return mconfig.SimpleOverride(
overrides_util.PreprocessOverrides(self._overrides[idx]),
copy.deepcopy(self._tables[table_idx].base_params))
def _GenerateConfigs(self):
for idx in range(len(self._overrides)):
yield self._GetConfig(idx)
@client_base.JsonReducer
def _ReduceWorkerOutput(self, outputs):
matplotlib.use('Agg')
self._GenerateHtml(self._output_dir, outputs)
self._GenerateJson(self._output_dir, outputs)
def _GenerateHtml(self, output_dir, outputs):
# Create directory to place output files.
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# File to write each results page.
def _GetOverridesPageFilename(i):
return 'overrides_%d.html' % i
def _GetOverridesFilename(i):
return 'overrides_%d.json' % i
def _GetStatsFilename(table_idx):
return 'table_%d.csv' % table_idx
def _GetScoreCheckboxes():
"""Returns an HTML title bar for selecting which scores to show."""
score_fn_names = [score_fn.GetName()
for score_fn in self._scoring_functions]
lines = ['<div id="score_container" style="text-align: center">'
'<div style="display: inline-block; text-align: left">'
'<h3>Scores</h3>',
'<button onclick="$(\'#score_display\').toggle();"'
'>Hide/show</button>',
'<div id="score_display">',
'<form id="active_scores" style="margin-top: 1rem;">']
for i, name in enumerate(score_fn_names):
elt_name = 'score' + str(i)
line = ''.join([
'<input type="checkbox" name="{0}" value="{0}" checked>'.format(
elt_name),
name,
'</input><br>'])
lines.append(line)
lines += ['</form>',
'<button id="select_all_scores">Select all</button>',
'<button id="clear_all_scores">Clear all</button>',
'</div>', # score_display
'</div>',
'</div>']
return '\n'.join(lines)
# Convert a worker's output into an HTML table cell.
def _GetScoreData(idx, outputs):
"""Get score data and link for a given index.
Args:
idx: Multi-index of this sim (see _GetLinearIndex).
outputs: Array of outputs from the workers.
Returns:
(default_score, all_scores, link) tuple.
"""
i = self._GetLinearIndex(idx)
output = outputs[i]
all_scores = []
default_score = float('nan')
if output['sim_success']:
all_scores = [score_fn.GetScore(output[score_fn.GetName()])
for score_fn in self._scoring_functions]
default_score = numpy.max(all_scores)
return (default_score, all_scores, _GetOverridesPageFilename(i))
def _WriteScoreTables(filename):
"""Write score data tables into HTML string.
Args:
filename: name of file where HTML string will be written.
"""
with open(filename, 'w') as f:
f.write(textwrap.dedent("""
<html>
<head>
<link rel="stylesheet" type="text/css" href="style.css">
<style>
#score_container {
position: fixed;
top: 1rem;
left: 2rem;
background-color: rgba(255, 255, 255, 1);
border-style: solid;
z-index: 10;
padding: 0rem 1rem 1rem 1rem;
overflow: auto;
max-height: 90%;
box-shadow: 4px 4px 3px rgba(120, 120, 120, 1);
}
</style>
</head>
<script src="jquery.js"></script>
<script src="scoring_function_util.js"></script>
<script>
$(document).ready(function() {
var scoreCheckboxes = $("#active_scores").find(":checkbox");
UpdateTableCellsFromCheckboxes(scoreCheckboxes);
scoreCheckboxes.change(function() {
UpdateTableCellsFromCheckboxes(scoreCheckboxes);
});
$("#select_all_scores").click(function() {
scoreCheckboxes.prop("checked", true);
UpdateTableCellsFromCheckboxes(scoreCheckboxes);
});
$("#clear_all_scores").click(function() {
scoreCheckboxes.removeAttr("checked");
UpdateTableCellsFromCheckboxes(scoreCheckboxes);
});
});
</script>
<body>
<center>
<h1>"""))
if self._title is not None:
f.write(self._title + ' ')
f.write(datetime.datetime.now().strftime('%Y-%m-%d %H:%M %Z'))
f.write(textwrap.dedent("""\
</h1>
%s
%s
<table>""" % (_GetScoreCheckboxes(),
batch_sim_util.GetHtmlScoreTableLegend())))
for table_idx in range(self._GetNumTables()):
table_dim = self._tables[table_idx].GetDimensions()
if (table_idx % self._columns) == 0:
f.write('<tr>\n')
f.write('<td align="right">\n')
# Note that we transpose the array here.
table_values = [
[_GetScoreData([table_idx, x_idx, y_idx], outputs)
for x_idx in range(table_dim[0])]
for y_idx in range(table_dim[1])
]
f.write(batch_sim_util.GetHtmlScoreTable(
table_values,
'<a href="%s">%s</a>' % (_GetStatsFilename(table_idx),
self._tables[table_idx].name),
self._tables[table_idx].x_values,
self._tables[table_idx].x_label,
self._tables[table_idx].y_values,
self._tables[table_idx].y_label))
f.write('</td>\n')
if (table_idx % self._columns) == self._columns - 1:
f.write('</tr>\n')
f.write(textwrap.dedent("""
</center></body>
</html>"""))
def _GcsLink(filename):
url = ('https://storage.cloud.google.com/makani/batch_sim/'
+ self._sim_name + '/h5_logs/' + filename)
return '<a href="%s">%s</a>' % (url, filename)
def _WriteStats(table_idx, outputs):
"""Writes out batch sim output values into a unique csv file.
Args:
table_idx: batch sim number of a given Override table.
outputs: contains all HTML outputs for the batch sim at table_idx.
"""
filename = os.path.join(output_dir, _GetStatsFilename(table_idx))
table = self._tables[table_idx]
with open(filename, 'w') as f:
writer = csv.writer(f, delimiter=' ', quotechar='"',
quoting=csv.QUOTE_MINIMAL)
writer.writerow([table.name])
labels = [
score_fn.GetName() for score_fn in self._scoring_functions
] + [
override_name
for override_name, _ in table.GetOverridesDesc(0, 0)
]
writer.writerow(labels)
for x_idx, y_idx in numpy.ndindex(table.GetDimensions()):
idx = self._GetLinearIndex((table_idx, x_idx, y_idx))
if not outputs[idx]['sim_success']:
continue
row = []
for score_fn in self._scoring_functions:
score_fn_output = outputs[idx][score_fn.GetName()]
row.append(score_fn.GetValue(score_fn_output))
for _, value in table.GetOverridesDesc(x_idx, y_idx):
row.append(value)
writer.writerow(row)
# For each output, write an overrides page.
assert len(outputs) == numpy.sum([
numpy.prod(self._tables[i].GetDimensions())
for i in range(self._GetNumTables())
])
for table_idx in range(self._GetNumTables()):
_WriteStats(table_idx, outputs)
table_dims = self._tables[table_idx].GetDimensions()
for x_idx, y_idx in numpy.ndindex(table_dims):
idx = self._GetLinearIndex((table_idx, x_idx, y_idx))
with open(os.path.join(output_dir,
_GetOverridesFilename(idx)), 'w') as f:
f.write(json.dumps(self._GetConfig(idx)))
with open(os.path.join(output_dir,
_GetOverridesPageFilename(idx)), 'w') as f:
header_info = [('Table', self._tables[table_idx].name),
('Overrides',
'<a href="%s">%s</a>' % (_GetOverridesFilename(idx),
_GetOverridesFilename(idx))),
('Full log', _GcsLink('%d.h5' % idx)),
('Sparse log (10 Hz)', _GcsLink('%d_sparse.h5' % idx)),
('Note', 'Log files are overwritten whenever sim name '
'%s is reused.' % self._sim_name)]
overrides = self._tables[table_idx].GetOverridesDesc(x_idx, y_idx)
overrides_lim = (self._tables[table_idx]
.GetOverridesLimits(x_idx, y_idx))
results = []
if outputs[idx]['sim_success']:
for score_fn in self._scoring_functions:
score_fn_output = outputs[idx][score_fn.GetName()]
results += [batch_sim_util.HtmlScoreTableValue(
name=score_fn.GetName(),
severity=score_fn.GetSeverity(),
limits=score_fn.Limits(),
value=score_fn.GetValue(score_fn_output),
score=score_fn.GetScore(score_fn_output)
)]
else:
err = batch_sim_util.EscapeHtml(
outputs[idx]['sim_error_message'])
results += [batch_sim_util.HtmlScoreTableValue(
name='Error Message',
severity=None,
limits=None,
value='<pre>%s</pre>' % err,
score=None
)]
# Need to zip the two overrides tuples together
overrides_info = []
for (description, limits) in zip(overrides, overrides_lim):
overrides_info += [(description + limits)]
command_info = [
('Command line',
'run_sim -o \'%s\'' % json.dumps(dict_util.UpdateNestedDict(
self._tables[table_idx].base_overrides,
self._overrides[idx])))
]
if 'events' in outputs[idx]:
with tempfile.NamedTemporaryFile(
suffix='.html', delete=False) as temp_fp:
plot.PlotEvents(outputs[idx]['events'], temp_fp.name)
with open(temp_fp.name) as fp:
event_html = fp.read()
os.remove(temp_fp.name)
else:
event_html = ''
f.write(batch_sim_util.GetHtmlScoreTableResultPage(
results, header_info, overrides_info, command_info, event_html))
_WriteScoreTables(output_dir + '/old_report.html')
# Copy CSS and JS files to the output directory.
shutil.copyfile(os.path.join(
makani.HOME, 'lib/python/batch_sim/overrides_table_style.css'),
os.path.join(output_dir, 'style.css'))
shutil.copy(os.path.join(
makani.HOME, 'lib/python/batch_sim/scoring_function_util.js'),
output_dir)
shutil.copy(os.path.join(makani.HOME, 'lib/python/batch_sim/jquery.js'),
output_dir)
os.chmod(os.path.join(output_dir, 'jquery.js'), 0770)
def _GenerateJson(self, output_dir, outputs):
"""Writes JSON data for new-style reports."""
table_data = []
for table_idx in range(self._GetNumTables()):
table_dims = self._tables[table_idx].GetDimensions()
if isinstance(self._tables[table_idx].y_values[0], (float, int)):
yticks = [('%.2f' % x).rstrip('0').rstrip('.')
for x in self._tables[table_idx].y_values]
elif isinstance(self._tables[table_idx].y_values[0], (list, dict)):
yticks = [str(x) for x in self._tables[table_idx].y_values]
else:
assert False, 'yticks must be number, list, or dict.'
table_entry = {
'title': self._tables[table_idx].name,
'num_rows': table_dims[1],
'num_cols': table_dims[0],
'xlabel': self._tables[table_idx].x_label,
'xticks': [('%.2f' % x).rstrip('0').rstrip('.')
for x in self._tables[table_idx].x_values],
'ylabel': self._tables[table_idx].y_label,
'yticks': yticks,
'job_data': [],
}
table_data.append(table_entry)
for x_idx, y_idx in numpy.ndindex(table_dims):
idx = self._GetLinearIndex((table_idx, x_idx, y_idx))
job_entry = {
'job_id': idx,
'table_pos': [x_idx, y_idx],
'scores': [],
'sim_success': True,
}
table_entry['job_data'].append(job_entry)
if outputs[idx]['sim_success']:
for score_fn in self._scoring_functions:
score_fn_output = outputs[idx][score_fn.GetName()]
job_entry['scores'] += [score_fn.GetScore(score_fn_output)]
else:
job_entry['sim_success'] = False
overview_data = {
'title': (self._title + ' ' +
datetime.datetime.now().strftime('%Y-%m-%d %H:%M %Z')),
'sim_name': self._sim_name,
'metrics': [{'name': f.GetName(),
'system_labels': f.GetSystemLabels(),
'severity': f.GetSeverity()}
for f in self._scoring_functions],
'num_major_cols': self._columns,
'color_map': ['#74add1', '#e0f3f8', '#fee090',
'#fdae61', '#f46d43', '#a50026'],
'commit': build_info.GetGitSha(),
'parameter_seed': _random.seed,
'table_data': table_data,
}
# d3 will fail to parse a JSON file that contains NaNs or Infinity's --
# they aren't officially supported by JSON. Replace them with nulls instead.
json_str = re.sub(r'\bNaN\b', 'null', json.dumps(overview_data, indent=2))
json_str = re.sub(r'\bInfinity\b', 'null', json_str)
with open(output_dir + '/overview_data.json', 'w') as f:
f.write(json_str)
if FLAGS.upload_overview_data:
gsutil_api = gsutil.GsutilApi()
gsutil_api.Copy(output_dir + '/overview_data.json',
os.path.join('gs://makani/', self._gcs_base_dir,
'overview_data.json'), True)
copy_specs = [('d3.v4.min.js', 'd3.v4.min.js'),
('c3.min.js', 'c3.min.js'),
('c3.css', 'c3.css'),
('bokeh-0.13.0.min.css', 'bokeh-0.13.0.min.css'),
('bokeh-0.13.0.min.js', 'bokeh-0.13.0.min.js'),
('frontend/sweeps_report.css', 'sweeps_report.css'),
('frontend/sweeps_report.html', 'index.html'),
('frontend/sweeps_report.js', 'sweeps_report.js'),
('frontend/shared_library.js', 'shared_library.js')]
for source, dest_basename in copy_specs:
dest = os.path.join(output_dir, dest_basename)
shutil.copy(os.path.join(makani.HOME, 'lib/python/batch_sim', source),
dest)
if dest.endswith('.js'): # Make JavaScript executable.
os.chmod(dest, 0770)
else: # Allow files to be overwritable
os.chmod(dest, 0660)
class ParameterRange(object):
"""Abstract class representing a parameter sweep.
Classes derived from ParameterRange represent lists of labeled, partial
overrides for simulations. See WindSpeedParameterRange for an example.
Attributes:
label: The label associated with this parameter range.
values: Numerical labels for each individual value.
values_range: The values that define the range of the distribution.
distribution: [dict] probability distribution for random values.
For normal distribution the expected keys are:
mean - Mean of the parameter values distribution.
sigma - Standard deviation of parameter values distribution.
bound or lower/upper_bound - bounds to truncate parameter distribution,
specified as multiple of sigma, or
absolute.
type - 'normal'.
For uniform distribution the expected keys are:
lower_bound: minimum value of the parameter.
upper_bound: maximum value of the parameter.
type - 'uniform'.
Any other distribution type will raise an error.
"""
def __init__(self, label, values, distribution=None):
self.label = label
self.values = values
self.values_range = [numpy.min(values, axis=0), numpy.max(values, axis=0)]
self.distribution = {'type': None} if distribution is None else distribution
def _ClipValue(self, value):
"""Clip the returned value from random generator to specified bounds."""
# This is preferred over calling the generator again for a
# new random number, as this ensures repeatable seed for batches
# irrespective of choice of parameter values.
if ('lower_bound' in self.distribution and
'upper_bound' in self.distribution):
lower_bound = self.distribution['lower_bound']
upper_bound = self.distribution['upper_bound']
else:
lower_bound = (self.distribution['mean'] -
self.distribution['bound'] * self.distribution['sigma'])
upper_bound = (self.distribution['mean'] +
self.distribution['bound'] * self.distribution['sigma'])
self.values_range = [lower_bound, upper_bound]
return numpy.clip(value, a_min=lower_bound, a_max=upper_bound)
def GetRandomValue(self):
"""Randomly select a value from the parameters ditribution."""
if self.distribution['type'] == 'uniform':
value = _random.Generator().uniform(self.distribution['lower_bound'],
self.distribution['upper_bound'])
self.values_range = [self.distribution['lower_bound'],
self.distribution['upper_bound']]
elif self.distribution['type'] == 'normal':
value = _random.Generator().normal(self.distribution['mean'],
self.distribution['sigma'])
value = self._ClipValue(value)
elif self.distribution['type'] == 'lognormal':
value = (_random.Generator().lognormal(self.distribution['mean'],
self.distribution['sigma'])
+ self.distribution['loc'])
value = self._ClipValue(value)
elif self.distribution['type'] == 'vonmises':
if self.distribution['units'].startswith('rad'):
mu = self.distribution['mean']
kappa = 1.0 / numpy.square(self.distribution['sigma'])
value = _random.Generator().vonmises(mu, kappa)
elif self.distribution['units'].startswith('deg'):
mu = numpy.deg2rad(self.distribution['mean'])
kappa = 1.0 / numpy.square(numpy.deg2rad(self.distribution['sigma']))
value = numpy.rad2deg(_random.Generator().vonmises(mu, kappa))
else:
raise NotImplementedError
# TODO: Implement smart circular clipping option.
elif self.distribution['type'] == 'multimodal':
# Use the ParameterRange class recursively to combine any combination
# of distributions.
# Args:
# distributions: List of the different distributions to combine, each a
# dictionary with all the parameters needed to be a fully defined
# ParameterRange distribution.
# weights: List of the same length, with the relative probability
# weights.
select_value = _random.Generator().uniform(
0.0, sum(self.distribution['weights']))
selected_mode = (numpy.searchsorted(
numpy.cumsum(self.distribution['weights']),
select_value))
mode = ParameterRange(
None, None, self.distribution['distributions'][selected_mode])
value = mode.GetRandomValue()
# Instead of clipping final output, rely on the clipping specified in
# each mode.
else:
# Add other distribution implementations here as needed.
raise NotImplementedError
return value
def GetRandomOverride(self):
"""Randomly select a set of overrides."""
value = self.GetRandomValue()
return self.GetOverrides(value), value
def GetDisplayValue(self, value):
"""Get a human readable representation of a value."""
return value
def GetRangeValues(self):
"""Get the parameter range values."""
return self.values_range
def GetOverrides(self, value):
"""Abstract method which returns a set of overrides.
Args:
value: Scalar value to apply as an override.
Returns:
A set of overrides to be included in a simulation.
"""
raise NotImplementedError
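# A minimal sketch of a concrete ParameterRange wired to the uniform
# distribution format described in the class docstring; the override path
# 'example_parameter' is hypothetical, and drawing a value assumes gflags have
# been parsed so that the module PRNG can be seeded.
class _ExampleUniformRange(ParameterRange):
  """Sweeps a single scalar drawn uniformly between two bounds."""
  def GetOverrides(self, value):
    return {'sim': {'phys_sim': {'example_parameter': value}}}
def _example_uniform_random_override():
  """Draws one random override from the hypothetical range above."""
  example_range = _ExampleUniformRange(
      'Example Parameter [#]', [0.0, 1.0],
      distribution={'type': 'uniform', 'lower_bound': 0.0, 'upper_bound': 1.0})
  return example_range.GetRandomOverride()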
class NoneParameterRange(ParameterRange):
"""An empty parameter range."""
def __init__(self, label):
"""Constructor.
Args:
label: Label for this range.
"""
super(NoneParameterRange, self).__init__(label, [0.0])
def GetOverrides(self, unused_idx):
return {}
class WaveParameterSelector(object):
"""Select appropriate sea conditions given the wind."""
def __init__(self, wave_variation_params):
# Dictionary defining the correlation_fit parameters needed to calculate the
# expected value as a function of other sim parameters.
self._wave_variation_params = wave_variation_params
def _GetWaveOverrides(
self, wave_variation_overrides, wind_speed, wind_direction):
"""Calcute the expected value for each parameter and add the variation."""
wave_overrides = {}
wave_overrides['wave_heading_ned'] = (
(wind_direction
+ numpy.deg2rad(wave_variation_overrides['wave_wind_alignment']))
% (2 * numpy.pi))
hs = self._wave_variation_params['significant_height']['correlation_fit']
wave_overrides['significant_height'] = (
hs['coefficient'] * numpy.square(wind_speed) +
hs['intercept'] + wave_variation_overrides['significant_height'])
wave_overrides['significant_height'] = numpy.clip(
wave_overrides['significant_height'],
a_min=hs['lower_bound'], a_max=hs['upper_bound'])
assert wave_overrides['significant_height'] > 0.0, (
'Wave height must be greater than zero. '
'Adjust the correlation_fit lower_bound.')
tp = self._wave_variation_params['peak_period']['correlation_fit']
wave_overrides['peak_period'] = (
tp['coefficient'] * wave_overrides['significant_height'] +
tp['intercept'] + wave_variation_overrides['peak_period'])
wave_overrides['peak_period'] = numpy.clip(
wave_overrides['peak_period'],
a_min=tp['lower_bound'], a_max=tp['upper_bound'])
assert wave_overrides['peak_period'] > 0.0, (
'Wave period must be greater than zero. '
'Adjust the correlation_fit lower_bound.')
return {'sim': {'sea_sim': wave_overrides}}
def AddWaveOverrides(self, overrides, base_overrides):
"""Pull out the wave variation overrides and add in the wave overrides."""
merged_overrides = dict_util.MergeNestedDicts(overrides, base_overrides)
wave_overrides = self._GetWaveOverrides(
merged_overrides['sim']['sea_sim']['waves'],
merged_overrides['sim']['phys_sim']['wind_speed'],
merged_overrides['sim']['phys_sim']['wind_direction'])
overrides = dict_util.MergeNestedDicts(overrides, wave_overrides)
del overrides['sim']['sea_sim']['waves']
return overrides
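# A minimal worked sketch of the correlation fits above, using made-up fit
# coefficients: significant height is quadratic in wind speed and peak period
# is linear in significant height, each clipped to its bounds after the sampled
# variation is added. The numbers here are illustrative only.
def _example_wave_overrides():
  selector = WaveParameterSelector({
      'significant_height': {'correlation_fit': {
          'coefficient': 0.02, 'intercept': 0.5,
          'lower_bound': 0.1, 'upper_bound': 10.0}},
      'peak_period': {'correlation_fit': {
          'coefficient': 4.0, 'intercept': 2.0,
          'lower_bound': 1.0, 'upper_bound': 25.0}},
  })
  variations = {'wave_wind_alignment': 0.0, 'significant_height': 0.0,
                'peak_period': 0.0}
  # With a 10 m/s wind: height = 0.02 * 10**2 + 0.5 = 2.5 m,
  # period = 4.0 * 2.5 + 2.0 = 12.0 s.
  return selector._GetWaveOverrides(variations, 10.0, 0.0)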
class CustomParameterRange(ParameterRange):
"""A parameter range for a custom defined variable."""
def __init__(self, name, path, value, distribution):
"""Constructor.
Args:
name: String of name and/or description of variable.
path: List of keys to get to parameter in overrides.
value: Replacement value for parameter.
distribution: Parameter for specifying probability distribution.
"""
self.path = path
super(CustomParameterRange, self).__init__(
'%s :' % name, value, distribution)
def GetOverrides(self, value):
# Makes a copy to enable popping off last value.
path = copy.deepcopy(self.path)
if not isinstance(path[0], list):
path = [path]
value = [value]
all_overrides = {}
for p, v in zip(path, value):
overrides_nest = {}
# Places value into bottom layer of overrides.
overrides_nest[p.pop()] = v
# Initialize overrides with bottom layer.
overrides = overrides_nest
# Builds up nested overrides dict from path.
while p:
overrides = {}
overrides[p.pop()] = overrides_nest
overrides_nest = overrides
all_overrides = dict_util.MergeNestedDicts(
all_overrides, overrides)
return all_overrides
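# A minimal sketch of the nested-dict construction above: a path such as
# ['sim', 'phys_sim', 'wind_speed'] is folded from the innermost key outward,
# so a value of 8.0 becomes {'sim': {'phys_sim': {'wind_speed': 8.0}}}. The
# range below is illustrative only.
def _example_custom_range_overrides():
  example_range = CustomParameterRange(
      'Example wind speed', ['sim', 'phys_sim', 'wind_speed'], [8.0],
      distribution=None)
  return example_range.GetOverrides(8.0)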
class CenterOfMassOffsetParameterRange(ParameterRange):
"""A parameter range for offsetting the simulated center-of-mass."""
def __init__(self, dim, offsets, distribution, body, body_name=''):
"""Constructor.
Args:
dim: Dimension to offset (one of 0, 1, 2).
offsets: Offsets to apply.
distribution: Parameter for specifying probability distribution.
body: String identifying the body in the 'sim' dictionary.
body_name: Name to be used in the score name.
"""
self.dim = dim
self.body = body
super(CenterOfMassOffsetParameterRange, self).__init__(
body_name + ' %s COM Offset [m]' % ['X', 'Y', 'Z'][dim], offsets,
distribution)
def GetOverrides(self, value):
return {
'sim': {
self.body: {
'mass_prop_uncertainties': {
'center_of_mass_offset': {
self.dim: value
}
}
}
}
}
class MassScaleParameterRange(ParameterRange):
"""A parameter range scaling the simulated kite mass."""
def __init__(self, scales, distribution, body, body_name=''):
"""Constructor.
Args:
scales: Scale factors to apply.
distribution: Parameter for specifying probability distribution.
body: String identifying the body in the 'sim' dictionary.
body_name: Name to be used in the score name.
"""
self.body = body
super(MassScaleParameterRange, self).__init__(
body_name + ' Mass Scaling [#]', scales, distribution)
def GetOverrides(self, value):
return {
'sim': {
self.body: {
'mass_prop_uncertainties': {
'mass_scale': value,
}
}
}
}
class InertiaScaleParameterRange(ParameterRange):
"""A parameter range scaling the simulated kite moments-of-inertia."""
def __init__(self, dim, scales, distribution, body, body_name=''):
"""Constructor.
Args:
dim: Dimension to apply the scaling to (one of 0, 1, 2).
scales: Scale factors to apply.
distribution: Parameter for specifying probability distribution.
body: String identifying the body in the 'sim' dictionary.
body_name: Name to be used in the score name.
"""
self.dim = dim
self.body = body
super(InertiaScaleParameterRange, self).__init__(
body_name + ' %s Inertia Scaling [#]' % ['X', 'Y', 'Z'][dim], scales,
distribution)
def GetOverrides(self, value):
return {
'sim': {
self.body: {
'mass_prop_uncertainties': {
'moment_of_inertia_scale': {
self.dim: value
}
}
}
}
}
class AeroSimOffsetParameterRange(ParameterRange):
"""A parameter range overriding the aerodynamic offsets."""
def __init__(self, field, offsets, distribution):
"""Constructor.
Args:
field: Offset to adjust.
offsets: A list of values to sweep over.
distribution: Parameter for specifying probability distribution.
"""
self.field = field
super(AeroSimOffsetParameterRange, self).__init__(
field + ' Offset', offsets, distribution)
def GetOverrides(self, value):
return {
'sim': {
'aero_sim': {
'coeff_offsets': {
self.field: value
}
}
}
}
class AeroSimMomentBFlapScalingParameterRange(ParameterRange):
"""A parameter range for scaling the moments from flap deflections."""
def __init__(self, label, moment, basis, scale_differences,
distribution):
"""Constructor.
Args:
label: Name to display.
moment: Index in (0, 1, 2) indicating which moment to scale.
basis: Array of kNumFlaps values to multiply scale_differences by.
scale_differences: Value to add to 1.0 to generate the scaling.
distribution: Parameter for specifying probability distribution.
"""
self.moment = moment
self.basis = basis
super(AeroSimMomentBFlapScalingParameterRange, self).__init__(
label, scale_differences, distribution)
def GetOverrides(self, value):
scale_factor = 1.0 + value
return {
'sim': {
'aero_sim': {
'moment_coeff_b_scale_factors': {
'flap_derivatives': {
flap: {
self.moment: scale_factor * self.basis[flap]
}
for flap in range(system_types.kNumFlaps)
if numpy.abs(self.basis[flap]) > 0.0
}
}
}
}
}
class AeroSimForceBFlapScalingParameterRange(ParameterRange):
"""A parameter range for scaling the forces from flap deflections."""
def __init__(self, label, force, basis, scale_differences,
distribution):
"""Constructor.
Args:
label: Name to display.
force: Index in (0, 1, 2) indicating which force to scale.
basis: Array of kNumFlaps values to multiply scale_differences by.
scale_differences: Value to add to 1.0 to generate the scaling.
distribution: Parameter for specifying probability distribution.
"""
self.force = force
self.basis = basis
super(AeroSimForceBFlapScalingParameterRange, self).__init__(
label, scale_differences, distribution)
def GetOverrides(self, value):
scale_factor = 1.0 + value
return {
'sim': {
'aero_sim': {
'force_coeff_w_scale_factors': {
'flap_derivatives': {
flap: {
self.force: scale_factor * self.basis[flap]
}
for flap in range(system_types.kNumFlaps)
if numpy.abs(self.basis[flap]) > 0.0
}
}
}
}
}
class AeroSimMomentBRateScalingParameterRange(ParameterRange):
"""A parameter range for scaling the body aero moments from body rates."""
moments = ['l', 'm', 'n']
def __init__(self, moment, rate, scale_differences, distribution):
"""Constructor.
Args:
moment: Index in (0, 1, 2) indicating which moment to scale.
rate: One of 'p', 'q', or 'r'.
scale_differences: Value to add to 1.0 to generate the scaling.
distribution: Parameter for specifying probability distribution.
"""
self.moment = moment
self.rate = rate
super(AeroSimMomentBRateScalingParameterRange, self).__init__(
'C%s%s Scaling Factor Offset [#]'
% (self.moments[self.moment], self.rate),
scale_differences, distribution)
def GetOverrides(self, value):
scaling = 1.0 + value
return {
'sim': {
'aero_sim': {
'moment_coeff_b_scale_factors': {
'rate_derivatives': {
self.rate: {
self.moment: scaling
}
}
}
}
}
}
class AeroSimForceBRateScalingParameterRange(ParameterRange):
"""A parameter range for scaling the wind aero forces from body rates."""
forces = ['D', 'Y', 'L']
def __init__(self, force, rate, scale_differences, distribution):
"""Constructor.
Args:
force: Index in (0, 1, 2) indicating which force to scale.
rate: One of 'p', 'q', or 'r'.
scale_differences: Value to add to 1.0 to generate the scaling.
distribution: Parameter for specifying probability distribution.
"""
self.force = force
self.rate = rate
super(AeroSimForceBRateScalingParameterRange, self).__init__(
'C%s%s Scaling Factor Offset [#]'
% (self.forces[self.force], self.rate),
scale_differences, distribution)
def GetOverrides(self, value):
scaling = 1.0 + value
return {
'sim': {
'aero_sim': {
'force_coeff_w_scale_factors': {
'rate_derivatives': {
self.rate: {
self.force: scaling
}
}
}
}
}
}
class AeroSimFlapOffsetParameterRange(ParameterRange):
"""A parameter range overriding the flap offsets."""
def __init__(self, label, basis, offsets, distribution):
"""Constructor.
Args:
label: Label for this range.
basis: Basis vector to scale (numpy.array of kNumFlaps elements).
offsets: A list of values to sweep over.
distribution: Parameter for specifying probability distribution.
"""
self.basis = basis
super(AeroSimFlapOffsetParameterRange, self).__init__(label, offsets,
distribution)
def GetOverrides(self, value):
return {
'sim': {
'aero_sim': {
'flap_offsets': {
flap: value * self.basis[flap]
for flap in range(system_types.kNumFlaps)
if numpy.abs(self.basis[flap]) > 0.0
}
}
}
}
class AirDensityParameterRange(ParameterRange):
"""A parameter range overriding the simulator's air density."""
def __init__(self, air_densities, distribution):
"""Constructor.
Args:
air_densities: A list of air densities defining this parameter range.
distribution: Parameter for specifying probability distribution.
"""
super(AirDensityParameterRange, self).__init__(
'Air Density [kg/m^3]', air_densities, distribution)
def GetOverrides(self, value):
return {
'sim': {
'phys_sim': {
'air_density': value
}
}
}
class WindSpeedParameterRange(ParameterRange):
"""A parameter range overriding the wind speed."""
def __init__(self, wind_speeds, wind_shear_ref_height_agl=None,
t_updates=None, max_wind_speed=None):
"""Constructor.
Args:
wind_speeds: A list of wind speeds defining this parameter range.
wind_shear_ref_height_agl: Above-ground-level reference height [m] for
the wind shear model.
t_updates: A list of times [s] when wind speed updates are applied.
max_wind_speed: Speed [m/s] used to saturate the mean wind speed before
the second entry and after the third entry in t_updates.
"""
if wind_shear_ref_height_agl is not None:
label = 'Wind Speed [m/s] @ %.f [m] AGL' % wind_shear_ref_height_agl
else:
label = 'Wind Speed [m/s]'
super(WindSpeedParameterRange, self).__init__(
label, wind_speeds, distribution=None)
self._wind_shear_ref_height_agl = wind_shear_ref_height_agl
self._t_updates = t_updates
self._max_wind_speed = max_wind_speed
def GetOverrides(self, value):
    assert value >= 0, ('Wind speed must be non-negative. '
'Use WindDirectionDegParameterRange override to assign '
'appropriate direction.')
overrides = {
'sim': {
'phys_sim': {
'wind_speed': value
}
}
}
if self._wind_shear_ref_height_agl is not None:
overrides['sim']['phys_sim']['wind_shear_ref_height_agl'] = (
self._wind_shear_ref_height_agl)
if self._t_updates is not None:
num_updates = len(self._t_updates)
assert num_updates == 3, (
'The wind speed saturation logic in batch sims requires 3 updates.')
offset_value = min(0.0, self._max_wind_speed - value)
wind_speed_offsets = [{
't_update': self._t_updates[0],
'offset': offset_value,
}, {
't_update': self._t_updates[1],
'offset': 0.0,
}, {
't_update': self._t_updates[2],
'offset': offset_value,
}]
overrides['sim']['phys_sim']['wind_speed_update'] = (
overrides_util.PreprocessWindSpeedUpdates(wind_speed_offsets))
return overrides
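# A minimal sketch of the saturation logic above: with a 12 m/s mean wind and a
# max_wind_speed of 10 m/s, a -2 m/s offset is applied before the second update
# time and after the third one. The update times here are hypothetical.
def _example_wind_speed_saturation_overrides():
  example_range = WindSpeedParameterRange(
      [12.0], wind_shear_ref_height_agl=80.0,
      t_updates=[100.0, 200.0, 300.0], max_wind_speed=10.0)
  return example_range.GetOverrides(12.0)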
class WindDirectionDegParameterRange(ParameterRange):
"""A parameter range overriding the wind_direction."""
def __init__(self, wind_directions, distribution):
"""Constructor.
Args:
wind_directions: A list of wind directions [deg].
distribution: Parameter for specifying probability distribution.
"""
super(WindDirectionDegParameterRange, self).__init__(
'Wind Direction [deg]', wind_directions, distribution)
def GetOverrides(self, value):
return {
'sim': {
'phys_sim': {
'wind_direction': numpy.deg2rad(value % 360.0)
}
}
}
class WindElevationDegParameterRange(ParameterRange):
"""A parameter range overriding the wind elevation."""
def __init__(self, elevations, distribution):
"""Constructor.
Args:
elevations: A list of wind elevations [deg].
distribution: Parameter for specifying probability distribution.
"""
super(WindElevationDegParameterRange, self).__init__(
'Wind Elevation [deg]', elevations, distribution)
def GetOverrides(self, value):
return {
'sim': {
'phys_sim': {
'wind_elevation': numpy.deg2rad(value)
}
}
}
class WindDatabaseInitialTimeParameterRange(ParameterRange):
"""A parameter range overriding the initial time of the wind database."""
def __init__(self, times, distribution):
"""Constructor.
Args:
times: A list of initial times [s].
distribution: Parameter for specifying probability distribution.
"""
super(WindDatabaseInitialTimeParameterRange, self).__init__(
'Wind Database Initial Time [s]', times, distribution)
def GetOverrides(self, value):
return {
'sim': {
'phys_sim': {
'wind_database_initial_time': value
}
}
}
class WindDatabaseYOffsetParameterRange(ParameterRange):
"""A parameter range overriding the y offset of the wind database."""
def __init__(self, offset_positions, distribution):
"""Constructor.
Args:
offset_positions: A list of offset positions [m].
distribution: Parameter for specifying probability distribution.
"""
super(WindDatabaseYOffsetParameterRange, self).__init__(
'Wind Database Y offset [m]', offset_positions, distribution)
def GetOverrides(self, value):
return {
'sim': {
'phys_sim': {
'wind_database_y_offset': value
}
}
}
class WindVeerDegParameterRange(ParameterRange):
"""A parameter range overriding the wind veer."""
def __init__(self, wind_directions, distribution, start_height_agl=100.0,
end_height_agl=400.0):
"""Constructor.
Args:
wind_directions: A list of changes in wind direction [deg].
distribution: Parameter for specifying probability distribution.
start_height_agl: Height [m] above-ground-level at which to start
the direction change.
end_height_agl: Height [m] above-ground-level at which to end
the direction change.
"""
label = 'Wind Veer [deg] from %.f to %.f [m] AGL' % (start_height_agl,
end_height_agl)
super(WindVeerDegParameterRange, self).__init__(label, wind_directions,
distribution)
self._start_height = start_height_agl
self._end_height = end_height_agl
def GetOverrides(self, value):
return {
'sim': {
'phys_sim': {
'wind_veer': numpy.deg2rad(value),
'wind_veer_start_height_agl': self._start_height,
'wind_veer_end_height_agl': self._end_height,
}
}
}
class WindShearExponentParameterRange(ParameterRange):
"""A parameter range overriding the wind shear exponent."""
def __init__(self, wind_shear_exponents):
"""Constructor.
Args:
wind_shear_exponents: A list of exponents defining this parameter range.
"""
super(WindShearExponentParameterRange, self).__init__(
'Wind Shear [#]', wind_shear_exponents, distribution=None)
def GetOverrides(self, value):
return {
'sim': {
'phys_sim': {
'wind_shear_exponent': value,
}
}
}
class PitotPitchDegParameterRange(ParameterRange):
"""A parameter range offsetting the pitch angle of the Pitot."""
def __init__(self, angles_deg, distribution):
"""Constructor.
Args:
angles_deg: A list of pitch angles [deg] defining this parameter range.
distribution: Parameter for specifying probability distribution.
"""
super(PitotPitchDegParameterRange, self).__init__(
'Pitot Pitch Angle Offset [deg]', angles_deg, distribution)
def GetOverrides(self, value):
return {
'sim': {
'pitots_sim': {
0: {'pitch_offset': numpy.deg2rad(value)},
1: {'pitch_offset': numpy.deg2rad(value)}
}
}
}
class PitotYawDegParameterRange(ParameterRange):
"""A parameter range offsetting the yaw angle of the Pitot."""
def __init__(self, angles_deg, distribution):
"""Constructor.
Args:
angles_deg: A list of yaw angles [deg] defining this parameter range.
distribution: Parameter for specifying probability distribution.
"""
super(PitotYawDegParameterRange, self).__init__(
'Pitot Yaw Angle Offset [deg]', angles_deg, distribution)
def GetOverrides(self, value):
return {
'sim': {
'pitots_sim': {
0: {'yaw_offset': numpy.deg2rad(value)},
1: {'yaw_offset': numpy.deg2rad(value)}
}
}
}
class PitotCpOffsetParameterRange(ParameterRange):
"""A parameter range offsetting the pressure coefficient of the Pitot."""
def __init__(self, cp_offsets, distribution):
"""Constructor.
Args:
cp_offsets: A list of pressure coefficient offsets [#] defining this
parameter range.
distribution: Parameter for specifying probability distribution.
"""
super(PitotCpOffsetParameterRange, self).__init__('Pitot Cp Offset [#]',
cp_offsets, distribution)
def GetOverrides(self, value):
return {
'sim': {
'pitots_sim': {
0: {'local_pressure_coeff_offset': value},
1: {'local_pressure_coeff_offset': value}
}
}
}
class BuoyModelParameterRange(ParameterRange):
"""Class for defining uncertainties in buoy model parameters."""
def __init__(self, name, offsets, distribution, category, variable):
"""Constructor.
Args:
name: Name of the uncertain variable.
offsets: A list of uncertainties defining this parameter range.
distribution: Parameter for specifying probability distribution.
category: String identifying the model parameter category in the buoy_sim
dictionary.
variable: String identifying the variable.
"""
super(BuoyModelParameterRange, self).__init__(name, offsets, distribution)
self.category = category
self.variable = variable
def GetOverrides(self, value):
return {
'sim': {
'buoy_sim': {
self.category: {
'uncertainties': {
self.variable: value,
}
}
}
}
}
class FaultParameterRange(ParameterRange):
"""A parameter range adding a single fault to a simulation.
Attributes:
label: Name for this parameter range.
"""
def __init__(self, label, t_start, t_end, component, fault_type,
parameters_start, parameters_end,
num_parameters):
"""Constructor for the range of faults.
A given fault is applied with varying parameters by linearly interpolating
between two parameter sets.
Args:
label: The name for this parameter range.
t_start: When the fault should start.
t_end: When the fault should end.
component: Name of the component for the fault.
fault_type: Type of fault to apply.
parameters_start: Initial parameter values for the sweep.
parameters_end: Final parameter values for the sweep.
num_parameters: Number of parameters to include in the sweep.
"""
self._t_start = t_start
self._t_end = t_end
self._component = component
self._fault_type = fault_type
self.values = [None for _ in range(num_parameters)]
for parameter_idx in range(num_parameters):
if num_parameters == 1:
interp = 1.0
else:
interp = parameter_idx / (num_parameters - 1.0)
self.values[parameter_idx] = (
(1.0 - interp) * numpy.array(parameters_start)
+ interp * numpy.array(parameters_end))
super(FaultParameterRange, self).__init__(
label, self.values, distribution=None)
def GetDisplayValue(self, value):
return self._GetParameterValue(value)
def _GetParameterValue(self, parameters):
raise NotImplementedError
def GetOverrides(self, value):
return {
'sim': {
'faults_sim': [{
't_start': self._t_start,
't_end': self._t_end,
'component': self._component,
'type': self._fault_type,
'parameters': value.tolist(),
}]
}
}
class ParameterRangeTable(OverridesTable):
"""An OverridesTable built out of two ParameterRanges.
Given two parameter ranges, constructs a Cartesian product of simulations
whose overrides combine those provided by each range.
"""
def __init__(self, name, x_parameter_range, y_parameter_range,
base_overrides=None, turbsim_database_selector=None,
wave_parameter_selector=None):
"""Constructor.
Args:
name: Name to give to this table.
x_parameter_range: A ParameterRange.
y_parameter_range: A ParameterRange.
base_overrides: Overrides to apply to all table entries.
turbsim_database_selector: Information about the online TurbSim folder
from which to pull wind databases.
wave_parameter_selector: Information about the sea state variations and
the parameter correlations.
"""
x_values = [
x_parameter_range.GetDisplayValue(v) for v in x_parameter_range.values
]
y_values = [
y_parameter_range.GetDisplayValue(v) for v in y_parameter_range.values
]
super(ParameterRangeTable, self).__init__(
name, x_parameter_range.label, x_values,
y_parameter_range.label, y_values, base_overrides=base_overrides)
dim = self.GetDimensions()
self._overrides_desc = [
[None for _ in range(dim[1])] for _ in range(dim[0])
]
self._overrides = [[None for _ in range(dim[1])] for _ in range(dim[0])]
self._overrides_limits = [
[None for _ in range(dim[1])] for _ in range(dim[0])
]
for x_idx, y_idx in numpy.ndindex((len(x_parameter_range.values),
len(y_parameter_range.values))):
overrides_desc = [
(x_parameter_range.label,
x_parameter_range.GetDisplayValue(x_parameter_range.values[x_idx])),
(y_parameter_range.label,
y_parameter_range.GetDisplayValue(y_parameter_range.values[y_idx]))
]
# Overrides specified in y_parameter range will step on those that
# are also specified in x_parameter range to allow custom sweep to
# modify overrides.
overrides = dict_util.UpdateNestedDict(
x_parameter_range.GetOverrides(x_parameter_range.values[x_idx]),
y_parameter_range.GetOverrides(y_parameter_range.values[y_idx]))
x_param_values = x_parameter_range.GetRangeValues()
y_param_values = y_parameter_range.GetRangeValues()
overrides_limits = [(x_param_values[0], x_param_values[-1])]
overrides_limits += [(y_param_values[0], y_param_values[-1])]
# If a TurbSimDatabaseSelector is given, get overrides based on wind
      # conditions and pick appropriate TurbSim database.
if turbsim_database_selector:
# For shear sweeps, shear value is in overrides, not base_overrides, so
# need to update them so turbsim_database_selector has the info needed.
updated_overrides = dict_util.UpdateNestedDict(
base_overrides, overrides)
specific_override, file_index = (
turbsim_database_selector.GetSpecificOverride(updated_overrides, 0))
overrides_desc += [(turbsim_database_selector.label, file_index)]
        # Specific override affects turbsim only, so use merge instead of
# update to ensure we're not specifying turbsim database in
# conflicting flags.
overrides = dict_util.MergeNestedDicts(overrides, specific_override)
num_databases_with_wind_condition = (
turbsim_database_selector.GetNumAppropriateDatabases(
updated_overrides))
overrides_limits += [(0, num_databases_with_wind_condition-1)]
if wave_parameter_selector:
overrides = wave_parameter_selector.AddWaveOverrides(
overrides, base_overrides)
self._overrides_desc[x_idx][y_idx] = overrides_desc
self._overrides[x_idx][y_idx] = overrides
self._overrides_limits[x_idx][y_idx] = overrides_limits
def GetOverridesDesc(self, x_idx, y_idx):
return self._overrides_desc[x_idx][y_idx]
def GetOverridesLimits(self, x_idx, y_idx):
return self._overrides_limits[x_idx][y_idx]
def GetOverrides(self, x_idx, y_idx):
return self._overrides[x_idx][y_idx]
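# A minimal sketch of a Cartesian sweep built from two of the ranges defined
# above, assuming the full makani config environment is available (the empty
# base overrides are illustrative only).
def _example_parameter_range_table():
  x_range = WindSpeedParameterRange([5.0, 10.0, 15.0])
  y_range = WindDirectionDegParameterRange([0.0, 90.0], distribution=None)
  return ParameterRangeTable('Example sweep', x_range, y_range,
                             base_overrides={})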
class ParameterRangeMonteCarloTable(OverridesTable):
"""An OverridesTable for performing Monte Carlo Trials.
Given a list of parameter ranges, randomly selects values and runs
simulations.
"""
def __init__(self, name, dim, parameter_ranges, randomize_seed=True,
base_overrides=None, turbsim_database_selector=None,
wave_parameter_selector=None):
"""Constructor.
Args:
name: Name to give to this table.
dim: Tuple containing the number of rows and columns.
parameter_ranges: List of ParameterRange.
randomize_seed: Whether to randomize the simulator's seed.
base_overrides: Overrides to be applied to each simulation.
turbsim_database_selector: Information about the online TurbSim folder
from which to pull wind databases from.
wave_parameter_selector: Information about the sea state variations and
the parameter correlations.
"""
super(ParameterRangeMonteCarloTable, self).__init__(
name, 'Row', range(0, dim[0]), 'Column', range(0, dim[1]),
base_overrides=base_overrides)
self._overrides_desc = [
[None for _ in range(dim[1])] for _ in range(dim[0])
]
self._overrides = [[None for _ in range(dim[1])] for _ in range(dim[0])]
self._overrides_limits = [
[None for _ in range(dim[1])] for _ in range(dim[0])
]
for x_idx, y_idx in numpy.ndindex((dim[0], dim[1])):
if randomize_seed:
seed_offset = _random.Generator().randint(65536)
overrides_desc = [('Random Seed Offset', seed_offset)]
overrides = {'sim': {'random_seed_offset': seed_offset}}
overrides_limits = [('NA', 'NA')]
else:
overrides_desc = []
overrides = {}
overrides_limits = []
# Do not override any parameter for the first cell of each block.
if x_idx == 0 and y_idx == 0:
overrides_desc = []
overrides = {}
overrides_limits = []
else:
for parameter_range in parameter_ranges:
random_override, value = parameter_range.GetRandomOverride()
overrides_desc += [(parameter_range.label, value)]
overrides = dict_util.MergeNestedDicts(overrides, random_override)
param_values = parameter_range.GetRangeValues()
overrides_limits += [(param_values[0], param_values[-1])]
# If a TurbSimDatabaseSelector is given, get overrides based on wind
# conditions and pick appropriate TurbSim database.
if turbsim_database_selector:
if x_idx == 0 and y_idx == 0:
# Use baseline wind database for first cell of each block.
merged_overrides = dict_util.MergeNestedDicts(overrides,
base_overrides)
specific_override, file_index = (
turbsim_database_selector.GetSpecificOverride(merged_overrides,
0))
overrides = dict_util.MergeNestedDicts(overrides, specific_override)
overrides_desc = []
overrides_limits = []
else:
          # Since the TurbSimDatabaseSelector class lives in its own util file,
          # the random number generation is kept here in parameter_tables.
random_num = _random.Generator().randint(
turbsim_database_selector.GetNumAppropriateDatabases(
base_overrides))
random_override, file_index = (
turbsim_database_selector.GetSpecificOverride(base_overrides,
random_num))
overrides = dict_util.MergeNestedDicts(overrides, random_override)
overrides_desc += [(turbsim_database_selector.label, file_index)]
num_databases_with_wind_condition = (
turbsim_database_selector.GetNumAppropriateDatabases(
base_overrides))
overrides_limits += [(0, num_databases_with_wind_condition-1)]
if wave_parameter_selector and not (x_idx == 0 and y_idx == 0):
overrides = wave_parameter_selector.AddWaveOverrides(
overrides, base_overrides)
self._overrides_desc[x_idx][y_idx] = overrides_desc
self._overrides[x_idx][y_idx] = overrides
self._overrides_limits[x_idx][y_idx] = overrides_limits
def GetOverridesDesc(self, x_idx, y_idx):
return self._overrides_desc[x_idx][y_idx]
def GetOverridesLimits(self, x_idx, y_idx):
return self._overrides_limits[x_idx][y_idx]
def GetOverrides(self, x_idx, y_idx):
return self._overrides[x_idx][y_idx]
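# A minimal sketch of a 2x2 Monte Carlo block drawing wind speeds from a
# uniform distribution, assuming gflags have been parsed and the makani config
# environment is available; the bounds used here are illustrative only.
def _example_monte_carlo_table():
  wind_range = CustomParameterRange(
      'Example wind speed', ['sim', 'phys_sim', 'wind_speed'], [8.0, 12.0],
      {'type': 'uniform', 'lower_bound': 8.0, 'upper_bound': 12.0})
  return ParameterRangeMonteCarloTable('Example Monte Carlo', (2, 2),
                                       [wind_range], base_overrides={})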
class DisturbancesSimClient(OverridesTableSimClient):
"""Abstract client for generating tables."""
def __init__(self, output_dir, base_overrides, x_parameter_range,
disturbances, scoring_functions, **kwargs):
"""Constructor for a disturbance sim.
Args:
output_dir: Directory to write outputs.
base_overrides: Overrides to apply to each table.
x_parameter_range: Variable to sweep along the x_axis.
disturbances: A dictionary mapping table names to disturbance ranges.
scoring_functions: A list of ScoringFunctions.
**kwargs: See client_base.BatchSimClient.
"""
tables = [
ParameterRangeTable(disturbance_name,
x_parameter_range, disturbance_range,
base_overrides=base_overrides)
for (disturbance_name, disturbance_range) in disturbances
]
super(DisturbancesSimClient, self).__init__(
output_dir, tables, scoring_functions, **kwargs)
|
apache-2.0
|
waterponey/scikit-learn
|
examples/cluster/plot_cluster_iris.py
|
350
|
2593
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display, first, what a K-means algorithm would yield
using three clusters. They then show the effect of a bad
initialization on the classification process:
by setting n_init to only 1 (the default is 10), the number of
times the algorithm is run with different centroid seeds is
reduced.
The next plot displays what using eight clusters would deliver,
and finally the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
fignum = fignum + 1
# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
|
bsd-3-clause
|
marioluan/mit-opencourseware-cs
|
600/unit-2/sampling-and-monte-carlo-simulation/problem-set/ps6/ps6_pkgtest.py
|
3
|
11790
|
##import matplotlib
import numpy
import pylab
data = [(4.9,'2011/03/12 23:57:34'),
(4.9,'2011/03/12 23:53:45'),
(5.0,'2011/03/12 23:51:24'),
(5.2,'2011/03/12 23:40:49'),
(5.1,'2011/03/12 23:37:24'),
(6.1,'2011/03/12 23:24:50'),
(5.4,'2011/03/12 23:20:42'),
(3.0,'2011/03/12 23:12:18'),
(4.7,'2011/03/12 22:53:35'),
(4.8,'2011/03/12 22:42:39'),
(5.6,'2011/03/12 22:31:27'),
(6.3,'2011/03/12 22:12:46'),
(4.9,'2011/03/12 22:05:17'),
(4.5,'2011/03/12 21:58:40'),
(5.3,'2011/03/12 21:58:17'),
(5.5,'2011/03/12 21:48:09'),
(5.0,'2011/03/12 21:40:58'),
(5.0,'2011/03/12 21:38:35'),
(4.8,'2011/03/12 20:48:48'),
(4.9,'2011/03/12 20:41:25'),
(4.3,'2011/03/12 20:16:59'),
(5.2,'2011/03/12 20:09:55'),
(5.0,'2011/03/12 20:08:25'),
(4.8,'2011/03/12 19:59:01'),
(4.6,'2011/03/12 19:55:28'),
(5.0,'2011/03/12 19:45:19'),
(4.7,'2011/03/12 19:43:47'),
(4.8,'2011/03/12 19:38:08'),
(4.9,'2011/03/12 19:22:47'),
(5.1,'2011/03/12 19:11:59'),
(4.8,'2011/03/12 19:01:05'),
(4.5,'2011/03/12 18:51:37'),
(4.8,'2011/03/12 18:38:38'),
(5.0,'2011/03/12 18:28:39'),
(4.7,'2011/03/12 18:09:15'),
(4.7,'2011/03/12 17:59:57'),
(4.7,'2011/03/12 17:54:19'),
(4.8,'2011/03/12 17:51:27'),
(4.8,'2011/03/12 17:48:13'),
(4.8,'2011/03/12 17:40:56'),
(6.0,'2011/03/12 17:19:24'),
(5.1,'2011/03/12 17:13:02'),
(5.2,'2011/03/12 17:11:09'),
(5.1,'2011/03/12 17:01:22'),
(4.8,'2011/03/12 16:55:41'),
(4.7,'2011/03/12 16:48:14'),
(5.0,'2011/03/12 16:38:45'),
(4.9,'2011/03/12 16:36:41'),
(5.0,'2011/03/12 16:22:15'),
(4.8,'2011/03/12 16:19:05'),
(4.7,'2011/03/12 16:07:39'),
(4.8,'2011/03/12 16:03:56'),
(4.9,'2011/03/12 14:53:21'),
(5.7,'2011/03/12 14:43:09'),
(5.6,'2011/03/12 14:35:00'),
(4.9,'2011/03/12 14:14:56'),
(5.3,'2011/03/12 14:11:05'),
(5.8,'2011/03/12 14:03:30'),
(5.2,'2011/03/12 13:57:12'),
(5.3,'2011/03/12 13:26:56'),
(6.4,'2011/03/12 13:15:42'),
(5.8,'2011/03/12 12:53:50'),
(4.9,'2011/03/12 12:50:28'),
(4.9,'2011/03/12 12:43:13'),
(4.8,'2011/03/12 12:27:17'),
(4.9,'2011/03/12 12:15:31'),
(4.9,'2011/03/12 12:06:57'),
(4.3,'2011/03/12 12:06:16'),
(4.4,'2011/03/12 12:03:43'),
(5.7,'2011/03/12 11:46:01'),
(4.9,'2011/03/12 11:39:05'),
(4.9,'2011/03/12 11:20:16'),
(4.9,'2011/03/12 11:05:00'),
(6.1,'2011/03/12 10:53:31'),
(4.7,'2011/03/12 10:49:24'),
(5.0,'2011/03/12 10:39:12'),
(5.3,'2011/03/12 10:34:49'),
(5.5,'2011/03/12 10:20:22'),
(4.4,'2011/03/12 10:17:09'),
(4.9,'2011/03/12 10:06:12'),
(5.0,'2011/03/12 10:00:26'),
(5.2,'2011/03/12 09:51:35'),
(4.9,'2011/03/12 09:47:59'),
(5.1,'2011/03/12 09:40:44'),
(5.0,'2011/03/12 09:27:12'),
(5.4,'2011/03/12 09:18:56'),
(4.7,'2011/03/12 09:05:33'),
(5.2,'2011/03/12 09:00:03'),
(4.6,'2011/03/12 08:58:24'),
(5.0,'2011/03/12 08:52:50'),
(5.0,'2011/03/12 08:45:30'),
(5.0,'2011/03/12 08:38:40'),
(4.8,'2011/03/12 08:38:04'),
(4.9,'2011/03/12 08:30:22'),
(4.6,'2011/03/12 08:22:07'),
(5.0,'2011/03/12 08:13:42'),
(5.2,'2011/03/12 07:54:10'),
(4.8,'2011/03/12 07:50:54'),
(4.7,'2011/03/12 07:46:42'),
(4.9,'2011/03/12 07:38:06'),
(4.7,'2011/03/12 07:30:18'),
(4.9,'2011/03/12 07:21:00'),
(5.1,'2011/03/12 07:18:53'),
(5.1,'2011/03/12 07:13:35'),
(5.0,'2011/03/12 07:07:32'),
(4.7,'2011/03/12 07:02:21'),
(4.5,'2011/03/12 06:53:53'),
(4.8,'2011/03/12 06:49:12'),
(4.8,'2011/03/12 06:44:01'),
(4.8,'2011/03/12 06:39:26'),
(5.0,'2011/03/12 06:36:00'),
(4.9,'2011/03/12 06:29:10'),
(2.9,'2011/03/12 06:20:47'),
(5.5,'2011/03/12 06:18:43'),
(5.5,'2011/03/12 06:10:44'),
(5.1,'2011/03/12 06:10:23'),
(5.2,'2011/03/12 06:00:25'),
(5.1,'2011/03/12 05:58:59'),
(5.0,'2011/03/12 05:14:51'),
(5.3,'2011/03/12 04:52:58'),
(5.1,'2011/03/12 04:47:19'),
(5.0,'2011/03/12 04:43:04'),
(4.7,'2011/03/12 04:37:21'),
(5.2,'2011/03/12 04:06:09'),
(5.5,'2011/03/12 04:04:49'),
(5.1,'2011/03/12 03:54:48'),
(5.3,'2011/03/12 03:34:46'),
(5.3,'2011/03/12 03:29:28'),
(4.8,'2011/03/12 03:21:44'),
(5.7,'2011/03/12 03:11:59'),
(5.8,'2011/03/12 03:01:49'),
(5.6,'2011/03/12 02:47:36'),
(2.6,'2011/03/12 02:43:49'),
(5.0,'2011/03/12 02:43:11'),
(5.2,'2011/03/12 02:34:05'),
(4.8,'2011/03/12 02:27:50'),
(4.9,'2011/03/12 02:13:51'),
(4.9,'2011/03/12 02:07:21'),
(4.8,'2011/03/12 02:04:55'),
(5.2,'2011/03/12 01:59:44'),
(6.8,'2011/03/12 01:47:16'),
(6.2,'2011/03/12 01:46:21'),
(5.2,'2011/03/12 01:43:20'),
(6.0,'2011/03/12 01:34:10'),
(5.1,'2011/03/12 01:25:04'),
(6.1,'2011/03/12 01:19:07'),
(5.7,'2011/03/12 01:17:41'),
(5.4,'2011/03/12 01:17:02'),
(4.8,'2011/03/12 01:12:16'),
(5.1,'2011/03/12 01:03:59'),
(5.5,'2011/03/12 00:45:10'),
(5.0,'2011/03/12 00:39:37'),
(5.0,'2011/03/12 00:25:08'),
(5.0,'2011/03/12 00:21:25'),
(3.0,'2011/03/12 00:04:09'),
(5.4,'2011/03/11 23:59:21'),
(5.3,'2011/03/11 23:58:04'),
(5.1,'2011/03/11 23:53:29'),
(5.1,'2011/03/11 23:40:12'),
(4.9,'2011/03/11 23:31:23'),
(5.3,'2011/03/11 23:26:51'),
(5.0,'2011/03/11 23:21:22'),
(4.9,'2011/03/11 23:05:15'),
(5.4,'2011/03/11 22:54:28'),
(5.8,'2011/03/11 22:51:18'),
(5.3,'2011/03/11 22:42:59'),
(5.0,'2011/03/11 22:36:57'),
(4.6,'2011/03/11 22:29:42'),
(4.9,'2011/03/11 22:22:36'),
(4.7,'2011/03/11 22:08:14'),
(2.9,'2011/03/11 22:01:26'),
(5.2,'2011/03/11 21:41:58'),
(4.8,'2011/03/11 21:34:25'),
(5.4,'2011/03/11 21:00:46'),
(5.1,'2011/03/11 20:41:24'),
(5.5,'2011/03/11 20:36:10'),
(5.1,'2011/03/11 20:34:40'),
(5.5,'2011/03/11 20:23:44'),
(6.3,'2011/03/11 20:11:23'),
(6.6,'2011/03/11 19:46:49'),
(3.2,'2011/03/11 19:46:27'),
(5.2,'2011/03/11 19:45:24'),
(5.5,'2011/03/11 19:31:56'),
(5.5,'2011/03/11 19:24:29'),
(6.1,'2011/03/11 19:02:59'),
(6.2,'2011/03/11 18:59:15'),
(4.9,'2011/03/11 18:55:15'),
(5.1,'2011/03/11 18:44:06'),
(4.9,'2011/03/11 18:43:14'),
(4.9,'2011/03/11 18:39:34'),
(5.9,'2011/03/11 18:17:06'),
(5.7,'2011/03/11 18:11:24'),
(4.7,'2011/03/11 18:02:39'),
(5.0,'2011/03/11 17:50:01'),
(5.4,'2011/03/11 17:32:14'),
(5.1,'2011/03/11 17:30:48'),
(5.0,'2011/03/11 17:23:57'),
(5.5,'2011/03/11 17:17:00'),
(4.8,'2011/03/11 17:15:00'),
(5.0,'2011/03/11 17:12:41'),
(5.0,'2011/03/11 16:55:53'),
(4.8,'2011/03/11 16:54:53'),
(5.0,'2011/03/11 16:34:22'),
(5.0,'2011/03/11 16:20:52'),
(5.5,'2011/03/11 16:11:27'),
(5.3,'2011/03/11 16:04:53'),
(5.0,'2011/03/11 15:55:23'),
(5.0,'2011/03/11 15:50:59'),
(5.0,'2011/03/11 15:46:02'),
(5.4,'2011/03/11 15:42:05'),
(4.9,'2011/03/11 15:36:16'),
(5.2,'2011/03/11 15:32:34'),
(5.6,'2011/03/11 15:19:38'),
(6.2,'2011/03/11 15:13:15'),
(5.0,'2011/03/11 15:01:39'),
(5.8,'2011/03/11 14:56:16'),
(5.4,'2011/03/11 14:54:04'),
(5.1,'2011/03/11 14:44:08'),
(5.4,'2011/03/11 14:26:31'),
(5.1,'2011/03/11 14:20:20'),
(2.6,'2011/03/11 14:18:29'),
(5.2,'2011/03/11 14:10:39'),
(5.5,'2011/03/11 14:00:38'),
(4.9,'2011/03/11 13:58:50'),
(5.2,'2011/03/11 13:55:28'),
(5.3,'2011/03/11 13:48:38'),
(5.6,'2011/03/11 13:43:10'),
(4.9,'2011/03/11 13:42:27'),
(5.6,'2011/03/11 13:34:36'),
(5.1,'2011/03/11 13:31:55'),
(2.5,'2011/03/11 13:21:37'),
(5.8,'2011/03/11 13:16:50'),
(5.1,'2011/03/11 13:15:45'),
(5.3,'2011/03/11 13:02:43'),
(5.3,'2011/03/11 12:59:21'),
(5.4,'2011/03/11 12:54:52'),
(5.6,'2011/03/11 12:49:01'),
(5.3,'2011/03/11 12:34:22'),
(5.2,'2011/03/11 12:33:19'),
(5.2,'2011/03/11 12:28:45'),
(5.3,'2011/03/11 12:24:37'),
(3.3,'2011/03/11 12:15:07'),
(5.9,'2011/03/11 12:12:53'),
(5.1,'2011/03/11 12:04:16'),
(5.5,'2011/03/11 11:56:16'),
(5.1,'2011/03/11 11:54:02'),
(5.8,'2011/03/11 11:46:47'),
(5.8,'2011/03/11 11:44:28'),
(6.5,'2011/03/11 11:36:39'),
(5.7,'2011/03/11 11:21:02'),
(5.5,'2011/03/11 11:16:51'),
(5.5,'2011/03/11 11:13:12'),
(5.5,'2011/03/11 11:10:58'),
(5.6,'2011/03/11 11:00:51'),
(5.1,'2011/03/11 10:58:06'),
(3.2,'2011/03/11 10:56:23'),
(5.0,'2011/03/11 10:52:08'),
(5.5,'2011/03/11 10:45:46'),
(5.3,'2011/03/11 10:35:36'),
(5.9,'2011/03/11 10:28:44'),
(5.6,'2011/03/11 10:20:27'),
(6.0,'2011/03/11 10:10:35'),
(5.2,'2011/03/11 09:59:57'),
(5.5,'2011/03/11 09:47:02'),
(2.5,'2011/03/11 09:45:08'),
(5.2,'2011/03/11 09:42:22'),
(5.4,'2011/03/11 09:37:08'),
(3.0,'2011/03/11 09:33:58'),
(2.9,'2011/03/11 09:24:53'),
(2.6,'2011/03/11 09:14:36'),
(2.6,'2011/03/11 09:10:26'),
(5.5,'2011/03/11 09:09:15'),
(2.8,'2011/03/11 09:05:22'),
(5.4,'2011/03/11 09:04:10'),
(2.5,'2011/03/11 09:03:56'),
(2.5,'2011/03/11 09:03:44'),
(3.3,'2011/03/11 09:03:38'),
(5.2,'2011/03/11 09:00:20'),
(4.6,'2011/03/11 08:58:26'),
(5.4,'2011/03/11 08:52:26'),
(5.5,'2011/03/11 08:46:48'),
(5.9,'2011/03/11 08:40:56'),
(6.1,'2011/03/11 08:31:08'),
(6.5,'2011/03/11 08:19:24'),
(6.2,'2011/03/11 08:15:41'),
(6.2,'2011/03/11 08:12:05'),
(5.5,'2011/03/11 08:10:31'),
(5.9,'2011/03/11 08:01:59'),
(5.6,'2011/03/11 07:56:16'),
(5.7,'2011/03/11 07:54:45'),
(5.8,'2011/03/11 07:42:55'),
(5.9,'2011/03/11 07:38:27'),
(4.4,'2011/03/11 07:36:12'),
(6.1,'2011/03/11 07:28:12'),
(6.1,'2011/03/11 07:25:33'),
(6.3,'2011/03/11 07:14:59'),
(5.9,'2011/03/11 07:13:47'),
(5.8,'2011/03/11 07:11:00'),
(6.3,'2011/03/11 06:57:15'),
(6.3,'2011/03/11 06:48:47'),
(7.1,'2011/03/11 06:25:51'),
(3.3,'2011/03/11 06:18:04'),
(6.8,'2011/03/11 06:15:40'),
(6.4,'2011/03/11 06:07:22'),
(6.4,'2011/03/11 06:06:11'),
(8.9,'2011/03/11 05:46:24'),
(3.4,'2011/03/11 04:51:25'),
(4.8,'2011/03/11 04:28:21'),
(4.5,'2011/03/11 04:05:41'),
(2.6,'2011/03/11 02:55:42'),
(2.9,'2011/03/11 02:52:08'),
(2.5,'2011/03/11 02:32:09'),
(2.8,'2011/03/11 01:02:00'),
(2.5,'2011/03/11 00:53:59'),
(4.0,'2011/03/11 00:25:29'),
(5.3,'2011/03/11 00:14:51'),
(4.9,'2011/03/10 22:44:26'),
(4.7,'2011/03/10 21:49:47'),
(2.8,'2011/03/10 20:49:08'),
(2.5,'2011/03/10 19:44:35'),
(5.0,'2011/03/10 19:06:11'),
(3.4,'2011/03/10 18:10:05'),
(6.5,'2011/03/10 17:08:37'),
(5.2,'2011/03/10 16:54:45'),
(3.2,'2011/03/10 15:56:25'),
(4.7,'2011/03/10 15:22:52'),
(4.7,'2011/03/10 14:30:34'),
(4.6,'2011/03/10 14:24:46'),
(2.7,'2011/03/10 13:45:34'),
(2.8,'2011/03/10 11:52:58'),
(5.1,'2011/03/10 11:21:08'),
(2.7,'2011/03/10 10:58:37'),
(5.2,'2011/03/10 09:02:22'),
(4.8,'2011/03/10 08:59:19'),
(5.7,'2011/03/10 08:08:21'),
(3.1,'2011/03/10 07:41:31'),
(4.8,'2011/03/10 07:33:04'),
(4.2,'2011/03/10 07:07:09'),
(3.2,'2011/03/10 06:19:01'),
(2.5,'2011/03/10 05:57:39'),
(2.5,'2011/03/10 05:01:11'),
(5.4,'2011/03/10 04:58:18'),
(4.6,'2011/03/10 04:26:48'),
(4.6,'2011/03/10 04:14:00'),
(2.8,'2011/03/10 02:34:17'),
(2.9,'2011/03/10 01:38:14'),
(5.0,'2011/03/10 01:20:24'),
(2.9,'2011/03/10 00:14:41'),
(4.8,'2011/03/09 23:57:42'),
(5.4,'2011/03/09 23:37:01'),
(2.6,'2011/03/09 22:41:30'),
(2.6,'2011/03/09 22:17:41'),
(6.5,'2011/03/09 21:24:52'),
(6.1,'2011/03/09 21:22:18'),
(4.9,'2011/03/09 21:00:58'),
(3.5,'2011/03/09 20:48:31'),
(6.0,'2011/03/09 18:44:35'),
(2.5,'2011/03/09 18:28:33'),
(3.7,'2011/03/09 18:25:26'),
(6.1,'2011/03/09 18:16:15'),
(4.9,'2011/03/09 17:57:27'),
(2.5,'2011/03/09 17:02:06'),
(2.5,'2011/03/09 16:12:03'),
(2.8,'2011/03/09 14:30:37'),
(4.8,'2011/03/09 14:24:06'),
(5.3,'2011/03/09 13:57:28'),
(3.2,'2011/03/09 13:55:24'),
(5.1,'2011/03/09 13:51:42'),
(5.0,'2011/03/09 13:24:08'),
(2.8,'2011/03/09 12:56:35'),
(2.6,'2011/03/09 12:14:14'),
(4.7,'2011/03/09 12:03:18'),
(5.1,'2011/03/09 11:27:52'),
(3.5,'2011/03/09 11:05:09'),
(4.7,'2011/03/09 10:13:40'),
(2.6,'2011/03/09 09:45:14'),
(4.8,'2011/03/09 08:55:38'),
(3.3,'2011/03/09 08:37:30'),
(5.3,'2011/03/09 08:02:36'),
(5.1,'2011/03/09 07:56:28'),
(5.0,'2011/03/09 07:13:48'),
(5.1,'2011/03/09 06:25:12'),
(4.9,'2011/03/09 06:12:13'),
(2.9,'2011/03/09 05:33:50'),
(4.7,'2011/03/09 05:27:06'),
(5.3,'2011/03/09 04:45:54'),
(5.7,'2011/03/09 04:37:04'),
(5.2,'2011/03/09 04:32:10'),
(3.0,'2011/03/09 04:17:17'),
(4.8,'2011/03/09 04:15:39'),
(5.2,'2011/03/09 04:05:54'),
(2.5,'2011/03/09 03:51:21'),
(5.0,'2011/03/09 03:19:00'),
(5.2,'2011/03/09 03:08:36'),
(5.6,'2011/03/09 02:57:17'),
(7.2,'2011/03/09 02:45:20'),
(4.6,'2011/03/09 01:47:47'),
(4.7,'2011/03/09 01:30:27')]
ydata = []
for t in data:
ydata.append(t[0])
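# Quick summary added for illustration: the largest magnitude in this sample.
print('Maximum magnitude: %.1f' % max(ydata))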
pylab.plot(ydata)
pylab.title('Earthquake Magnitude in Japan from 3/9-3/12')
pylab.xlabel('Time')
pylab.ylabel('Magnitude')
pylab.show()
|
mit
|
cdegroc/scikit-learn
|
examples/plot_lda_vs_qda.py
|
2
|
2711
|
"""
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print __doc__
from scipy import linalg
import numpy as np
import pylab as pl
import matplotlib as mpl
from sklearn.lda import LDA
from sklearn.qda import QDA
###############################################################################
# load sample dataset
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data[:, 0:2] # Take only 2 dimensions
y = iris.target
X = X[y > 0]
y = y[y > 0]
y -= 1
target_names = iris.target_names[1:]
###############################################################################
# LDA
lda = LDA()
y_pred = lda.fit(X, y, store_covariance=True).predict(X)
# QDA
qda = QDA()
y_pred = qda.fit(X, y, store_covariances=True).predict(X)
###############################################################################
# Plot results
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
    # filled Gaussian at 2 standard deviations
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
xx, yy = np.meshgrid(np.linspace(4, 8.5, 200), np.linspace(1.5, 4.5, 200))
X_grid = np.c_[xx.ravel(), yy.ravel()]
zz_lda = lda.predict_proba(X_grid)[:, 1].reshape(xx.shape)
zz_qda = qda.predict_proba(X_grid)[:, 1].reshape(xx.shape)
pl.figure()
splot = pl.subplot(1, 2, 1)
pl.contourf(xx, yy, zz_lda > 0.5, alpha=0.5)
pl.scatter(X[y == 0, 0], X[y == 0, 1], c='b', label=target_names[0])
pl.scatter(X[y == 1, 0], X[y == 1, 1], c='r', label=target_names[1])
pl.contour(xx, yy, zz_lda, [0.5], linewidths=2., colors='k')
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'b')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'r')
pl.legend()
pl.axis('tight')
pl.title('Linear Discriminant Analysis')
splot = pl.subplot(1, 2, 2)
pl.contourf(xx, yy, zz_qda > 0.5, alpha=0.5)
pl.scatter(X[y == 0, 0], X[y == 0, 1], c='b', label=target_names[0])
pl.scatter(X[y == 1, 0], X[y == 1, 1], c='r', label=target_names[1])
pl.contour(xx, yy, zz_qda, [0.5], linewidths=2., colors='k')
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'b')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'r')
pl.legend()
pl.axis('tight')
pl.title('Quadratic Discriminant Analysis')
pl.show()
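# Optional numeric complement to the decision-boundary plots (not part of the
# original example): training accuracy of the two fitted models.
print('LDA training accuracy: %.3f' % np.mean(lda.predict(X) == y))
print('QDA training accuracy: %.3f' % np.mean(qda.predict(X) == y))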
|
bsd-3-clause
|
nvoron23/statsmodels
|
statsmodels/tsa/seasonal.py
|
27
|
5392
|
"""
Seasonal Decomposition by Moving Averages
"""
from statsmodels.compat.python import lmap, range, iteritems
import numpy as np
from pandas.core.nanops import nanmean as pd_nanmean
from .filters._utils import _maybe_get_pandas_wrapper_freq
from .filters.filtertools import convolution_filter
from statsmodels.tsa.tsatools import freq_to_period
def seasonal_mean(x, freq):
"""
Return means for each period in x. freq is an int that gives the
number of periods per cycle. E.g., 12 for monthly. NaNs are ignored
in the mean.
"""
return np.array([pd_nanmean(x[i::freq]) for i in range(freq)])
def seasonal_decompose(x, model="additive", filt=None, freq=None):
"""
Parameters
----------
x : array-like
Time series
model : str {"additive", "multiplicative"}
Type of seasonal component. Abbreviations are accepted.
filt : array-like
The filter coefficients for filtering out the seasonal component.
The default is a symmetric moving average.
freq : int, optional
Frequency of the series. Must be used if x is not a pandas
object with a timeseries index.
Returns
-------
results : obj
        An object with seasonal, trend, resid and observed attributes.
Notes
-----
This is a naive decomposition. More sophisticated methods should
be preferred.
The additive model is Y[t] = T[t] + S[t] + e[t]
The multiplicative model is Y[t] = T[t] * S[t] * e[t]
The seasonal component is first removed by applying a convolution
filter to the data. The average of this smoothed series for each
period is the returned seasonal component.
See Also
--------
statsmodels.tsa.filters.convolution_filter
"""
_pandas_wrapper, pfreq = _maybe_get_pandas_wrapper_freq(x)
x = np.asanyarray(x).squeeze()
nobs = len(x)
if not np.all(np.isfinite(x)):
raise ValueError("This function does not handle missing values")
if model.startswith('m'):
if np.any(x <= 0):
raise ValueError("Multiplicative seasonality is not appropriate "
"for zero and negative values")
if pfreq is not None:
pfreq = freq_to_period(pfreq)
if freq and pfreq != freq:
raise ValueError("Inferred frequency of index and frequency "
"don't match. This function does not re-sample")
else:
freq = pfreq
elif freq is None:
raise ValueError("You must specify a freq or x must be a "
"pandas object with a timeseries index")
if filt is None:
if freq % 2 == 0: # split weights at ends
filt = np.array([.5] + [1] * (freq - 1) + [.5]) / freq
else:
filt = np.repeat(1./freq, freq)
trend = convolution_filter(x, filt)
# nan pad for conformability - convolve doesn't do it
if model.startswith('m'):
detrended = x / trend
else:
detrended = x - trend
period_averages = seasonal_mean(detrended, freq)
if model.startswith('m'):
period_averages /= np.mean(period_averages)
else:
period_averages -= np.mean(period_averages)
seasonal = np.tile(period_averages, nobs // freq + 1)[:nobs]
if model.startswith('m'):
resid = x / seasonal / trend
else:
resid = detrended - seasonal
results = lmap(_pandas_wrapper, [seasonal, trend, resid, x])
return DecomposeResult(seasonal=results[0], trend=results[1],
resid=results[2], observed=results[3])
class DecomposeResult(object):
def __init__(self, **kwargs):
for key, value in iteritems(kwargs):
setattr(self, key, value)
self.nobs = len(self.observed)
def plot(self):
from statsmodels.graphics.utils import _import_mpl
plt = _import_mpl()
fig, axes = plt.subplots(4, 1, sharex=True)
if hasattr(self.observed, 'plot'): # got pandas use it
self.observed.plot(ax=axes[0], legend=False)
axes[0].set_ylabel('Observed')
self.trend.plot(ax=axes[1], legend=False)
axes[1].set_ylabel('Trend')
self.seasonal.plot(ax=axes[2], legend=False)
axes[2].set_ylabel('Seasonal')
self.resid.plot(ax=axes[3], legend=False)
axes[3].set_ylabel('Residual')
else:
axes[0].plot(self.observed)
axes[0].set_ylabel('Observed')
axes[1].plot(self.trend)
axes[1].set_ylabel('Trend')
axes[2].plot(self.seasonal)
axes[2].set_ylabel('Seasonal')
axes[3].plot(self.resid)
axes[3].set_ylabel('Residual')
axes[3].set_xlabel('Time')
axes[3].set_xlim(0, self.nobs)
fig.tight_layout()
return fig
if __name__ == "__main__":
x = np.array([-50, 175, 149, 214, 247, 237, 225, 329, 729, 809,
530, 489, 540, 457, 195, 176, 337, 239, 128, 102,
232, 429, 3, 98, 43, -141, -77, -13, 125, 361, -45, 184])
results = seasonal_decompose(x, freq=4)
from pandas import DataFrame, DatetimeIndex
data = DataFrame(x, DatetimeIndex(start='1/1/1951',
periods=len(x),
freq='Q'))
res = seasonal_decompose(data)
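    # Illustrative sanity check (not part of the original module): for the
    # additive model the components should add back up to the observed series
    # wherever the trend is defined (the convolution filter leaves NaNs at the
    # ends of the series).
    recomposed = results.trend + results.seasonal + results.resid
    valid = ~np.isnan(recomposed)
    assert np.allclose(recomposed[valid], x[valid])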
|
bsd-3-clause
|
jzt5132/scikit-learn
|
benchmarks/bench_random_projections.py
|
397
|
8900
|
"""
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transformer):
    gc.collect()
    clf = clone(transformer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non-zero entries and
# Gaussian-distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
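# Example shape check (illustrative only):
#   X_dense, X_csr = make_sparse_random_data(100, 1000, 500, random_state=0)
#   X_dense.shape == (100, 1000); X_csr.nnz <= 500 (duplicate coordinates,
#   if any, are summed when converting from COO to CSR).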
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
    print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
            time_to_fit, time_to_transform = bench_scikit_transformer(
                X, transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
|
bsd-3-clause
|
girving/tensorflow
|
tensorflow/contrib/estimator/python/estimator/dnn_with_layer_annotations_test.py
|
6
|
22246
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dnn_with_layer_annotations.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import six
from tensorflow.contrib.estimator.python.estimator import dnn_with_layer_annotations
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.canned import dnn
from tensorflow.python.estimator.canned import dnn_testing_utils
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _dnn_classifier_fn(*args, **kwargs):
return dnn_with_layer_annotations.DNNClassifierWithLayerAnnotations(
*args, **kwargs)
class DNNWarmStartingTest(dnn_testing_utils.BaseDNNWarmStartingTest,
test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNWarmStartingTest.__init__(self, _dnn_classifier_fn,
_dnn_regressor_fn)
class DNNWithLayerAnnotationsClassifierEvaluateTest(
dnn_testing_utils.BaseDNNClassifierEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__(
self, _dnn_classifier_fn)
class DNNClassifierWithLayerAnnotationsPredictTest(
dnn_testing_utils.BaseDNNClassifierPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierPredictTest.__init__(
self, _dnn_classifier_fn)
class DNNClassifierWithLayerAnnotationsTrainTest(
dnn_testing_utils.BaseDNNClassifierTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierTrainTest.__init__(
self, _dnn_classifier_fn)
def _dnn_regressor_fn(*args, **kwargs):
return dnn_with_layer_annotations.DNNRegressorWithLayerAnnotations(
*args, **kwargs)
class DNNWithLayerAnnotationsTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def _getLayerAnnotationCollection(self, graph, collection_name):
keys = graph.get_collection(
dnn_with_layer_annotations.LayerAnnotationsCollectionNames.keys(
collection_name))
values = graph.get_collection(
dnn_with_layer_annotations.LayerAnnotationsCollectionNames.values(
collection_name))
if len(keys) != len(values):
raise ValueError('keys and values should have same length. lengths were: '
'%d and %d, and elements were %s and %s' %
(len(keys), len(values), keys, values))
return dict(zip(keys, values))
def _testAnnotationsPresentForEstimator(self, estimator_class):
feature_columns = [
feature_column.numeric_column('x', shape=(1,)),
feature_column.embedding_column(
feature_column.categorical_column_with_vocabulary_list(
'y', vocabulary_list=['a', 'b', 'c']),
dimension=3)
]
estimator = estimator_class(
hidden_units=(2, 2),
feature_columns=feature_columns,
model_dir=self._model_dir)
model_fn = estimator.model_fn
graph = ops.Graph()
with graph.as_default():
model_fn({
'x': array_ops.constant([1.0]),
'y': array_ops.constant(['a'])
}, {},
model_fn_lib.ModeKeys.PREDICT,
config=None)
unprocessed_features = self._getLayerAnnotationCollection(
graph, dnn_with_layer_annotations.LayerAnnotationsCollectionNames
.UNPROCESSED_FEATURES)
processed_features = self._getLayerAnnotationCollection(
graph, dnn_with_layer_annotations.LayerAnnotationsCollectionNames
.PROCESSED_FEATURES)
feature_columns = graph.get_collection(
dnn_with_layer_annotations.LayerAnnotationsCollectionNames
.FEATURE_COLUMNS)
self.assertItemsEqual(unprocessed_features.keys(), ['x', 'y'])
self.assertEqual(2, len(processed_features.keys()))
self.assertEqual(2, len(feature_columns))
def testAnnotationsPresentForClassifier(self):
self._testAnnotationsPresentForEstimator(
dnn_with_layer_annotations.DNNClassifierWithLayerAnnotations)
def testAnnotationsPresentForRegressor(self):
self._testAnnotationsPresentForEstimator(
dnn_with_layer_annotations.DNNRegressorWithLayerAnnotations)
def _testCheckpointCompatibleWithNonAnnotatedEstimator(
self, train_input_fn, predict_input_fn, non_annotated_class,
annotated_class, prediction_key, estimator_args):
input_dimension = 2
feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))
]
estimator = non_annotated_class(
model_dir=self._model_dir,
hidden_units=(2, 2),
feature_columns=feature_columns,
**estimator_args)
estimator.train(train_input_fn, steps=10)
predictions = np.array(
[x[prediction_key] for x in estimator.predict(predict_input_fn)])
annotated_estimator = annotated_class(
model_dir=self._model_dir,
hidden_units=(2, 2),
feature_columns=feature_columns,
warm_start_from=self._model_dir,
**estimator_args)
annotated_predictions = np.array([
x[prediction_key] for x in annotated_estimator.predict(predict_input_fn)
])
self.assertAllEqual(predictions.shape, annotated_predictions.shape)
for i, (a, b) in enumerate(
zip(predictions.flatten(), annotated_predictions.flatten())):
self.assertAlmostEqual(a, b, msg='index=%d' % i)
def testCheckpointCompatibleForClassifier(self):
n_classes = 2
input_dimension = 2
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
x_data = data.reshape(batch_size, input_dimension)
y_data = np.reshape(
np.rint(data[:batch_size]).astype(np.int64), (batch_size, 1))
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data}, batch_size=batch_size, shuffle=False)
self._testCheckpointCompatibleWithNonAnnotatedEstimator(
train_input_fn,
predict_input_fn,
dnn.DNNClassifier,
dnn_with_layer_annotations.DNNClassifierWithLayerAnnotations,
prediction_key=prediction_keys.PredictionKeys.PROBABILITIES,
estimator_args={'n_classes': n_classes})
def testCheckpointCompatibleForRegressor(self):
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data}, batch_size=batch_size, shuffle=False)
self._testCheckpointCompatibleWithNonAnnotatedEstimator(
train_input_fn,
predict_input_fn,
dnn.DNNRegressor,
dnn_with_layer_annotations.DNNRegressorWithLayerAnnotations,
prediction_key=prediction_keys.PredictionKeys.PREDICTIONS,
estimator_args={'label_dimension': label_dimension})
class DNNRegressorWithLayerAnnotationsEvaluateTest(
dnn_testing_utils.BaseDNNRegressorEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
self, _dnn_regressor_fn)
class DNNRegressorWithLayerAnnotationsPredictTest(
dnn_testing_utils.BaseDNNRegressorPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
self, _dnn_regressor_fn)
class DNNRegressorWithLayerAnnotationsTrainTest(
dnn_testing_utils.BaseDNNRegressorTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
self, _dnn_regressor_fn)
def _queue_parsed_features(feature_map):
tensors_to_enqueue = []
keys = []
for key, tensor in six.iteritems(feature_map):
keys.append(key)
tensors_to_enqueue.append(tensor)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = data_flow_ops.FIFOQueue(capacity=100, dtypes=queue_dtypes)
queue_runner.add_queue_runner(
queue_runner.QueueRunner(input_queue,
[input_queue.enqueue(tensors_to_enqueue)]))
dequeued_tensors = input_queue.dequeue()
return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
class DNNRegressorWithLayerAnnotationsIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
input_dimension, label_dimension, batch_size):
feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))
]
est = dnn_with_layer_annotations.DNNRegressorWithLayerAnnotations(
hidden_units=(2, 2),
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predictions = np.array([
x[prediction_keys.PredictionKeys.PREDICTIONS]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data}, y=data, batch_size=batch_size, shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data}, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
label_dimension = 1
batch_size = 10
data = np.linspace(0., 2., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y':
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
class DNNClassifierWithLayerAnnotationsIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _as_label(self, data_in_float):
return np.rint(data_in_float).astype(np.int64)
def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
input_dimension, n_classes, batch_size):
feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))
]
est = dnn_with_layer_annotations.DNNClassifierWithLayerAnnotations(
hidden_units=(2, 2),
feature_columns=feature_columns,
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predicted_proba = np.array([
x[prediction_keys.PredictionKeys.PROBABILITIES]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
n_classes = 3
input_dimension = 2
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
x_data = data.reshape(batch_size, input_dimension)
y_data = np.reshape(self._as_label(data[:batch_size]), (batch_size, 1))
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data}, y=y_data, batch_size=batch_size, shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data}, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
input_dimension = 1
n_classes = 3
batch_size = 10
data = np.linspace(0., n_classes - 1., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(self._as_label(data))
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
n_classes = 3
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y':
feature_pb2.Feature(
int64_list=feature_pb2.Int64List(
value=self._as_label(datum[:1]))),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([1], dtypes.int64),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
if __name__ == '__main__':
test.main()
|
apache-2.0
|
saimn/astropy
|
astropy/visualization/wcsaxes/ticklabels.py
|
8
|
9824
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from matplotlib import rcParams
from matplotlib.text import Text
from .frame import RectangularFrame
def sort_using(X, Y):
return [x for (y, x) in sorted(zip(Y, X))]
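# For example (illustrative): sort_using(['a', 'b', 'c'], [3, 1, 2]) returns
# ['b', 'c', 'a'], i.e. X reordered by ascending values of Y.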
class TickLabels(Text):
def __init__(self, frame, *args, **kwargs):
self.clear()
self._frame = frame
super().__init__(*args, **kwargs)
self.set_clip_on(True)
self.set_visible_axes('all')
self.set_pad(rcParams['xtick.major.pad'])
self._exclude_overlapping = False
# Stale if either xy positions haven't been calculated, or if
# something changes that requires recomputing the positions
self._stale = True
# Check rcParams
if 'color' not in kwargs:
self.set_color(rcParams['xtick.color'])
if 'size' not in kwargs:
self.set_size(rcParams['xtick.labelsize'])
def clear(self):
self.world = {}
self.pixel = {}
self.angle = {}
self.text = {}
self.disp = {}
def add(self, axis, world, pixel, angle, text, axis_displacement):
if axis not in self.world:
self.world[axis] = [world]
self.pixel[axis] = [pixel]
self.angle[axis] = [angle]
self.text[axis] = [text]
self.disp[axis] = [axis_displacement]
else:
self.world[axis].append(world)
self.pixel[axis].append(pixel)
self.angle[axis].append(angle)
self.text[axis].append(text)
self.disp[axis].append(axis_displacement)
self._stale = True
def sort(self):
"""
Sort by axis displacement, which allows us to figure out which parts
of labels to not repeat.
"""
for axis in self.world:
self.world[axis] = sort_using(self.world[axis], self.disp[axis])
self.pixel[axis] = sort_using(self.pixel[axis], self.disp[axis])
self.angle[axis] = sort_using(self.angle[axis], self.disp[axis])
self.text[axis] = sort_using(self.text[axis], self.disp[axis])
self.disp[axis] = sort_using(self.disp[axis], self.disp[axis])
self._stale = True
def simplify_labels(self):
"""
Figure out which parts of labels can be dropped to avoid repetition.
"""
self.sort()
for axis in self.world:
t1 = self.text[axis][0]
for i in range(1, len(self.world[axis])):
t2 = self.text[axis][i]
if len(t1) != len(t2):
t1 = self.text[axis][i]
continue
start = 0
# In the following loop, we need to ignore the last character,
# hence the len(t1) - 1. This is because if we have two strings
# like 13d14m15s we want to make sure that we keep the last
# part (15s) even if the two labels are identical.
for j in range(len(t1) - 1):
if t1[j] != t2[j]:
break
if t1[j] not in '-0123456789.':
start = j + 1
t1 = self.text[axis][i]
if start != 0:
starts_dollar = self.text[axis][i].startswith('$')
self.text[axis][i] = self.text[axis][i][start:]
if starts_dollar:
self.text[axis][i] = '$' + self.text[axis][i]
# Remove any empty LaTeX inline math mode string
if self.text[axis][i] == '$$':
self.text[axis][i] = ''
self._stale = True
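    # Illustrative example (comment only): for tick labels
    #   ['13d14m15s', '13d14m30s', '13d15m00s']
    # simplify_labels() keeps the first label intact and trims the repeated
    # leading fields from the later ones, giving
    #   ['13d14m15s', '30s', '15m00s'].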
def set_pad(self, value):
self._pad = value
self._stale = True
def get_pad(self):
return self._pad
def set_visible_axes(self, visible_axes):
self._visible_axes = visible_axes
self._stale = True
def get_visible_axes(self):
if self._visible_axes == 'all':
return self.world.keys()
else:
return [x for x in self._visible_axes if x in self.world]
def set_exclude_overlapping(self, exclude_overlapping):
self._exclude_overlapping = exclude_overlapping
def _set_xy_alignments(self, renderer, tick_out_size):
"""
Compute and set the x, y positions and the horizontal/vertical alignment of
each label.
"""
if not self._stale:
return
self.simplify_labels()
text_size = renderer.points_to_pixels(self.get_size())
visible_axes = self.get_visible_axes()
self.xy = {axis: {} for axis in visible_axes}
self.ha = {axis: {} for axis in visible_axes}
self.va = {axis: {} for axis in visible_axes}
for axis in visible_axes:
for i in range(len(self.world[axis])):
# In the event that the label is empty (which is not expected
# but could happen in unforeseen corner cases), we should just
# skip to the next label.
if self.text[axis][i] == '':
continue
x, y = self.pixel[axis][i]
pad = renderer.points_to_pixels(self.get_pad() + tick_out_size)
if isinstance(self._frame, RectangularFrame):
# This is just to preserve the current results, but can be
# removed next time the reference images are re-generated.
if np.abs(self.angle[axis][i]) < 45.:
ha = 'right'
va = 'bottom'
dx = -pad
dy = -text_size * 0.5
elif np.abs(self.angle[axis][i] - 90.) < 45:
ha = 'center'
va = 'bottom'
dx = 0
dy = -text_size - pad
elif np.abs(self.angle[axis][i] - 180.) < 45:
ha = 'left'
va = 'bottom'
dx = pad
dy = -text_size * 0.5
else:
ha = 'center'
va = 'bottom'
dx = 0
dy = pad
x = x + dx
y = y + dy
else:
# This is the more general code for arbitrarily oriented
# axes
# Set initial position and find bounding box
self.set_text(self.text[axis][i])
self.set_position((x, y))
bb = super().get_window_extent(renderer)
# Find width and height, as well as angle at which we
# transition which side of the label we use to anchor the
# label.
width = bb.width
height = bb.height
# Project axis angle onto bounding box
ax = np.cos(np.radians(self.angle[axis][i]))
ay = np.sin(np.radians(self.angle[axis][i]))
# Set anchor point for label
if np.abs(self.angle[axis][i]) < 45.:
dx = width
dy = ay * height
elif np.abs(self.angle[axis][i] - 90.) < 45:
dx = ax * width
dy = height
elif np.abs(self.angle[axis][i] - 180.) < 45:
dx = -width
dy = ay * height
else:
dx = ax * width
dy = -height
dx *= 0.5
dy *= 0.5
# Find normalized vector along axis normal, so as to be
# able to nudge the label away by a constant padding factor
dist = np.hypot(dx, dy)
ddx = dx / dist
ddy = dy / dist
dx += ddx * pad
dy += ddy * pad
x = x - dx
y = y - dy
ha = 'center'
va = 'center'
self.xy[axis][i] = (x, y)
self.ha[axis][i] = ha
self.va[axis][i] = va
self._stale = False
def _get_bb(self, axis, i, renderer):
"""
        Get the bounding box of an individual label. N.B. _set_xy_alignments()
must be called before this method.
"""
if self.text[axis][i] == '':
return
self.set_text(self.text[axis][i])
self.set_position(self.xy[axis][i])
self.set_ha(self.ha[axis][i])
self.set_va(self.va[axis][i])
return super().get_window_extent(renderer)
def draw(self, renderer, bboxes, ticklabels_bbox, tick_out_size):
if not self.get_visible():
return
self._set_xy_alignments(renderer, tick_out_size)
for axis in self.get_visible_axes():
for i in range(len(self.world[axis])):
# This implicitly sets the label text, position, alignment
bb = self._get_bb(axis, i, renderer)
if bb is None:
continue
                # TODO: the problem here is that we might discard a label
                # whose leading part is important, e.g. -0:30, where the -0
                # prefix has been dropped from all the other labels.
if not self._exclude_overlapping or bb.count_overlaps(bboxes) == 0:
super().draw(renderer)
bboxes.append(bb)
ticklabels_bbox[axis].append(bb)
|
bsd-3-clause
|
xiaoxiamii/scikit-learn
|
sklearn/metrics/tests/test_ranking.py
|
127
|
40813
|
from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
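# Worked example (comment only): with y_true = [0, 0, 1, 1] and
# y_score = [0.1, 0.4, 0.35, 0.8], three of the four positive/negative pairs
# are ranked correctly, so _auc returns 0.75.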
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
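# Worked example (comment only): with y_true = [0, 0, 1, 1] and
# y_score = [0.1, 0.4, 0.35, 0.8], the precisions at the two positive
# documents are 1 and 2/3, so _average_precision returns (1 + 2/3) / 2,
# about 0.83.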
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
expected_auc = _auc(y_true, probas_pred)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
    # Make sure that roc_curve returns a curve that starts at 0 and ends at
    # 1, even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
# Test to ensure that we don't return spurious repeating thresholds.
# Duplicated thresholds can arise due to machine precision issues.
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
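    # NOTE: roc_curve returns (fpr, tpr, thresholds); the variable names below
    # are swapped, but the expected values account for the actual return order.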
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
    # Test Precision-Recall and area under PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
    # Test that average_precision_score and roc_auc_score are invariant under
    # scaling or shifting of the probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
    # Check on several small examples that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
    # Raise ValueError if the input format is not appropriate
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
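                # With all scores tied, every label gets the worst rank
                # (n_labels), so each relevant label contributes
                # n_relevant / n_labels, which is also the expected mean.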
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
    # Check that label ranking average precision works for various cases.
    # Basic check with increasing label space size and decreasing score
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
            # Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
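                # The relevant label at offset r has rank pos + r + 1 and has
                # exactly r + 1 relevant labels ranked at or above it, which
                # gives the precision terms summed in the expected value below.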
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
        # The best rank corresponds to 1. Ranks higher than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
        # Ranks need to be corrected to take ties into account,
        # e.g. two labels tied for rank 1 both get rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
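        # Worked example (illustrative values): y_score[i] = [0.4, 0.4, 0.1]
        # gives unique_rank = [0.1, 0.4], inv_rank = [1, 1, 0], rank = [1, 1, 2],
        # corr_rank = cumsum(bincount([1, 1, 2], minlength=3)) = [0, 2, 3],
        # so the corrected ranks are [2, 2, 3]: both tied labels get rank 2.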
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
            # Count the number of relevant labels ranked at or above this one
            # (i.e. with a smaller or equal rank).
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
    # Non-trivial case
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
    # Non-trivial case
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
|
bsd-3-clause
|
Ossada/DLS-UVVis
|
test_selektor.py
|
1
|
2198
|
from matplotlib.widgets import RectangleSelector
from matplotlib.path import Path
import numpy as np
import matplotlib.pyplot as plt
import os
from tkinter import filedialog
class SelectFromCollection(object):
"""Select indices from a matplotlib collection using `LassoSelector`.
Selected indices are saved in the `ind` attribute. This tool highlights
selected points by fading them out (i.e., reducing their alpha values).
If your collection has alpha < 1, this tool will permanently alter them.
Note that this tool selects collection objects based on their *origins*
(i.e., `offsets`).
Parameters
----------
ax : :class:`~matplotlib.axes.Axes`
Axes to interact with.
collection : :class:`matplotlib.collections.Collection` subclass
Collection you want to select from.
alpha_other : 0 <= float <= 1
To highlight a selection, this tool sets all selected points to an
alpha value of 1 and non-selected points to `alpha_other`.
"""
def __init__(self, ax, collection, alpha_other=0.3):
self.canvas = ax.figure.canvas
self.collection = collection
self.alpha_other = alpha_other
self.xys = collection.get_offsets()
self.Npts = len(self.xys)
# Ensure that we have separate colors for each object
self.fc = collection.get_facecolors()
if len(self.fc) == 0:
raise ValueError('Collection must have a facecolor')
elif len(self.fc) == 1:
self.fc = np.tile(self.fc, self.Npts).reshape(self.Npts, -1)
        self.lasso = RectangleSelector(ax, onselect=self.onselect)  # Changed from the original code (RectangleSelector instead of LassoSelector)
self.ind = []
    def onselect(self, eclick, erelease):
        # RectangleSelector passes the press and release mouse events; build
        # the selection rectangle as a Path and keep the points inside it.
        x0, x1 = sorted((eclick.xdata, erelease.xdata))
        y0, y1 = sorted((eclick.ydata, erelease.ydata))
        path = Path([(x0, y0), (x1, y0), (x1, y1), (x0, y1)])
        self.ind = np.nonzero([path.contains_point(xy) for xy in self.xys])[0]
        self.fc[:, -1] = self.alpha_other
        self.fc[self.ind, -1] = 1
        self.collection.set_facecolors(self.fc)
        self.canvas.draw_idle()
def disconnect(self):
self.lasso.disconnect_events()
self.fc[:, -1] = 1
self.collection.set_facecolors(self.fc)
self.canvas.draw_idle()
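# A minimal usage sketch (commented out, illustrative only; assumes an
# interactive matplotlib backend, and the random scatter data is made up):
#
#     fig, ax = plt.subplots()
#     pts = ax.scatter(np.random.rand(50), np.random.rand(50))
#     selector = SelectFromCollection(ax, pts)
#     plt.show()
#     print(selector.ind)      # indices of the selected points
#     selector.disconnect()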
if __name__ == '__main__':
|
mit
|
GenericMappingTools/gmt-python
|
examples/gallery/images/track_sampling.py
|
1
|
1614
|
"""
Sampling along tracks
---------------------
The :func:`pygmt.grdtrack` function samples a raster grid's value along specified
points. We will need to input a 2D raster to ``grid`` which can be an
:class:`xarray.DataArray`. The argument passed to the ``points`` parameter can be a
:class:`pandas.DataFrame` table where the first two columns are x and y (or longitude
and latitude). Note also that there is a ``newcolname`` parameter that will be used to
name the new column of values sampled from the grid.
Alternatively, a NetCDF file path can be passed to ``grid``. An ASCII file path can
also be accepted for ``points``. To save an output ASCII file, a file name argument
needs to be passed to the ``outfile`` parameter.
"""
import pygmt
# Load sample grid and point datasets
grid = pygmt.datasets.load_earth_relief()
points = pygmt.datasets.load_ocean_ridge_points()
# Sample the bathymetry along the world's ocean ridges at specified track points
track = pygmt.grdtrack(points=points, grid=grid, newcolname="bathymetry")
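# A hedged sketch (commented out, not part of this example) of the file-based
# alternative described in the module docstring; the file names below are
# hypothetical placeholders:
# pygmt.grdtrack(points="ridge_points.txt", grid="earth_relief_grid.nc",
#                newcolname="bathymetry", outfile="sampled_bathymetry.txt")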
fig = pygmt.Figure()
# Plot the earth relief grid on Cylindrical Stereographic projection, masking land areas
fig.basemap(region="g", projection="Cyl_stere/150/-20/15c", frame=True)
fig.grdimage(grid=grid, cmap="gray")
fig.coast(land="#666666")
# Plot the sampled bathymetry points using circles (c) of 0.15 cm size
# Points are colored using elevation values (normalized for visual purposes)
fig.plot(
x=track.longitude,
y=track.latitude,
style="c0.15c",
cmap="terra",
color=(track.bathymetry - track.bathymetry.mean()) / track.bathymetry.std(),
)
fig.show()
|
bsd-3-clause
|
georgyberdyshev/ascend
|
models/johnpye/fprops/cycle_plot.py
|
1
|
15015
|
# -*- coding: utf8 -*-
import extpy, sys
from solverreporter import *
from __builtin__ import *
import sys, os, os.path
sys.path.append(os.path.expanduser("~/ascend/models/johnpye/fprops/python"))
import fprops
try:
from pylab import *
except:
pass
#--- for (T,s) plots ---
def sat_curve(D):
Tt = D.T_t
Tc = D.T_c
TT = []
pp = []
ssf = []
ssg = []
for T in linspace(Tt,Tc,100):
# TODO this is inefficient because of repeated saturation solutions.
SF = D.set_Tx(T,0)
SG = D.set_Tx(T,1)
TT.append(SF.T - 273.15)
pp.append(SF.p)
ssf.append(SF.s/1.e3)
ssg.append(SG.s/1.e3)
plot(ssf,TT,"b--")
plot(ssg,TT,"r--")
class TSPoint:
def __init__(self,T,s):
self.T = T
self.s = s
def write(msg):
extpy.getbrowser().reporter.reportNote(msg)
def pconst(S1,S2,n):
"""Return a set of (T,s) points between two states, with pressure held constant."""
D = fprops.fluid(str(S1.cd.component.getSymbolValue()))
out = []
hh = linspace(float(S1.h), float(S2.h), n)
for h in hh:
S = D.set_ph(float(S1.p), h)
out += [TSPoint(S.T,S.s)]
return out
def plot_Ts(SS,style='b-'):
xx = []
yy = []
for S in SS:
yy.append(float(S.T) - 273.15)
xx.append(float(S.s)/1.e3)
plot(xx,yy,style)
#--- for (T,H) plots ---
class THPoint:
def __init__(self,T,h,mdot = 1.):
self.T = T
self.h = h
self.mdot = mdot
def pconsth(S1,S2,n):
"""Return a set of (T,H) points between two states, with pressure constant"""
D = fprops.fluid(str(S1.cd.component.getSymbolValue()))
out = []
hh = linspace(float(S1.h), float(S2.h), n)
mdot = float(S1.mdot)
for h in hh:
# TODO add try/except
S = D.set_ph(float(S1.p),h)
out += [THPoint(S.T,h * mdot)]
return out
def plot_TH(SS,style='b-',Href = 0):
xx = []
yy = []
for S in SS:
yy.append(float(S.T) - 273.15)
xx.append(((float(S.h)*float(S.mdot)) - Href)/1.e6)
plot(xx,yy,style)
#--- various Rankine cycle configurations ---
def cycle_plot_rankine(self):
"""Plot T-s diagram for a simple Rankine cycle"""
import loading
loading.load_matplotlib(throw=True)
ioff()
figure()
hold(1)
D = fprops.fluid(str(self.cd.component.getSymbolValue()))
sat_curve(D)
boiler_curve = pconst(self.BO.inlet, self.BO.outlet,100)
condenser_curve = pconst(self.CO.inlet,self.CO.outlet,100)
SS = [self.PU.outlet, self.BO.inlet] + boiler_curve + [self.TU.inlet, self.TU.outlet] + condenser_curve + [self.CO.outlet, self.PU.outlet]
plot_Ts(SS)
title(unicode(r"Rankine cycle with %s" % D.name))
ylabel(unicode(r"T / [°C]"))
aa = axis(); axis([aa[0],aa[1],-100,600])
xlabel("s / [kJ/kg/K]")
extpy.getbrowser().reporter.reportNote("Plotting completed")
ion()
show()
#savefig(os.path.expanduser("~/Desktop/rankine.eps"))
def cycle_plot_rankine_reheat(self):
"""Plot T-s diagram for a reheat Rankine cycle"""
import loading
loading.load_matplotlib(throw=True)
ioff()
figure()
hold(1)
D = fprops.fluid(str(self.cd.component.getSymbolValue()))
sat_curve(D)
boiler1_curve = pconst(self.BO1.inlet, self.BO1.outlet,100)
boiler2_curve = pconst(self.BO2.inlet, self.BO2.outlet,50)
condenser_curve = pconst(self.CO.inlet,self.CO.outlet,100)
SS = [self.PU.outlet, self.BO1.inlet] + \
boiler1_curve + [self.TU1.inlet, self.TU1.outlet] + \
boiler2_curve + [self.TU2.inlet, self.TU2.outlet] + \
condenser_curve + [self.CO.outlet, self.PU.outlet]
plot_Ts(SS)
plot_Ts(
[self.PU.inlet, self.BO1.inlet, self.TU1.inlet, self.BO2.inlet
,self.TU2.inlet, self.CO.inlet]
,'bo'
)
title(unicode(r"Reheat Rankine cycle with %s" % D.name))
ylabel(unicode(r"T / [°C]"))
aa = axis(); axis([aa[0],aa[1],-100,600])
xlabel("s / [kJ/kg/K]")
extpy.getbrowser().reporter.reportNote("Plotting completed")
ion()
show()
#savefig(os.path.expanduser("~/Desktop/rankine-reheat.eps"))
def cycle_plot_rankine_regen2(self):
"""Plot T-s diagram for a regenerative Rankine cycle (bleed steam regen)"""
import loading
loading.load_matplotlib(throw=True)
ioff()
figure()
hold(1)
D = fprops.fluid(str(self.cd.component.getSymbolValue()))
sat_curve(D)
boiler_curve = pconst(self.BO.inlet, self.BO.outlet,100)
condenser_curve = pconst(self.CO.inlet,self.CO.outlet,100)
SS = [self.PU1.inlet, self.PU1.outlet] + \
pconst(self.HE.inlet, self.HE.outlet, 100) + \
[self.PU2.inlet, self.PU2.outlet] + \
boiler_curve + \
[self.TU1.inlet, self.TU1.outlet, self.TU2.outlet] + \
condenser_curve + [self.PU1.inlet]
plot_Ts(SS)
plot_Ts(
[self.PU1.inlet, self.PU1.outlet, self.HE.inlet, self.HE.outlet,
self.PU2.inlet, self.PU2.outlet, self.TU1.inlet, self.TU1.outlet,
self.TU2.outlet, self.PU1.inlet]
,'bo'
)
# line for the heat exchanger
plot_Ts(pconst(self.HE.inlet_heat, self.HE.outlet,100),'b:')
title(unicode(r"Regenerative Rankine cycle with %s" % D.name))
ylabel(unicode(r"T / [°C]"))
aa = axis(); axis([aa[0],aa[1],-100,600])
xlabel("s / [kJ/kg/K]")
extpy.getbrowser().reporter.reportNote("Plotting completed")
ion()
show()
#savefig(os.path.expanduser("~/Desktop/regen2.eps"))
def cycle_plot_rankine_regen1(self):
"""Plot T-s diagram for a regenerative Rankine cycle"""
import loading
loading.load_matplotlib(throw=True)
ioff()
figure()
hold(1)
D = fprops.fluid(str(self.cd.component.getSymbolValue()))
sat_curve(D)
boiler_curve = pconst(self.BO.inlet, self.BO.outlet,100)
condenser_curve = pconst(self.CO.inlet,self.CO.outlet,100)
he_hot = pconst(self.HE.inlet_heat, self.HE.outlet_heat,100)
he_cold = pconst(self.HE.inlet, self.HE.outlet,100)
SS = [self.PU.outlet] + he_cold + [self.BO.inlet] + boiler_curve + [self.TU.inlet, self.TU.outlet] + he_hot + condenser_curve + [self.PU.inlet, self.PU.outlet]
plot_Ts(SS)
plot_Ts(
[self.PU.outlet,self.BO.inlet,self.TU.inlet, self.TU.outlet
,self.HE.outlet_heat, self.PU.inlet, self.PU.outlet]
,'bo'
)
# dotted lines for the heat exchanger
plot_Ts([self.HE.inlet_heat, self.HE.outlet],'b:')
plot_Ts([self.HE.outlet_heat, self.HE.inlet],'b:')
title(unicode(r"Regenerative Rankine cycle with %s" % D.name))
ylabel(unicode(r"T / [°C]"))
aa = axis(); axis([aa[0],aa[1],-100,600])
xlabel("s / [kJ/kg/K]")
extpy.getbrowser().reporter.reportNote("Plotting completed")
ion()
show()
#savefig(os.path.expanduser("~/Desktop/regen1.eps"))
#--- heat exchange (T,H) plot ---
def heater_closed_plot(self):
"""Plot T-H diagram of heat transfer in a heater_closed model"""
import loading
loading.load_matplotlib(throw=True)
ioff()
figure()
hold(1)
D = fprops.fluid(str(self.cd.component.getSymbolValue()))
HE = self.HE
extpy.getbrowser().reporter.reportNote("Fluid is %s" % D.name)
plot_TH(pconsth(HE.inlet_heat, HE.outlet_heat, 50),'r-',
Href = (float(HE.outlet_heat.h)*float(HE.outlet_heat.mdot))\
)
plot_TH(pconsth(HE.inlet, HE.outlet, 50),'b-',
Href = (float(HE.inlet.h)*float(HE.inlet.mdot))\
)
title(unicode(r"Closed feedwater heater with %s" % D.name))
ylabel(unicode(r"T / [°C]"))
xlabel("H / [MW]")
extpy.getbrowser().reporter.reportNote("Plotting completed")
ion()
show()
#savefig(os.path.expanduser("~/Desktop/heater_closed.eps"))
#--- the big one: a combined-cycle GT ---
def cycle_plot_ccgt(self):
"""Plot T-s diagram for combined-cycle gas turbine"""
import loading
loading.load_matplotlib(throw=True)
ioff()
figure()
D = fprops.fluid(str(self.cd_rankine.component.getSymbolValue()))
# plot gas turbine cycle
SS = [self.GC.inlet, self.GC.outlet, self.GT.inlet, self.GT.outlet, self.HE.inlet, self.HE.outlet, self.GC.inlet]
plot_Ts(SS,'g-')
plot_Ts(SS,'go')
hold(1)
sat_curve(D)
boiler_curve = pconst(self.HE.inlet_cold,self.HE.outlet_cold,100)
condenser_curve = pconst(self.CO.inlet,self.CO.outlet,100)
SS2 = [self.PU.outlet, self.HE.inlet_cold] + boiler_curve + [self.HE.outlet_cold, self.TU.inlet, self.TU.outlet, self.CO.inlet] + condenser_curve + [self.CO.outlet, self.PU.inlet, self.PU.outlet]
plot_Ts(SS2)
plot_Ts([self.PU.outlet, self.HE.inlet_cold,self.HE.outlet_cold, self.TU.inlet, self.TU.outlet, self.CO.inlet,self.CO.outlet, self.PU.inlet, self.PU.outlet],'bo')
title(unicode(r"Combined cycle with air and %s" % D.name))
ylabel(unicode(r"T / [°C]"))
xlabel("s / [kJ/kg/K]")
extpy.getbrowser().reporter.reportNote("Plotting completed")
ion()
show()
#savefig(os.path.expanduser("~/Desktop/ccgt.eps"))
#savefig(os.path.expanduser("~/Desktop/ccgt.png"))
#--- simple gas turbine models ---
def cycle_plot_brayton_regen(self):
"""Plot T-s diagran for regenerative gas turbine"""
import loading
loading.load_matplotlib(throw=True)
ioff()
figure()
D = fprops.fluid(str(self.cd.component.getSymbolValue()))
sat_curve(D)
# plot gas turbine cycle
regen_cold_curve = pconst(self.RE.inlet, self.RE.outlet, 50)
burner_curve = pconst(self.BO.inlet, self.BO.outlet,50)
regen_hot_curve = pconst(self.RE.inlet_hot, self.RE.outlet_hot, 50)
diss_curve = pconst(self.CO.inlet, self.CO.outlet,50)
SS = [self.PU.inlet, self.PU.outlet, self.RE.inlet] + regen_cold_curve + [self.RE.outlet, self.BO.inlet] + burner_curve + [self.BO.outlet, self.TU.inlet, self.TU.outlet,self.RE.inlet_hot] + regen_hot_curve + [self.RE.outlet_hot, self.CO.inlet] + diss_curve + [self.CO.outlet,self.PU.inlet]
plot_Ts(SS,'g-')
SS2 = [self.PU.inlet, self.PU.outlet, self.RE.inlet, self.RE.outlet, self.BO.inlet, self.BO.outlet, self.TU.inlet, self.TU.outlet,self.RE.inlet_hot, self.RE.outlet_hot, self.CO.inlet, self.CO.outlet,self.PU.inlet]
plot_Ts(SS2,'go')
SS3 = [self.RE.inlet, self.RE.outlet_hot]
plot_Ts(SS3,'g--')
SS4 = [self.RE.outlet, self.RE.inlet_hot]
plot_Ts(SS4,'g--')
hold(1)
title(unicode(r"Regenerative Brayton cycle"))
ylabel(unicode(r"T / [°C]"))
xlabel("s / [kJ/kg/K]")
extpy.getbrowser().reporter.reportNote("Plotting completed")
ion()
show()
#savefig(os.path.expanduser("~/Desktop/brayton_regen.eps"))
def cycle_plot_brayton_reheat_regen_intercool(self):
"""Plot T-s diagram for reheat-regenerative gas turbine"""
import loading
loading.load_matplotlib(throw=True)
ioff()
figure()
hold(1)
D = fprops.fluid(str(self.cd.component.getSymbolValue()),str(self.cd.type.getSymbolValue()))
sat_curve(D)
# add some dots for the points in the cycle
seq = "CO1.inlet DI2.inlet CO2.inlet RE.inlet BU1.inlet BU2.inlet TU1.inlet BU2.inlet TU2.inlet RE.inlet_hot DI1.inlet".split(" ")
lalign = "TU2.inlet RE.inlet_hot DI1.inlet DI2.inlet CO1.inlet".split(" ")
SS1 = []; SS1a = []
for s in seq:
print "looking at '%s'"%s
p = reduce(getattr,s.split("."),self)
SS1.append(p)
SS1a.append((p,s))
plot_Ts(SS1,'go')
print "ANNOTATIONS"
for s in SS1a:
align = "right"
if s[1] in lalign:
align = "left"
annotate(s[1]+" ", xy =(float(s[0].s)/1.e3,float(s[0].T) - 273.15)
,horizontalalignment=align
)
# plot the cycle with smooth curves
BU1_curve = pconst(self.BU1.inlet, self.BU1.outlet,30)
BU2_curve = pconst(self.BU2.inlet, self.BU2.outlet,20)
DI1_curve = pconst(self.DI1.inlet,self.DI1.outlet,20)
DI2_curve = pconst(self.DI2.inlet,self.DI2.outlet,20)
REH_curve = pconst(self.RE.inlet_hot,self.RE.outlet_hot,50)
REL_curve = pconst(self.RE.inlet,self.RE.outlet,50)
SS2 = [self.CO1.inlet, self.CO1.outlet] + DI2_curve + [self.CO2.inlet, self.CO2.outlet] + REL_curve + BU1_curve + [self.TU1.inlet, self.TU1.outlet] + BU2_curve + [self.TU2.inlet, self.TU2.outlet] + REH_curve + DI1_curve + [self.CO1.inlet]
plot_Ts(SS2,'g-')
SS3 = [self.RE.inlet, self.RE.outlet_hot]
plot_Ts(SS3,'g--')
SS4 = [self.RE.outlet, self.RE.inlet_hot]
plot_Ts(SS4,'g--')
title(unicode(r"Reheat Regenerative Brayton cycle with Intercooling"))
ylabel(unicode(r"T / [°C]"))
xlabel("s / [kJ/kg/K]")
extpy.getbrowser().reporter.reportNote("Plotting completed")
ion()
show()
def cycle_plot_brayton_split(self):
"""Plot T-s diagran for split-regeneration gas turbine"""
import loading
loading.load_matplotlib(throw=True)
ioff()
figure()
hold(1)
D = fprops.fluid(str(self.cd.component.getSymbolValue()),str(self.cd.type.getSymbolValue()))
sat_curve(D)
# add some dots for the points in the cycle
# seq = "CO2.inlet HEL.inlet HEL.outlet HEH.inlet BO.inlet TU.inlet HEH.inlet_hot HEL.inlet_hot CO1.inlet CO1.outlet".split(" ")
seq = "CO2.inlet HEL.inlet HEH.inlet BO.inlet TU.inlet HEH.inlet_hot HEH.outlet_hot CO1.inlet".split(" ")
lalign = "CO1.inlet HEH.outlet_hot ".split(" ")
SS1 = []; SS1a = []
for s in seq:
print "looking at '%s'"%s
p = reduce(getattr,s.split("."),self)
SS1.append(p)
SS1a.append((p,s))
plot_Ts(SS1,'go')
print "ANNOTATIONS"
for s in SS1a:
align = "right"
if s[1] in lalign:
align = "left"
annotate(s[1]+" ", xy =(float(s[0].s)/1.e3,float(s[0].T) - 273.15)
,horizontalalignment=align
)
SS2 = pconst(self.DI.inlet, self.DI.outlet, 50) + [self.CO2.inlet,self.CO2.outlet] + pconst(self.HEL.inlet,self.HEH.outlet,50) + pconst(self.BO.inlet,self.BO.outlet,50) + [self.TU.inlet, self.TU.outlet] + pconst(self.HEH.inlet_hot,self.HEL.outlet_hot,50) + [self.CO1.inlet,self.CO1.outlet]
plot_Ts(SS2,'g-')
SS3 = [self.HEL.inlet, self.HEL.outlet_hot]
plot_Ts(SS3,'g--')
SS4 = [self.HEL.outlet, self.HEL.inlet_hot]
plot_Ts(SS4,'g--')
SS5 = [self.HEH.inlet, self.HEH.outlet_hot]
plot_Ts(SS5,'g--')
SS6 = [self.HEH.outlet, self.HEH.inlet_hot]
plot_Ts(SS6,'g--')
title(unicode(r"Split Regenerative Brayton cycle"))
ylabel(unicode(r"T / [°C]"))
xlabel("s / [kJ/kg/K]")
extpy.getbrowser().reporter.reportNote("Plotting completed")
ion()
show()
savefig(os.path.expanduser("~/Desktop/brayton__split_regen.eps"))
#--- air-to-stream heat exchanger plot ---
def air_stream_heat_exchanger_plot(self):
"""Plot T-H diagram of heat transfer in a heater_closed model"""
import loading
loading.load_matplotlib(throw=True)
ioff()
figure()
hold(1)
D = fprops.fluid(str(self.cd_cold.component.getSymbolValue()))
n = self.n.getIntValue()
extpy.getbrowser().reporter.reportNote("Fluid is %s" % D.name)
# hot side is the air, calculated in the model
plot_TH( [self.H[i] for i in range(1+int(n))],'r-',\
Href = (float(self.outlet.h)*float(self.outlet.mdot))\
)
plot_TH(pconsth(self.inlet_cold, self.outlet_cold, 50),'b-',
Href = (float(self.inlet_cold.h)*float(self.inlet_cold.mdot))\
)
title(unicode(r"Combined-cycle air-%s heat exchanger" % D.name))
ylabel(unicode(r"T / [°C]"))
xlabel("H / [MW]")
extpy.getbrowser().reporter.reportNote("Plotting completed")
ion()
show()
#savefig(os.path.expanduser("~/Desktop/air_stream_heatex.eps"))
extpy.registermethod(cycle_plot_rankine)
extpy.registermethod(cycle_plot_rankine_reheat)
extpy.registermethod(cycle_plot_rankine_regen1)
extpy.registermethod(cycle_plot_rankine_regen2)
extpy.registermethod(cycle_plot_brayton_regen)
extpy.registermethod(cycle_plot_brayton_reheat_regen_intercool)
extpy.registermethod(cycle_plot_brayton_split)
extpy.registermethod(cycle_plot_ccgt)
extpy.registermethod(heater_closed_plot)
extpy.registermethod(air_stream_heat_exchanger_plot)
#the above methods can be called from ASCEND using e.g. "EXTERNAL cycle_plot_rankine(SELF)".
|
gpl-2.0
|
alephu5/Soundbyte
|
environment/lib/python3.3/site-packages/matplotlib/tests/test_bbox_tight.py
|
1
|
3110
|
from matplotlib import rcParams
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
import matplotlib.path as mpath
import matplotlib.patches as mpatches
from matplotlib.ticker import FuncFormatter
import numpy as np
@image_comparison(baseline_images=['bbox_inches_tight'], remove_text=True,
savefig_kwarg=dict(bbox_inches='tight'), tol=15)
def test_bbox_inches_tight():
"Test that a figure saved using bbox_inches'tight' is clipped right"
data = [[ 66386, 174296, 75131, 577908, 32015],
[ 58230, 381139, 78045, 99308, 160454],
[ 89135, 80552, 152558, 497981, 603535],
[ 78415, 81858, 150656, 193263, 69638],
[ 139361, 331509, 343164, 781380, 52269]]
colLabels = rowLabels = [''] * 5
rows = len(data)
ind = np.arange(len(colLabels)) + 0.3 # the x locations for the groups
cellText = []
width = 0.4 # the width of the bars
yoff = np.array([0.0] * len(colLabels))
# the bottom values for stacked bar chart
fig, ax = plt.subplots(1,1)
for row in range(rows):
plt.bar(ind, data[row], width, bottom=yoff)
yoff = yoff + data[row]
cellText.append([''])
plt.xticks([])
plt.legend([''] * 5, loc = (1.2, 0.2))
# Add a table at the bottom of the axes
cellText.reverse()
the_table = plt.table(cellText=cellText,
rowLabels=rowLabels,
colLabels=colLabels, loc='bottom')
@image_comparison(baseline_images=['bbox_inches_tight_suptile_legend'],
remove_text=False, savefig_kwarg={'bbox_inches': 'tight'})
def test_bbox_inches_tight_suptile_legend():
plt.plot(list(range(10)), label='a straight line')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, )
plt.title('Axis title')
plt.suptitle('Figure title')
# put an extra long y tick on to see that the bbox is accounted for
def y_formatter(y, pos):
if int(y) == 4:
return 'The number 4'
else:
return str(y)
plt.gca().yaxis.set_major_formatter(FuncFormatter(y_formatter))
plt.xlabel('X axis')
@image_comparison(baseline_images=['bbox_inches_tight_clipping'],
remove_text=True, savefig_kwarg={'bbox_inches': 'tight'})
def test_bbox_inches_tight_clipping():
# tests bbox clipping on scatter points, and path clipping on a patch
# to generate an appropriately tight bbox
plt.scatter(list(range(10)), list(range(10)))
ax = plt.gca()
ax.set_xlim([0, 5])
ax.set_ylim([0, 5])
# make a massive rectangle and clip it with a path
patch = mpatches.Rectangle([-50, -50], 100, 100,
transform=ax.transData,
facecolor='blue', alpha=0.5)
path = mpath.Path.unit_regular_star(5).deepcopy()
path.vertices *= 0.25
patch.set_clip_path(path, transform=ax.transAxes)
plt.gcf().artists.append(patch)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
|
gpl-3.0
|
aarondewindt/paparazzi_torrap
|
sw/tools/calibration/calibration_utils.py
|
27
|
12769
|
# Copyright (C) 2010 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from __future__ import print_function, division
import re
import numpy as np
from numpy import sin, cos
from scipy import linalg, stats
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def get_ids_in_log(filename):
"""Returns available ac_id from a log."""
f = open(filename, 'r')
ids = []
pattern = re.compile("\S+ (\S+)")
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern, line)
if m:
ac_id = m.group(1)
            if ac_id not in ids:
ids.append(ac_id)
return ids
def read_log(ac_id, filename, sensor):
"""Extracts raw sensor measurements from a log."""
f = open(filename, 'r')
pattern = re.compile("(\S+) "+ac_id+" IMU_"+sensor+"_RAW (\S+) (\S+) (\S+)")
list_meas = []
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern, line)
if m:
list_meas.append([float(m.group(2)), float(m.group(3)), float(m.group(4))])
return np.array(list_meas)
def read_log_scaled(ac_id, filename, sensor, t_start, t_end):
"""Extracts scaled sensor measurements from a log."""
f = open(filename, 'r')
pattern = re.compile("(\S+) "+ac_id+" IMU_"+sensor+"_SCALED (\S+) (\S+) (\S+)")
list_meas = []
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern, line)
if m:
if (float(m.group(1)) >= float(t_start)) and (float(m.group(1)) < (float(t_end)+1.0)):
list_meas.append([float(m.group(1)), float(m.group(2)), float(m.group(3)), float(m.group(4))])
return np.array(list_meas)
def read_log_mag_current(ac_id, filename):
"""Extracts raw magnetometer and current measurements from a log."""
f = open(filename, 'r')
pattern = re.compile("(\S+) "+ac_id+" IMU_MAG_CURRENT_CALIBRATION (\S+) (\S+) (\S+) (\S+)")
list_meas = []
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern, line)
if m:
list_meas.append([float(m.group(2)), float(m.group(3)), float(m.group(4)), float(m.group(5))])
return np.array(list_meas)
def filter_meas(meas, window_size, noise_threshold):
"""Select only non-noisy data."""
filtered_meas = []
filtered_idx = []
for i in range(window_size, len(meas)-window_size):
noise = meas[i-window_size:i+window_size, :].std(axis=0)
if linalg.norm(noise) < noise_threshold:
filtered_meas.append(meas[i, :])
filtered_idx.append(i)
return np.array(filtered_meas), filtered_idx
def get_min_max_guess(meas, scale):
"""Initial boundary based calibration."""
max_meas = meas[:, :].max(axis=0)
min_meas = meas[:, :].min(axis=0)
range = max_meas - min_meas
# check if we would get division by zero
if range.all():
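        # neutral point = mid-range value; sensitivity maps the measured
        # span (max - min) onto the target interval [-scale, +scale]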
n = (max_meas + min_meas) / 2
sf = 2*scale/range
return np.array([n[0], n[1], n[2], sf[0], sf[1], sf[2]])
else:
return np.array([0, 0, 0, 0])
def scale_measurements(meas, p):
"""Scale the set of measurements."""
l_comp = []
l_norm = []
for m in meas[:, ]:
sm = (m - p[0:3])*p[3:6]
l_comp.append(sm)
l_norm.append(linalg.norm(sm))
return np.array(l_comp), np.array(l_norm)
def estimate_mag_current_relation(meas):
"""Calculate linear coefficient of magnetometer-current relation."""
coefficient = []
for i in range(0, 3):
gradient, intercept, r_value, p_value, std_err = stats.linregress(meas[:, 3], meas[:, i])
coefficient.append(gradient)
return coefficient
def print_xml(p, sensor, res):
"""Print xml for airframe file."""
print("")
print("<define name=\""+sensor+"_X_NEUTRAL\" value=\""+str(int(round(p[0])))+"\"/>")
print("<define name=\""+sensor+"_Y_NEUTRAL\" value=\""+str(int(round(p[1])))+"\"/>")
print("<define name=\""+sensor+"_Z_NEUTRAL\" value=\""+str(int(round(p[2])))+"\"/>")
print("<define name=\""+sensor+"_X_SENS\" value=\""+str(p[3]*2**res)+"\" integer=\"16\"/>")
print("<define name=\""+sensor+"_Y_SENS\" value=\""+str(p[4]*2**res)+"\" integer=\"16\"/>")
print("<define name=\""+sensor+"_Z_SENS\" value=\""+str(p[5]*2**res)+"\" integer=\"16\"/>")
print("")
def print_imu_scaled(sensor, measurements, attrs):
print("")
print(sensor+" : Time Range("+str(measurements[:,0].min(axis=0))+" : "+str(measurements[:,0].max(axis=0))+")")
np.set_printoptions(formatter={'float': '{:-7.3f}'.format})
print(" " + attrs[2] + " " + attrs[3] + " " + attrs[4])
print("Min " + str(measurements[:,1:].min(axis=0)*attrs[0]) + " " + attrs[1])
print("Max " + str(measurements[:,1:].max(axis=0)*attrs[0]) + " " + attrs[1])
print("Mean " + str(measurements[:,1:].mean(axis=0)*attrs[0]) + " " + attrs[1])
print("StDev " + str(measurements[:,1:].std(axis=0)*attrs[0]) + " " + attrs[1])
def plot_measurements(sensor, measurements):
plt.plot(measurements[:, 0])
plt.plot(measurements[:, 1])
plt.plot(measurements[:, 2])
plt.ylabel('ADC')
plt.title("Raw %s measurements" % sensor)
plt.show()
def plot_results(sensor, measurements, flt_idx, flt_meas, cp0, np0, cp1, np1, sensor_ref, blocking=True):
"""Plot calibration results."""
# plot raw measurements with filtered ones marked as red circles
plt.subplot(3, 1, 1)
plt.plot(measurements[:, 0])
plt.plot(measurements[:, 1])
plt.plot(measurements[:, 2])
plt.plot(flt_idx, flt_meas[:, 0], 'ro')
plt.plot(flt_idx, flt_meas[:, 1], 'ro')
plt.plot(flt_idx, flt_meas[:, 2], 'ro')
plt.ylabel('ADC')
plt.title('Raw '+sensor+', red dots are actually used measurements')
plt.tight_layout()
# show scaled measurements with initial guess
plt.subplot(3, 2, 3)
plt.plot(cp0[:, 0])
plt.plot(cp0[:, 1])
plt.plot(cp0[:, 2])
plt.plot(-sensor_ref*np.ones(len(flt_meas)))
plt.plot(sensor_ref*np.ones(len(flt_meas)))
plt.title('scaled '+sensor+' (initial guess)')
plt.xticks([])
plt.subplot(3, 2, 4)
plt.plot(np0)
plt.plot(sensor_ref*np.ones(len(flt_meas)))
plt.title('norm of '+sensor+' (initial guess)')
plt.xticks([])
# show scaled measurements after optimization
plt.subplot(3, 2, 5)
plt.plot(cp1[:, 0])
plt.plot(cp1[:, 1])
plt.plot(cp1[:, 2])
plt.plot(-sensor_ref*np.ones(len(flt_meas)))
plt.plot(sensor_ref*np.ones(len(flt_meas)))
plt.title('scaled '+sensor+' (optimized)')
plt.xticks([])
plt.subplot(3, 2, 6)
plt.plot(np1)
plt.plot(sensor_ref*np.ones(len(flt_meas)))
plt.title('norm of '+sensor+' (optimized)')
plt.xticks([])
# if we want to have another plot we only draw the figure (non-blocking)
# also in matplotlib before 1.0.0 there is only one call to show possible
if blocking:
plt.show()
else:
plt.draw()
def plot_imu_scaled(sensor, measurements, attrs):
"""Plot imu scaled results."""
plt.figure("Sensor Scaled")
plt.subplot(4, 1, 1)
plt.plot(measurements[:, 0], measurements[:, 1]*attrs[0])
plt.plot(measurements[:, 0], measurements[:, 2]*attrs[0])
plt.plot(measurements[:, 0], measurements[:, 3]*attrs[0])
#plt.xlabel('Time (s)')
plt.ylabel(attrs[1])
plt.title(sensor)
plt.subplot(4, 1, 2)
plt.plot(measurements[:, 0], measurements[:, 1]*attrs[0], 'b')
#plt.xlabel('Time (s)')
plt.ylabel(attrs[2])
plt.subplot(4, 1, 3)
plt.plot(measurements[:, 0], measurements[:, 2]*attrs[0], 'g')
#plt.xlabel('Time (s)')
plt.ylabel(attrs[3])
plt.subplot(4, 1, 4)
plt.plot(measurements[:, 0], measurements[:, 3]*attrs[0], 'r')
plt.xlabel('Time (s)')
plt.ylabel(attrs[4])
plt.show()
def plot_imu_scaled_fft(sensor, measurements, attrs):
"""Plot imu scaled fft results."""
#dt = 0.0769
#Fs = 1/dt
Fs = 26.0
plt.figure("Sensor Scaled - FFT")
plt.subplot(3, 1, 1)
plt.magnitude_spectrum(measurements[:, 1]*attrs[0], Fs=Fs, scale='linear')
plt.ylabel(attrs[2])
plt.title(sensor)
plt.subplot(3, 1, 2)
plt.magnitude_spectrum(measurements[:, 2]*attrs[0], Fs=Fs, scale='linear')
plt.ylabel(attrs[3])
plt.subplot(3, 1, 3)
plt.magnitude_spectrum(measurements[:, 3]*attrs[0], Fs=Fs, scale='linear')
plt.xlabel('Frequency')
plt.ylabel(attrs[4])
plt.show()
def plot_mag_3d(measured, calibrated, p):
"""Plot magnetometer measurements on 3D sphere."""
# set up points for sphere and ellipsoid wireframes
u = np.r_[0:2 * np.pi:20j]
v = np.r_[0:np.pi:20j]
wx = np.outer(cos(u), sin(v))
wy = np.outer(sin(u), sin(v))
wz = np.outer(np.ones(np.size(u)), cos(v))
ex = p[0] * np.ones(np.size(u)) + np.outer(cos(u), sin(v)) / p[3]
ey = p[1] * np.ones(np.size(u)) + np.outer(sin(u), sin(v)) / p[4]
ez = p[2] * np.ones(np.size(u)) + np.outer(np.ones(np.size(u)), cos(v)) / p[5]
# measurements
mx = measured[:, 0]
my = measured[:, 1]
mz = measured[:, 2]
# calibrated values
cx = calibrated[:, 0]
cy = calibrated[:, 1]
cz = calibrated[:, 2]
# axes size
left = 0.02
bottom = 0.05
width = 0.46
height = 0.9
rect_l = [left, bottom, width, height]
rect_r = [left/2+0.5, bottom, width, height]
fig = plt.figure(figsize=plt.figaspect(0.5))
if matplotlib.__version__.startswith('0'):
ax = Axes3D(fig, rect=rect_l)
else:
ax = fig.add_subplot(1, 2, 1, position=rect_l, projection='3d')
# plot measurements
ax.scatter(mx, my, mz)
    # plt.hold() was removed in Matplotlib 3.x; overplotting is now always on.
# plot line from center to ellipsoid center
ax.plot([0.0, p[0]], [0.0, p[1]], [0.0, p[2]], color='black', marker='+', markersize=10)
# plot ellipsoid
ax.plot_wireframe(ex, ey, ez, color='grey', alpha=0.5)
# Create cubic bounding box to simulate equal aspect ratio
max_range = np.array([mx.max() - mx.min(), my.max() - my.min(), mz.max() - mz.min()]).max()
Xb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][0].flatten() + 0.5 * (mx.max() + mx.min())
Yb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][1].flatten() + 0.5 * (my.max() + my.min())
Zb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][2].flatten() + 0.5 * (mz.max() + mz.min())
# add the fake bounding box:
for xb, yb, zb in zip(Xb, Yb, Zb):
ax.plot([xb], [yb], [zb], 'w')
ax.set_title('MAG raw with fitted ellipsoid and center offset')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
if matplotlib.__version__.startswith('0'):
ax = Axes3D(fig, rect=rect_r)
else:
ax = fig.add_subplot(1, 2, 2, position=rect_r, projection='3d')
ax.plot_wireframe(wx, wy, wz, color='grey', alpha=0.5)
    # plt.hold() removed in Matplotlib 3.x; not needed.
ax.scatter(cx, cy, cz)
ax.set_title('MAG calibrated on unit sphere')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.set_xlim3d(-1, 1)
ax.set_ylim3d(-1, 1)
ax.set_zlim3d(-1, 1)
plt.show()
def read_turntable_log(ac_id, tt_id, filename, _min, _max):
""" Read a turntable log.
return an array which first column is turnatble and next 3 are gyro
"""
f = open(filename, 'r')
    pattern_g = re.compile(r"(\S+) " + str(ac_id) + r" IMU_GYRO_RAW (\S+) (\S+) (\S+)")
    pattern_t = re.compile(r"(\S+) " + str(tt_id) + r" IMU_TURNTABLE (\S+)")
last_tt = None
list_tt = []
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern_t, line)
if m:
last_tt = float(m.group(2))
m = re.match(pattern_g, line)
if m and last_tt and _min < last_tt < _max:
list_tt.append([last_tt, float(m.group(2)), float(m.group(3)), float(m.group(4))])
return np.array(list_tt)
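# --- Hedged usage sketch (not part of the original module) ---
# A minimal end-to-end pass through the calibration helpers above, using a
# synthetic 3-axis measurement array in place of a parsed log. The window
# size, noise threshold and reference scale are arbitrary example values.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    # synthetic raw sensor data: constant offsets plus a little noise
    synthetic = np.array([120.0, -80.0, 500.0]) + 5.0 * rng.randn(400, 3)
    flt_meas, flt_idx = filter_meas(synthetic, window_size=10, noise_threshold=1000.0)
    p0 = get_min_max_guess(flt_meas, scale=9.81)
    cp0, np0 = scale_measurements(flt_meas, p0)
    print("initial guess (neutrals + sensitivities):", p0)
    print("mean norm after scaling: %f" % np0.mean())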
|
gpl-2.0
|
cybernet14/scikit-learn
|
examples/ensemble/plot_gradient_boosting_oob.py
|
230
|
4762
|
"""
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``), the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross-validation, which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_folds=3):
cv = KFold(n=X_train.shape[0], n_folds=n_folds)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv:
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_folds
return val_scores
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
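# Hedged follow-up sketch (not part of the original example): refit with the
# OOB-selected number of trees and compare its test accuracy to the full model.
params_oob = dict(params, n_estimators=int(oob_best_iter))
clf_oob = ensemble.GradientBoostingClassifier(**params_oob).fit(X_train, y_train)
print("Accuracy with %d trees: %.4f" % (oob_best_iter, clf_oob.score(X_test, y_test)))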
|
bsd-3-clause
|
ResearchComputing/myhat
|
example.py
|
2
|
1448
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=1>
# JANUS Web API
# <codecell>
from __future__ import print_function
import json
import requests
# <headingcell level=2>
# Query API database with `get` requests
# <codecell>
uri = 'http://localhost:5000/api/monte'
models = json.loads(requests.get(uri).text)
tmp = map(print, models['models'])
# <codecell>
uri = 'http://localhost:5000/api/monte/rengers'
jobs = json.loads(requests.get(uri).text)
tmp = map(print, jobs['jobs'])
# <headingcell level=2>
# Submit a job with `post`
# <codecell>
uri = 'http://localhost:5000/api/monte/rengers'
# <markdowncell>
# Define the model parameters
# <codecell>
params = dict()
params['pmmphr'] = 40
params['taucwepp'] = 6.2
params['hcelev'] = 2.0
params['time'] = 15
# <markdowncell>
# Call the model
# <codecell>
r = requests.post(uri, data=json.dumps(params))
print(r.text)
# <markdowncell>
# Print last 5 jobs
# <codecell>
jobs = json.loads(requests.get(uri).text)
ids = map(lambda x: x['id'], jobs['jobs'])
tmp = map(print, ids[-5:])
# <codecell>
ids[-1]
# <headingcell level=2>
# Check the output of a job
# <codecell>
uri = 'http://localhost:5000/api/monte/rengers/' + str(ids[-1])
r = requests.get(uri)
print(r.text)
# <codecell>
%matplotlib inline
import matplotlib.pyplot as plt
x = [float(v.strip()) for v in r.text.split(',')[1:-1]]
plt.plot(x)
plt.ylim(990,1025)
# <codecell>
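# <markdowncell>
# Hedged extra cell (not part of the original notebook): recent versions of
# `requests` can serialize the body themselves via the `json=` keyword, which
# is equivalent to the explicit `json.dumps` call used earlier.
# <codecell>
uri = 'http://localhost:5000/api/monte/rengers'
r = requests.post(uri, json=params)
print(r.status_code)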
|
mit
|
arunlodhi/pymc3
|
pymc3/glm/glm.py
|
14
|
5720
|
import numpy as np
from ..core import *
from ..distributions import *
from ..tuning.starting import find_MAP
import patsy
import theano
import pandas as pd
from collections import defaultdict
from pandas.tools.plotting import scatter_matrix
from . import families
def linear_component(formula, data, priors=None,
intercept_prior=None,
regressor_prior=None,
init_vals=None, family=None,
model=None):
"""Create linear model according to patsy specification.
Parameters
----------
formula : str
Patsy linear model descriptor.
data : array
Labeled array (e.g. pandas DataFrame, recarray).
priors : dict
Mapping prior name to prior distribution.
E.g. {'Intercept': Normal.dist(mu=0, sd=1)}
intercept_prior : pymc3 distribution
Prior to use for the intercept.
Default: Normal.dist(mu=0, tau=1.0E-12)
regressor_prior : pymc3 distribution
Prior to use for all regressor(s).
Default: Normal.dist(mu=0, tau=1.0E-12)
init_vals : dict
Set starting values externally: parameter -> value
Default: None
family : statsmodels.family
Link function to pass to statsmodels (init has to be True).
See `statsmodels.api.families`
Default: identity
Output
------
(y_est, coeffs) : Estimate for y, list of coefficients
Example
-------
# Logistic regression
y_est, coeffs = glm('male ~ height + weight',
htwt_data,
                            family=glm.families.Binomial(link=glm.families.logit))
y_data = Bernoulli('y', y_est, observed=data.male)
"""
if intercept_prior is None:
intercept_prior = Normal.dist(mu=0, tau=1.0E-12)
if regressor_prior is None:
regressor_prior = Normal.dist(mu=0, tau=1.0E-12)
if priors is None:
priors = defaultdict(None)
# Build patsy design matrix and get regressor names.
_, dmatrix = patsy.dmatrices(formula, data)
reg_names = dmatrix.design_info.column_names
if init_vals is None:
init_vals = {}
# Create individual coefficients
model = modelcontext(model)
coeffs = []
if reg_names[0] == 'Intercept':
prior = priors.get('Intercept', intercept_prior)
coeff = model.Var(reg_names.pop(0), prior)
if 'Intercept' in init_vals:
coeff.tag.test_value = init_vals['Intercept']
coeffs.append(coeff)
for reg_name in reg_names:
prior = priors.get(reg_name, regressor_prior)
coeff = model.Var(reg_name, prior)
if reg_name in init_vals:
coeff.tag.test_value = init_vals[reg_name]
coeffs.append(coeff)
y_est = theano.dot(np.asarray(dmatrix), theano.tensor.stack(*coeffs)).reshape((1, -1))
return y_est, coeffs
def glm(*args, **kwargs):
"""Create GLM after Patsy model specification string.
Parameters
----------
formula : str
Patsy linear model descriptor.
data : array
Labeled array (e.g. pandas DataFrame, recarray).
priors : dict
Mapping prior name to prior distribution.
E.g. {'Intercept': Normal.dist(mu=0, sd=1)}
intercept_prior : pymc3 distribution
Prior to use for the intercept.
Default: Normal.dist(mu=0, tau=1.0E-12)
regressor_prior : pymc3 distribution
Prior to use for all regressor(s).
Default: Normal.dist(mu=0, tau=1.0E-12)
init_vals : dict
Set starting values externally: parameter -> value
Default: None
family : Family object
Distribution of likelihood, see pymc3.glm.families
(init has to be True).
Output
------
vars : List of created random variables (y_est, coefficients etc)
Example
-------
# Logistic regression
vars = glm('male ~ height + weight',
data,
family=glm.families.Binomial(link=glm.families.logit))
"""
model = modelcontext(kwargs.get('model'))
family = kwargs.pop('family', families.Normal())
call_find_map = kwargs.pop('find_MAP', True)
formula = args[0]
data = args[1]
y_data = np.asarray(patsy.dmatrices(formula, data)[0]).T
y_est, coeffs = linear_component(*args, **kwargs)
family.create_likelihood(y_est, y_data)
return [y_est] + coeffs
def plot_posterior_predictive(trace, eval=None, lm=None, samples=30, **kwargs):
"""Plot posterior predictive of a linear model.
:Arguments:
trace : <array>
Array of posterior samples with columns
eval : <array>
Array over which to evaluate lm
lm : function <default: linear function>
Function mapping parameters at different points
to their respective outputs.
input: point, sample
output: estimated value
samples : int <default=30>
How many posterior samples to draw.
Additional keyword arguments are passed to pylab.plot().
"""
import matplotlib.pyplot as plt
if lm is None:
lm = lambda x, sample: sample['Intercept'] + sample['x'] * x
if eval is None:
eval = np.linspace(0, 1, 100)
# Set default plotting arguments
if 'lw' not in kwargs and 'linewidth' not in kwargs:
kwargs['lw'] = .2
if 'c' not in kwargs and 'color' not in kwargs:
kwargs['c'] = 'k'
for rand_loc in np.random.randint(0, len(trace), samples):
rand_sample = trace[rand_loc]
plt.plot(eval, lm(eval, rand_sample), **kwargs)
# Make sure to not plot label multiple times
kwargs.pop('label', None)
plt.title('Posterior predictive')
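# --- Hedged usage sketch (not part of the original module) ---
# Minimal illustration of the `glm` helper above on a tiny synthetic dataset.
# The pymc3 import path, sampler and number of draws are illustrative
# assumptions, not prescribed by this module.
if __name__ == "__main__":
    from pymc3 import Model, sample
    rng = np.random.RandomState(0)
    data = pd.DataFrame({'x': rng.uniform(size=50)})
    data['y'] = 2.0 * data['x'] + rng.normal(scale=0.1, size=50)
    with Model():
        glm('y ~ x', data)       # adds Intercept, x and the likelihood
        trace = sample(500)
    plot_posterior_predictive(trace, eval=np.linspace(0, 1, 50))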
|
apache-2.0
|
worldofchris/jlf
|
jlf_stats/metrics.py
|
1
|
12504
|
"""
Metrics
"""
from jlf_stats.fogbugz_wrapper import FogbugzWrapper
from jlf_stats.jira_wrapper import JiraWrapper
import pandas as pd
import numpy as np
import math
import exceptions
from bucket import bucket_labels
from index import fill_date_index_blanks, week_start_date
from history import arrivals, history_from_state_transitions
import re
import os
import json
class Metrics(object):
def __init__(self, config):
self.source = None
self.work_items = None
self.states = []
self.config = config
if config['source']['type'] == 'fogbugz':
self.source = FogbugzWrapper(self.config)
elif config['source']['type'] == 'jira':
m = re.match("^ENV\(([^\']+)\)", self.config['source']['authentication']['password'])
if m is not None:
self.config['source']['authentication']['password'] = os.environ.get(m.group(1), 'undefined')
self.source = JiraWrapper(self.config)
if 'throughput_dow' in config:
self.throughput_dow = config['throughput_dow']
else:
self.throughput_dow = 4
try:
self.types = config['types']
self.counts_towards_throughput = config['counts_towards_throughput']
except KeyError as e:
raise exceptions.MissingConfigItem(e.message, "Missing Config Item:{0}".format(e.message))
# Optional
try:
self.states = config['states']
self.states.append(None)
except KeyError:
pass
def work_item(self, id):
"""
Get an individual work item.
This is a case in FogBugz or an Issue in Jira.
"""
if self.work_items is None:
self.work_items = self.source.work_items()
matches = [work_item for work_item in self.work_items if work_item.id == id]
return matches[0]
def details(self, fields=None):
if self.work_items is None:
self.work_items = self.source.work_items()
details = []
for work_item in self.work_items:
details.append(work_item.detail())
df = pd.DataFrame(details)
if fields is None:
return df
else:
return df.filter(fields)
def history(self, from_date=None, until_date=None, types=None):
if self.work_items is None:
self.work_items = self.source.work_items()
history = {}
for work_item in self.work_items:
if types is None:
# HACK HACK HACK
# Also need some consistency around thing_date and date_thing
if isinstance(self.source, JiraWrapper):
history[work_item.id] = work_item.history
else:
history[work_item.id] = history_from_state_transitions(work_item.date_created.date(), work_item.history, until_date)
else:
for type_grouping in types:
if work_item.type in self.types[type_grouping]:
if isinstance(self.source, JiraWrapper):
history[work_item.id] = work_item.history
else:
history[work_item.id] = history_from_state_transitions(work_item.date_created.date(), work_item.history, until_date)
if history is not None:
df = pd.DataFrame(history)
return df
return None
def throughput(self,
from_date,
to_date,
cumulative=True,
category=None,
types=None):
"""
Return throughput for all work_items in a state where they are considered
to count towards throughput.
Might want to add some additional conditions other than state but state
allows us the most options as to where to place the 'finishing line'
"""
work_item_history = self.history(from_date, to_date)
work_item_rows = []
for work_item_key in work_item_history:
work_item = self.work_item(work_item_key)
if category is not None:
if category != work_item.category:
continue
swimlane = work_item.category
# Are we grouping by work type?
if types is not None:
for type_grouping in types:
if work_item.type in self.types[type_grouping]:
swimlane = swimlane + '-' + type_grouping
else:
continue
# print "Not counting " + f.work_itemtype.name
if swimlane == work_item.category:
continue
for day, state in work_item_history[work_item_key].iteritems():
if day.weekday() == self.throughput_dow:
if state in self.counts_towards_throughput:
work_item_row = {'swimlane': swimlane,
'id': work_item_key,
'week': day,
'count': 1}
work_item_rows.append(work_item_row)
df = pd.DataFrame(work_item_rows)
if len(df.index) > 0:
table = pd.pivot_table(df, index=['week'], columns=['swimlane'], values='count', aggfunc=np.count_nonzero)
if cumulative:
return table
else:
def de_cumulative(x):
for i in range(x.size-1, 0, -1):
x[i] = x[i] - x[i-1]
return x
table = table.apply(de_cumulative)
return table
else:
return None
def cfd(self, from_date=None, until_date=None, types=None):
"""
Cumulative Flow Diagram
"""
cfd = self.history(from_date, until_date, types)
days = {}
for day in cfd.index:
tickets = []
for ticket in cfd.ix[day]:
tickets.append(ticket)
def state_order(state):
try:
return self.states.index(state)
except ValueError:
if type(state) == float:
if math.isnan(state):
return -1
if type(state) == np.float64:
if math.isnan(state):
return -1
raise exceptions.MissingState(state, "Missing state:{0}".format(state))
days[day] = sorted(tickets, key=state_order)
return pd.DataFrame(days)
def cycle_time_histogram(self,
cycle,
types=None,
buckets=None):
"""
Time taken for work to complete one or more 'cycles' - i.e. transitions from a start state to an end state
"""
if self.work_items is None:
self.work_items = self.source.work_items()
cycle_time_data = {}
for work_item in self.work_items:
if types is not None:
include = False
for type_grouping in types:
if work_item.type in self.types[type_grouping]:
include = True
key = "{0}-{1}".format(type_grouping, cycle)
break
if not include:
continue
else:
key = cycle
try:
if work_item.cycles[cycle] is not None:
if key not in cycle_time_data:
cycle_time_data[key] = [work_item.cycles[cycle]]
else:
cycle_time_data[key].append(work_item.cycles[cycle])
except KeyError:
continue
histogram = None
for cycle in cycle_time_data:
if buckets is not None:
try:
li = buckets.index('max')
buckets[li] = max(cycle_time_data[cycle])
except ValueError:
pass
labels = bucket_labels(buckets)
count, division = np.histogram(cycle_time_data[cycle], bins=buckets)
else:
count, division = np.histogram(cycle_time_data[cycle])
labels = bucket_labels(division)
cycle_histogram = pd.DataFrame(count, index=labels, columns=[cycle])
cycle_histogram.index.name = 'bucket'
if histogram is None:
histogram = cycle_histogram.copy(deep=True)
else:
old_histogram = histogram.copy(deep=True)
histogram = old_histogram.join(cycle_histogram, how='outer')
return histogram
def demand(self,
from_date,
to_date,
types=None):
"""
Return the number of issues created each week - i.e. the demand on the system
"""
if self.work_items is None:
self.work_items = self.source.work_items()
details = []
for work_item in self.work_items:
detail = work_item.detail()
detail['count'] = 1 # Need to figure out where to put this
# resolution_date_str = f.resolutiondate
# if resolution_date_str is not None:
# resolution_date = datetime.strptime(resolution_date_str[:10],
# '%Y-%m-%d')
# week = week_start_date(resolution_date.isocalendar()[0],
# resolution_date.isocalendar()[1]).strftime('%Y-%m-%d')
# else:
# week = None
detail['week_created'] = week_start_date(detail['date_created'].isocalendar()[0],
detail['date_created'].isocalendar()[1]).strftime('%Y-%m-%d')
include = True
swimlane = work_item.category
if types is not None and self.types is not None:
include = False
for type_grouping in types:
if work_item.type in self.types[type_grouping]:
swimlane = swimlane + '-' + type_grouping
include = True
detail['swimlane'] = swimlane
if include:
details.append(detail)
df = pd.DataFrame(details)
table = pd.pivot_table(df, index=['week_created'], columns=['swimlane'], values='count', aggfunc=np.count_nonzero)
reindexed = table.reindex(index=fill_date_index_blanks(table.index), fill_value=np.int64(0))
reindexed.index.name = "week"
return reindexed
def arrival_rate(self,
from_date,
to_date):
"""
So that we can get an idea of the flow of work that has not been completed and so does not have a resolution date
and so does not count towards throughput, what is the rate at which that work arrived at states further up the
value chain?
"""
if self.work_items is None:
self.work_items = self.source.work_items()
arrivals_count = {}
for work_item in self.work_items:
try:
arrivals_count = arrivals(work_item.history, arrivals_count)
except AttributeError as e:
                print(e)
df = pd.DataFrame.from_dict(arrivals_count, orient='index')
df.index = pd.to_datetime(df.index)
wf = df.resample('W-MON', how='sum')
return wf
def save_work_items(self, filename=None):
if filename is None:
            if 'name' in self.config:
filename = self.config['name'] + '.json'
else:
filename = 'local.json'
if self.work_items is None:
self.work_items = self.source.work_items()
output = []
for item in self.work_items:
# This is so wrong. We are decoding then encoding then decoding again...
output.append(json.loads(item.to_JSON()))
with open(filename, 'w') as outfile:
json.dump(output, outfile, indent=4, sort_keys=True)
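# --- Hedged usage sketch (not part of the original module) ---
# Illustrative configuration for the Metrics class above. The field names
# mirror the keys read in __init__; the Jira server key, credentials, state
# names, type groupings and date arguments are placeholders, not a real setup.
if __name__ == '__main__':
    example_config = {
        'name': 'example',
        'source': {
            'type': 'jira',
            'server': 'https://jira.example.com',
            'authentication': {'username': 'user', 'password': 'ENV(JIRA_PASSWORD)'},
        },
        'types': {'value': ['Story'], 'failure': ['Bug']},
        'counts_towards_throughput': ['Done'],
        'states': ['Open', 'In Progress', 'Done'],
        'throughput_dow': 4,
    }
    metrics = Metrics(example_config)
    weekly = metrics.throughput('2018-01-01', '2018-03-31', cumulative=False, types=['value'])
    print(weekly)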
|
bsd-2-clause
|
PrashntS/scikit-learn
|
sklearn/neighbors/tests/test_nearest_centroid.py
|
305
|
4121
|
"""
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
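# Hedged extra snippet (not one of the original tests): the estimator exercised
# above, reduced to a plain fit/predict call on the toy data defined at the top
# of this file.
def demo_nearest_centroid_usage():
    clf = NearestCentroid().fit(X, y)
    return clf.predict([[-2, -2], [2, 2]])   # -> array([-1, 1])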
|
bsd-3-clause
|
adelq/thermopy
|
setup.py
|
2
|
1354
|
from setuptools import setup, find_packages
import thermochem
setup(
name="thermochem",
version=thermochem.__version__,
description="Python utilities for thermodynamics and thermochemistry",
long_description=open("README.rst").read(),
author="Adel Qalieh",
author_email="[email protected]",
url="https://github.com/adelq/thermochem",
license="BSD",
packages=find_packages(),
include_package_data=True,
install_requires=['scipy>=0.6.0', 'numpy>=1.2.1', 'pandas>=0.17.0'],
zip_safe=False,
keywords='thermo chemistry physics',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Healthcare Industry',
'Intended Audience :: Manufacturing',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Scientific/Engineering :: Physics'
],
)
|
bsd-3-clause
|
UCSD-CCAL/ccal
|
ccal/hierarchical_consensus_cluster_with_ks.py
|
1
|
3518
|
from pandas import DataFrame, Index
from scipy.spatial.distance import pdist, squareform
from .establish_path import establish_path
from .hierarchical_consensus_cluster import hierarchical_consensus_cluster
from .multiprocess import multiprocess
from .plot_heat_map import plot_heat_map
from .plot_points import plot_points
def hierarchical_consensus_cluster_with_ks(
df,
ks,
n_job=1,
distance__column_x_column=None,
distance_function="euclidean",
n_clustering=10,
random_seed=20121020,
linkage_method="ward",
plot_df=True,
directory_path=None,
):
if directory_path is None:
k_directory_paths = tuple(None for k in ks)
else:
k_directory_paths = tuple("{}/{}".format(directory_path, k) for k in ks)
for k_directory_path in k_directory_paths:
establish_path(k_directory_path, "directory")
k_return = {}
if distance__column_x_column is None:
print("Computing distance with {} ...".format(distance_function))
distance__column_x_column = DataFrame(
squareform(pdist(df.values.T, distance_function)),
index=df.columns,
columns=df.columns,
)
if directory_path is not None:
distance__column_x_column.to_csv(
"{}/distance.column_x_column.tsv".format(directory_path), sep="\t"
)
for k, (column_cluster, column_cluster__ccc) in zip(
ks,
multiprocess(
hierarchical_consensus_cluster,
(
(
df,
k,
distance__column_x_column,
None,
n_clustering,
random_seed,
linkage_method,
plot_df,
k_directory_path,
)
for k, k_directory_path in zip(ks, k_directory_paths)
),
n_job=n_job,
),
):
k_return["K{}".format(k)] = {
"column_cluster": column_cluster,
"column_cluster.ccc": column_cluster__ccc,
}
keys = Index(("K{}".format(k) for k in ks), name="K")
file_name = "hcc.column_cluster.ccc.html"
if directory_path is None:
html_file_path = None
else:
html_file_path = "{}/{}".format(directory_path, file_name)
plot_points(
(ks,),
(tuple(k_return[key]["column_cluster.ccc"] for key in keys),),
names=("Column Cluster CCC",),
modes=("lines+markers",),
title="HCC Column Cluster CCC",
xaxis_title="K",
yaxis_title="CCC",
html_file_path=html_file_path,
)
k_x_column = DataFrame(
[k_return[key]["column_cluster"] for key in keys],
index=keys,
columns=df.columns,
)
if directory_path is not None:
k_x_column.to_csv("{}/hcc.k_x_column.tsv".format(directory_path), sep="\t")
if plot_df:
file_name = "hcc.k_x_column.distribution.html"
if directory_path is None:
html_file_path = None
else:
html_file_path = "{}/{}".format(directory_path, file_name)
plot_heat_map(
k_x_column,
sort_axis=1,
colorscale="COLOR_CATEGORICAL",
title="HCC Column Cluster Distribution",
xaxis_title=k_x_column.columns.name,
yaxis_title=k_x_column.index.name,
html_file_path=html_file_path,
)
return k_return
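# --- Hedged usage sketch (not part of the original module) ---
# Minimal call of the function above on a random features-by-samples matrix.
# The matrix shape, choice of ks and the row/column names are made up for
# illustration; directory_path is left unset so nothing is written to disk.
if __name__ == "__main__":
    import numpy as np
    df = DataFrame(
        np.random.RandomState(0).random_sample((20, 12)),
        index=["feature_{}".format(i) for i in range(20)],
        columns=["sample_{}".format(j) for j in range(12)],
    )
    k_return = hierarchical_consensus_cluster_with_ks(df, ks=(2, 3, 4), plot_df=False)
    for k_key, result in k_return.items():
        print(k_key, "CCC =", result["column_cluster.ccc"])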
|
mit
|
jat255/hyperspy
|
hyperspy/drawing/signal1d.py
|
2
|
20469
|
# -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import warnings
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
import logging
import inspect
from hyperspy.drawing.figure import BlittedFigure
from hyperspy.drawing import utils
from hyperspy.events import Event, Events
from hyperspy.exceptions import VisibleDeprecationWarning
from hyperspy.misc.test_utils import ignore_warning
_logger = logging.getLogger(__name__)
class Signal1DFigure(BlittedFigure):
"""
"""
def __init__(self, title=""):
super(Signal1DFigure, self).__init__()
self.figure = None
self.ax = None
self.right_ax = None
self.ax_lines = list()
self.right_ax_lines = list()
self.axes_manager = None
self.right_axes_manager = None
# Labels
self.xlabel = ''
self.ylabel = ''
self.title = title
self.create_figure()
self.create_axis()
# Color cycles
self._color_cycles = {
'line': utils.ColorCycle(),
'step': utils.ColorCycle(),
'scatter': utils.ColorCycle(), }
def create_axis(self):
self.ax = self.figure.add_subplot(111)
animated = self.figure.canvas.supports_blit
self.ax.yaxis.set_animated(animated)
self.ax.xaxis.set_animated(animated)
self.ax.hspy_fig = self
def create_right_axis(self, color='black'):
if self.ax is None:
self.create_axis()
if self.right_ax is None:
self.right_ax = self.ax.twinx()
self.right_ax.hspy_fig = self
self.right_ax.yaxis.set_animated(self.figure.canvas.supports_blit)
self.right_ax.tick_params(axis='y', labelcolor=color)
plt.tight_layout()
def close_right_axis(self):
if self.right_ax is not None:
for lines in self.right_ax_lines:
lines.close()
self.right_ax.axes.get_yaxis().set_visible(False)
self.right_ax = None
def add_line(self, line, ax='left', connect_navigation=False):
"""
Add Signal1DLine to figure
Parameters
----------
line : Signal1DLine object
Line to be added to the figure.
        ax : {'left', 'right'}, optional
            Position of the y axis, either 'left' or 'right'. The default is
            'left'.
        connect_navigation : bool, optional
            Connect the update of the line to the `indices_changed` event of
            the axes_manager. This is only necessary when adding a line to the
            left since the `indices_changed` event is already connected to
            the `update` method of `Signal1DFigure`. The default is False.
Returns
-------
None.
"""
if ax == 'left':
line.ax = self.ax
if line.axes_manager is None:
line.axes_manager = self.axes_manager
self.ax_lines.append(line)
line.sf_lines = self.ax_lines
elif ax == 'right':
line.ax = self.right_ax
self.right_ax_lines.append(line)
line.sf_lines = self.right_ax_lines
if line.axes_manager is None:
line.axes_manager = self.right_axes_manager
if connect_navigation:
line.axes_manager.events.indices_changed.connect(
line._auto_update_line, [])
line.events.closed.connect(
lambda: line.axes_manager.events.indices_changed.disconnect(
line._auto_update_line), [])
line.axis = self.axis
        # Automatically assign the color if not defined
if line.color is None:
line.color = self._color_cycles[line.type]()
# Or remove it from the color cycle if part of the cycle
# in this round
else:
rgba_color = mpl.colors.colorConverter.to_rgba(line.color)
if rgba_color in self._color_cycles[line.type].color_cycle:
self._color_cycles[line.type].color_cycle.remove(
rgba_color)
def plot(self, data_function_kwargs={}, **kwargs):
self.ax.set_xlabel(self.xlabel)
self.ax.set_ylabel(self.ylabel)
self.ax.set_title(self.title)
x_axis_upper_lims = []
x_axis_lower_lims = []
for line in self.ax_lines:
line.plot(data_function_kwargs=data_function_kwargs, **kwargs)
x_axis_lower_lims.append(line.axis.axis[0])
x_axis_upper_lims.append(line.axis.axis[-1])
for marker in self.ax_markers:
marker.plot(render_figure=False)
plt.xlim(np.min(x_axis_lower_lims), np.max(x_axis_upper_lims))
self.axes_manager.events.indices_changed.connect(self.update, [])
self.events.closed.connect(
lambda: self.axes_manager.events.indices_changed.disconnect(
self.update), [])
self.ax.figure.canvas.draw_idle()
if hasattr(self.figure, 'tight_layout'):
try:
self.figure.tight_layout()
except BaseException:
# tight_layout is a bit brittle, we do this just in case it
# complains
pass
self.figure.canvas.draw()
def _on_close(self):
_logger.debug('Closing Signal1DFigure.')
if self.figure is None:
return # Already closed
for line in self.ax_lines + self.right_ax_lines:
line.close()
super(Signal1DFigure, self)._on_close()
_logger.debug('Signal1DFigure Closed.')
def update(self):
"""
Update lines, markers and render at the end.
This method is connected to the `indices_changed` event of the
`axes_manager`.
"""
def update_lines(ax, ax_lines):
y_min, y_max = np.nan, np.nan
for line in ax_lines:
# save on figure rendering and do it at the end
# don't update the y limits
line._auto_update_line(render_figure=False,
update_ylimits=False)
y_min = np.nanmin([y_min, line._y_min])
y_max = np.nanmax([y_max, line._y_max])
ax.set_ylim(y_min, y_max)
for marker in self.ax_markers:
marker.update()
        # Left and right axes need to be updated separately to set the
        # correct y limits of each axis
update_lines(self.ax, self.ax_lines)
if self.right_ax is not None:
update_lines(self.right_ax, self.right_ax_lines)
if self.ax.figure.canvas.supports_blit:
self.ax.hspy_fig._update_animated()
else:
self.ax.figure.canvas.draw_idle()
class Signal1DLine(object):
"""Line that can be added to Signal1DFigure.
Attributes
----------
type : {'scatter', 'step', 'line'}
Select the line drawing style.
line_properties : dictionary
        Accepts a dictionary of valid line properties (i.e. recognized by
        mpl.plot). In addition it understands
the keyword `type` that can take the following values:
{'scatter', 'step', 'line'}
auto_update: bool
If False, executing ``_auto_update_line`` does not update the
line plot.
Methods
-------
set_line_properties
Enables setting the line_properties attribute using keyword
arguments.
Raises
------
ValueError
If an invalid keyword value is passed to line_properties.
"""
def __init__(self):
self.events = Events()
self.events.closed = Event("""
Event that triggers when the line is closed.
Arguments:
obj: Signal1DLine instance
The instance that triggered the event.
""", arguments=["obj"])
self.sf_lines = None
self.ax = None
# Data attributes
self.data_function = None
# args to pass to `__call__`
self.data_function_kwargs = {}
self.axis = None
self.axes_manager = None
self._plot_imag = False
self.norm = 'linear'
# Properties
self.auto_update = True
self.autoscale = 'v'
self._y_min = np.nan
self._y_max = np.nan
self.line = None
self.plot_indices = False
self.text = None
self.text_position = (-0.1, 1.05,)
self._line_properties = {}
self.type = "line"
@property
def get_complex(self):
warnings.warn("The `get_complex` attribute is deprecated and will be"
"removed in 2.0, please use `_plot_imag` instead.",
VisibleDeprecationWarning)
return self._plot_imag
@property
def line_properties(self):
return self._line_properties
@line_properties.setter
def line_properties(self, kwargs):
if 'type' in kwargs:
self.type = kwargs['type']
del kwargs['type']
if 'color' in kwargs:
color = kwargs['color']
del kwargs['color']
self.color = color
for key, item in kwargs.items():
if item is None and key in self._line_properties:
del self._line_properties[key]
else:
self._line_properties[key] = item
if self.line is not None:
plt.setp(self.line, **self.line_properties)
self.ax.figure.canvas.draw_idle()
def set_line_properties(self, **kwargs):
self.line_properties = kwargs
@property
def type(self):
return self._type
@type.setter
def type(self, value):
lp = {}
if value == 'scatter':
lp['marker'] = 'o'
lp['linestyle'] = 'None'
lp['markersize'] = 1
elif value == 'line':
lp['linestyle'] = '-'
lp['marker'] = "None"
lp['drawstyle'] = "default"
elif value == 'step':
lp['drawstyle'] = 'steps-mid'
lp['marker'] = "None"
else:
raise ValueError(
"`type` must be one of "
"{\'scatter\', \'line\', \'step\'}"
"but %s was given" % value)
self._type = value
self.line_properties = lp
if self.color is not None:
self.color = self.color
@property
def color(self):
if 'color' in self.line_properties:
return self.line_properties['color']
elif 'markeredgecolor' in self.line_properties:
return self.line_properties['markeredgecolor']
else:
return None
@color.setter
def color(self, color):
if self._type == 'scatter':
self.set_line_properties(markeredgecolor=color)
if 'color' in self._line_properties:
del self._line_properties['color']
else:
if color is None and 'color' in self._line_properties:
del self._line_properties['color']
else:
self._line_properties['color'] = color
self.set_line_properties(markeredgecolor=None)
if self.line is not None:
plt.setp(self.line, **self.line_properties)
self.ax.figure.canvas.draw_idle()
def plot(self, data=1, **kwargs):
for key, value in kwargs.items():
if hasattr(self, key):
setattr(self, key, value)
data = self._get_data()
if self.line is not None:
self.line.remove()
norm = self.norm
if norm == 'log':
plot = self.ax.semilogy
elif (isinstance(norm, mpl.colors.Normalize) or
(inspect.isclass(norm) and issubclass(norm, mpl.colors.Normalize))
):
raise ValueError("Matplotlib Normalize instance or subclass can "
"be used for Signal2D only.")
elif norm not in ["auto", "linear"]:
            raise ValueError("`norm` parameter should be 'auto', 'linear' or "
"'log' for Signal1D.")
else:
plot = self.ax.plot
self.line, = plot(self.axis.axis, data, **self.line_properties)
self.line.set_animated(self.ax.figure.canvas.supports_blit)
if not self.axes_manager or self.axes_manager.navigation_size == 0:
self.plot_indices = False
if self.plot_indices is True:
if self.text is not None:
self.text.remove()
self.text = self.ax.text(*self.text_position,
s=str(self.axes_manager.indices),
transform=self.ax.transAxes,
fontsize=12,
color=self.line.get_color(),
animated=self.ax.figure.canvas.supports_blit)
self._y_min, self._y_max = self.ax.get_ylim()
self.ax.figure.canvas.draw_idle()
def _get_data(self, real_part=False):
if self._plot_imag and not real_part:
ydata = self.data_function(axes_manager=self.axes_manager,
**self.data_function_kwargs).imag
else:
ydata = self.data_function(axes_manager=self.axes_manager,
**self.data_function_kwargs).real
return ydata
def _auto_update_line(self, update_ylimits=True, **kwargs):
"""Updates the line plot only if `auto_update` is `True`.
This is useful to connect to events that automatically update the line.
"""
if self.auto_update:
if 'render_figure' not in kwargs.keys():
# if markers are plotted, we don't render the figure now but
# once the markers have been updated
kwargs['render_figure'] = (
len(self.ax.hspy_fig.ax_markers) == 0)
self.update(self, update_ylimits=update_ylimits, **kwargs)
def update(self, force_replot=False, render_figure=True,
update_ylimits=False):
"""Update the current spectrum figure
        Parameters
        ----------
force_replot : bool
If True, close and open the figure. Default is False.
render_figure : bool
If True, render the figure. Useful to avoid firing matplotlib
drawing events too often. Default is True.
update_ylimits : bool
If True, update the y-limits. This is useful to avoid the figure
flickering when different lines update the y-limits consecutively,
in which case, this is done in `Signal1DFigure.update`.
Default is False.
"""
if force_replot is True:
self.close()
self.plot(data_function_kwargs=self.data_function_kwargs,
norm=self.norm)
self._y_min, self._y_max = self.ax.get_ylim()
ydata = self._get_data()
old_xaxis = self.line.get_xdata()
if len(old_xaxis) != self.axis.size or \
np.any(np.not_equal(old_xaxis, self.axis.axis)):
self.line.set_data(self.axis.axis, ydata)
else:
self.line.set_ydata(ydata)
if 'x' in self.autoscale:
self.ax.set_xlim(self.axis.axis[0], self.axis.axis[-1])
if 'v' in self.autoscale:
self.ax.relim()
y1, y2 = np.searchsorted(self.axis.axis,
self.ax.get_xbound())
y2 += 2
            y1, y2 = np.clip((y1, y2), 0, len(ydata) - 1)
clipped_ydata = ydata[y1:y2]
with ignore_warning(category=RuntimeWarning):
# In case of "All-NaN slices"
y_max, y_min = (np.nanmax(clipped_ydata),
np.nanmin(clipped_ydata))
if self._plot_imag:
# Add real plot
yreal = self._get_data(real_part=True)
clipped_yreal = yreal[y1:y2]
with ignore_warning(category=RuntimeWarning):
# In case of "All-NaN slices"
y_min = min(y_min, np.nanmin(clipped_yreal))
                    y_max = max(y_max, np.nanmax(clipped_yreal))
if y_min == y_max:
# To avoid matplotlib UserWarning when calling `set_ylim`
y_min, y_max = y_min - 0.1, y_max + 0.1
if not np.isfinite(y_min):
y_min = None # data are -inf or all NaN
if not np.isfinite(y_max):
y_max = None # data are inf or all NaN
if y_min is not None:
self._y_min = y_min
if y_max is not None:
self._y_max = y_max
if update_ylimits:
# Most of the time, we don't want to call `set_ylim` now to
# avoid flickering of the figure. However, we use the values
# `self._y_min` and `self._y_max` in `Signal1DFigure.update`
self.ax.set_ylim(self._y_min, self._y_max)
if self.plot_indices is True:
self.text.set_text(self.axes_manager.indices)
if render_figure:
if self.ax.figure.canvas.supports_blit:
self.ax.hspy_fig._update_animated()
else:
self.ax.figure.canvas.draw_idle()
def close(self):
_logger.debug('Closing `Signal1DLine`.')
if self.line in self.ax.lines:
self.ax.lines.remove(self.line)
if self.text and self.text in self.ax.texts:
self.ax.texts.remove(self.text)
if self.sf_lines and self in self.sf_lines:
self.sf_lines.remove(self)
self.events.closed.trigger(obj=self)
for f in self.events.closed.connected:
self.events.closed.disconnect(f)
try:
self.ax.figure.canvas.draw_idle()
except BaseException:
pass
_logger.debug('`Signal1DLine` closed.')
def _plot_component(factors, idx, ax=None, cal_axis=None,
comp_label='PC'):
if ax is None:
ax = plt.gca()
if cal_axis is not None:
x = cal_axis.axis
plt.xlabel(cal_axis.units)
else:
x = np.arange(factors.shape[0])
plt.xlabel('Channel index')
ax.plot(x, factors[:, idx], label='%s %i' % (comp_label, idx))
return ax
def _plot_loading(loadings, idx, axes_manager, ax=None,
comp_label='PC', no_nans=True, calibrate=True,
cmap=plt.cm.gray):
if ax is None:
ax = plt.gca()
if no_nans:
loadings = np.nan_to_num(loadings)
if axes_manager.navigation_dimension == 2:
extent = None
# get calibration from a passed axes_manager
shape = axes_manager._navigation_shape_in_array
if calibrate:
extent = (axes_manager._axes[0].low_value,
axes_manager._axes[0].high_value,
axes_manager._axes[1].high_value,
axes_manager._axes[1].low_value)
im = ax.imshow(loadings[idx].reshape(shape), cmap=cmap, extent=extent,
interpolation='nearest')
div = make_axes_locatable(ax)
cax = div.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax)
elif axes_manager.navigation_dimension == 1:
if calibrate:
x = axes_manager._axes[0].axis
else:
x = np.arange(axes_manager._axes[0].size)
ax.step(x, loadings[idx])
else:
raise ValueError('View not supported')
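# --- Hedged usage sketch (not part of the original module) ---
# Quick illustration of the module-level `_plot_component` helper above on
# random decomposition factors; the number of channels and components is
# arbitrary and no calibrated axis is passed.
if __name__ == "__main__":
    factors = np.random.RandomState(0).randn(256, 4)   # channels x components
    ax = _plot_component(factors, idx=0, comp_label='PC')
    _plot_component(factors, idx=1, ax=ax, comp_label='PC')
    ax.legend()
    plt.show()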
|
gpl-3.0
|
harshaneelhg/scikit-learn
|
sklearn/decomposition/tests/test_fastica.py
|
272
|
7798
|
"""
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
    ----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
|
bsd-3-clause
|
laurent-george/bokeh
|
bokeh/charts/builder/line_builder.py
|
43
|
5360
|
"""This is the Bokeh charts interface. It gives you a high level API to build
complex plot is a simple way.
This is the Line class which lets you build your Line charts just
passing the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from six import string_types
import numpy as np
from ..utils import cycle_colors
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, DataRange1d, GlyphRenderer, Range1d
from ...models.glyphs import Line as LineGlyph
from ...properties import Any
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Line(values, index=None, **kws):
""" Create a line chart using :class:`LineBuilder <bokeh.charts.builder.line_builder.LineBuilder>` to
render the geometry from values and index.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
        index (str|1d iterable, optional): can be used to specify a common custom
            index for all data series as a **1d iterable** of any sort that will be
            used as the series' common index, or a **string** that corresponds to the
            key of the mapping to be used as index (and not as a data series) if
            the values are a mapping (like a dict, an OrderedDict
            or a pandas DataFrame)
    In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
import numpy as np
from bokeh.charts import Line, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
xyvalues = np.array([[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]])
line = Line(xyvalues, title="line", legend="top_left", ylabel='Languages')
output_file('line.html')
show(line)
"""
return create_and_build(LineBuilder, values, index=index, **kws)
class LineBuilder(Builder):
"""This is the Line class and it is in charge of plotting
Line charts in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed lines taking the references from the source.
"""
index = Any(help="""
An index to be used for all data series as follows:
- A 1d iterable of any sort that will be used as
series common index
- As a string that corresponds to the key of the
mapping to be used as index (and not as data
series) if line.values is a mapping (like a dict,
an OrderedDict or a pandas DataFrame)
""")
def _process_data(self):
"""Calculate the chart properties accordingly from line.values.
Then build a dict containing references to all the points to be
used by the line glyph inside the ``_yield_renderers`` method.
"""
self._data = dict()
# list to save all the attributes we are going to create
self._attr = []
xs = self._values_index
self.set_and_get("x", "", np.array(xs))
for col, values in self._values.items():
if isinstance(self.index, string_types) and col == self.index:
continue
# save every new group we find
self._groups.append(col)
self.set_and_get("y_", col, values)
def _set_sources(self):
"""
Push the Line data into the ColumnDataSource and calculate the
proper ranges.
"""
self._source = ColumnDataSource(self._data)
self.x_range = DataRange1d()
y_names = self._attr[1:]
endy = max(max(self._data[i]) for i in y_names)
starty = min(min(self._data[i]) for i in y_names)
self.y_range = Range1d(
start=starty - 0.1 * (endy - starty),
end=endy + 0.1 * (endy - starty)
)
def _yield_renderers(self):
"""Use the line glyphs to connect the xy points in the Line.
Takes reference points from the data loaded at the ColumnDataSource.
"""
colors = cycle_colors(self._attr, self.palette)
for i, duplet in enumerate(self._attr[1:], start=1):
glyph = LineGlyph(x='x', y=duplet, line_color=colors[i - 1])
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i-1], [renderer]))
yield renderer
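# Illustrative sketch (added for clarity, not part of the original module;
# kept behind a __main__ guard so importing the builder is unaffected).
# When ``values`` is a mapping, ``index`` can name one of its keys, which is
# then used as the common x axis instead of being drawn as a series.
if __name__ == '__main__':
    from bokeh.charts import output_file, show
    xyvalues = dict(x=[1, 2, 3, 4, 5],
                    python=[2, 3, 7, 5, 26],
                    pypy=[12, 33, 47, 15, 126])
    demo_line = Line(xyvalues, index='x', title="line", ylabel='Languages')
    output_file('line_indexed.html')
    show(demo_line)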
|
bsd-3-clause
|
djgagne/scikit-learn
|
sklearn/linear_model/randomized_l1.py
|
68
|
23405
|
"""
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
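# Illustrative sketch (added for clarity, not part of the original module):
# the returned scores are simply the per-feature fraction of resamplings in
# which the feature ended up in the active set, e.g. three hypothetical
# active sets [1, 0, 1], [1, 1, 0] and [1, 0, 0] give scores [1, 1/3, 1/3].
def _example_resampled_scores():
    active_sets = [np.array([1, 0, 1]), np.array([1, 1, 0]), np.array([1, 0, 0])]
    return sum(active_sets) / float(len(active_sets))  # array([1., 0.333, 0.333])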
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
This implements the strategy by Meinshausen and Buhlman:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, sparse matrix shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc'], y_numeric=True,
ensure_min_samples=2)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
# XXX: the two function below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
# Center X and y to avoid fitting the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by resampling the training data and computing
a Lasso on each resampling. In short, the features selected more
often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
The regularization parameter alpha in the Lasso.
Warning: this is not the alpha parameter in the stability selection
article which is scaling.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
selection_threshold: float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
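# Illustrative usage sketch (added for clarity, not part of the original
# module): fit the estimator above on synthetic data and read off the
# stability scores and the boolean support mask. The data, alpha and
# n_resampling values below are made up.
def _example_randomized_lasso():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 10)
    y = X[:, 0] + 0.1 * rng.randn(50)
    clf = RandomizedLasso(alpha=0.025, random_state=0, n_resampling=50).fit(X, y)
    return clf.scores_, clf.get_support()  # per-feature scores, boolean mask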
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
Randomized Logistic Regression works by resampling the training data and computing
a LogisticRegression on each resampling. In short, the features selected
more often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
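# Illustrative usage sketch (added for clarity, not part of the original
# module): the classification counterpart follows the same fit / get_support
# pattern. The toy data below is made up.
def _example_randomized_logistic():
    rng = np.random.RandomState(0)
    X = rng.randn(60, 5)
    y = (X[:, 0] + 0.1 * rng.randn(60) > 0).astype(int)
    clf = RandomizedLogisticRegression(C=1., random_state=0,
                                       n_resampling=50).fit(X, y)
    return clf.scores_, clf.get_support()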
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
# Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stabiliy path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
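# Illustrative usage sketch (added for clarity, not part of the original
# module): compute a stability path on synthetic data; row i of
# ``scores_path`` gives the selection frequency of feature i along the
# alpha / alpha_max grid. The data below is made up.
def _example_lasso_stability_path():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 10)
    y = X[:, 0] + 0.1 * rng.randn(50)
    alpha_grid, scores_path = lasso_stability_path(X, y, random_state=0,
                                                   n_resampling=50)
    return alpha_grid, scores_path  # scores_path.shape == (10, len(alpha_grid))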
|
bsd-3-clause
|
roshantha9/AbstractManycoreSim
|
src/analyse_results/AnalyseResults_Exp_HRTVidMultiScenario.py
|
1
|
17739
|
import sys, os, csv, pprint, math
from collections import OrderedDict
import numpy as np
import random
import shutil
import math
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import scipy.stats
import itertools
from matplotlib.colors import ListedColormap, NoNorm
from matplotlib import mlab
from itertools import cycle # for automatic markers
import json
from operator import itemgetter
import matplotlib.cm as cm
from matplotlib.font_manager import FontProperties
from SimParams import SimParams
MAX_NUM_GOPS_IN_VS = SimParams.WFGEN_MAX_GOPS_PER_VID
NOC_H = 3
NOC_W = 3
FNAME_PREFIX_LIST = [
("NOAC_LUM", "NOAC_LUM"),
("NOCAC_IMPROVEDM", "NOCAC_RMM"),
("SCHEDONLYAC_LUM", "DETERM_LUM"),
("SCHEDONLYAC_IMPROVEDM", "DETERM_RMM")
]
SCENARIO_LIST = [
("SCV1_", "scenario_1"),
("SCV2_", "scenario_2"),
("SCV3_", "scenario_3"),
("SCV4_", "scenario_4")
]
EXP_DATA_DIR = "../experiment_data/hrt_video/"
def plot_FlowsResponseTime(use_sched_test_results = False):
all_scenario_all_actypes_distributions = OrderedDict()
all_distributions = []
for each_scenario in SCENARIO_LIST:
#print each_scenario
for each_fname_prefix in FNAME_PREFIX_LIST:
if("SCH" in each_fname_prefix[0]) and (use_sched_test_results == True): # if it's a schedulability test, we have to get the analytical results
# get data
fname_prefix = each_scenario[0] + each_fname_prefix[0] + "_"+str(NOC_H)+"_"+str(NOC_W)+"_"
fname = EXP_DATA_DIR + each_scenario[1] + "/" +fname_prefix + "test__schedtestresults.js"
json_data=open(fname)
flow_data = json.load(json_data)
sorted_flow_data_response_times = flow_data[-1]["current_allflows_wcrt_distribution"]
else:
# get data
fname_prefix = each_scenario[0] + each_fname_prefix[0] + "_"+str(NOC_H)+"_"+str(NOC_W)+"_"
fname = EXP_DATA_DIR + each_scenario[1] + "/" +fname_prefix + "test__flwcompleted.js"
json_data=open(fname)
flow_data = json.load(json_data)
# sort them according to id
sorted_flow_data = sorted(flow_data, key=itemgetter('id'))
sorted_flow_data_response_times = [float(x['et']-x['st']) for x in sorted_flow_data if x['type'] == 1]
all_distributions.append(np.array(sorted_flow_data_response_times))
#print each_fname_prefix
if (each_scenario not in all_scenario_all_actypes_distributions):
all_scenario_all_actypes_distributions[each_scenario] = OrderedDict()
all_scenario_all_actypes_distributions[each_scenario][each_fname_prefix[0]] = sorted_flow_data_response_times
else:
all_scenario_all_actypes_distributions[each_scenario][each_fname_prefix[0]] = sorted_flow_data_response_times
# plot all ac types flow-response-times for all ac-types
fig = plt.figure()
fig.canvas.set_window_title('plot_FlowsResponseTime')
ax = plt.subplot(111)
dist_sizes = [len(x) for x in all_distributions]
pprint.pprint(dist_sizes)
dist_means = [np.mean(x) for x in all_distributions]
dist_max = [np.max(x) for x in all_distributions]
pprint.pprint(dist_means)
# plot a group of boxplots for each scenario
width=0.3
ind = np.arange(len(all_distributions))
ind_means = np.arange(len(dist_means))
ind_max = np.arange(len(dist_max))
pos_0 = ind
ax.boxplot(all_distributions,0,'', whis=3, positions=pos_0, widths=0.3)
ax.plot(ind_means, dist_means, marker='d', color='g', linewidth=0.3)
ax.plot(ind_max, dist_max, marker='x', color='r', linewidth=0.3)
#linestyle
# put up vertical lines - separating the different scenarios
for ix, each_scenario in enumerate(SCENARIO_LIST):
line_xcord = (((len(FNAME_PREFIX_LIST)-1)*(ix+1))+(ix)+0.4)
plt.axvline(x=line_xcord, linewidth=2, color='k')
labels = [x[1] for x in FNAME_PREFIX_LIST] *len(SCENARIO_LIST)
xticks = range(0,(len(labels)))
xticks = [x for x in xticks]
ax.set_xticks(xticks)
ax.set_xticklabels(labels, rotation=75)
plt.subplots_adjust(bottom=0.25)
def plot_TaskResponseTime(use_sched_test_results=False):
all_scenario_all_actypes_distributions = OrderedDict()
all_distributions = []
for each_scenario in SCENARIO_LIST:
#print each_scenario
for each_fname_prefix in FNAME_PREFIX_LIST:
if("SCH" in each_fname_prefix[0]) and (use_sched_test_results == True): # if it's a schedulability test, we have to get the analytical results
# get data
fname_prefix = each_scenario[0] + each_fname_prefix[0] + "_"+str(NOC_H)+"_"+str(NOC_W)+"_"
fname = EXP_DATA_DIR + each_scenario[1] + "/" +fname_prefix + "test__schedtestresults.js"
json_data=open(fname)
flow_data = json.load(json_data)
sorted_task_data_response_times = flow_data[-1]["current_alltasks_wcrt_withdeps_distibution"] # get the sched test as at the last VS admission
else:
# get data
fname_prefix = each_scenario[0] + each_fname_prefix[0] + "_"+str(NOC_H)+"_"+str(NOC_W)+"_"
fname = EXP_DATA_DIR + each_scenario[1] + "/" +fname_prefix + "test__obuff.js"
json_data=open(fname)
flow_data = json.load(json_data)
# sort them according to id
sorted_task_data = sorted(flow_data, key=itemgetter('id'))
sorted_task_data_response_times = [float(x['et']-x['dt']) for x in sorted_task_data]
all_distributions.append(np.array(sorted_task_data_response_times))
#print each_fname_prefix
if (each_scenario not in all_scenario_all_actypes_distributions):
all_scenario_all_actypes_distributions[each_scenario] = OrderedDict()
all_scenario_all_actypes_distributions[each_scenario][each_fname_prefix[0]] = sorted_task_data_response_times
else:
all_scenario_all_actypes_distributions[each_scenario][each_fname_prefix[0]] = sorted_task_data_response_times
# plot all ac types flow-response-times for all ac-types
fig = plt.figure()
fig.canvas.set_window_title('plot_TaskResponseTime')
ax = plt.subplot(111)
dist_sizes = [len(x) for x in all_distributions]
pprint.pprint(dist_sizes)
dist_means = [np.mean(x) for x in all_distributions]
dist_max = [np.max(x) for x in all_distributions]
pprint.pprint(dist_means)
# plot a group of boxplots for each scenario
width=0.3
ind = np.arange(len(all_distributions))
ind_means = np.arange(len(dist_means))
ind_max = np.arange(len(dist_max))
pos_0 = ind
ax.boxplot(all_distributions,0,'', whis=3, positions=pos_0, widths=0.3)
ax.plot(ind_means, dist_means, marker='d', color='g', linewidth=0.3)
ax.plot(ind_max, dist_max, marker='x', color='r', linewidth=0.3)
plt.hold(True)
# put up vertical lines - separating the different scenarios
for ix, each_scenario in enumerate(SCENARIO_LIST):
line_xcord = (((len(FNAME_PREFIX_LIST)-1)*(ix+1))+(ix)+0.4)
plt.axvline(x=line_xcord, linewidth=2, color='k')
labels = [x[1] for x in FNAME_PREFIX_LIST] *len(SCENARIO_LIST)
xticks = range(0,(len(labels)))
xticks = [x for x in xticks]
ax.set_xticks(xticks)
ax.set_xticklabels(labels, rotation=75)
plt.subplots_adjust(bottom=0.25)
def plot_GoPResponseTime(use_sched_test_results=False):
all_scenario_all_actypes_distributions = OrderedDict()
all_distributions = []
for each_scenario in SCENARIO_LIST:
#print each_scenario
for each_fname_prefix in FNAME_PREFIX_LIST:
if("SCH" in each_fname_prefix[0]) and (use_sched_test_results == True): # if it's a schedulability test, we have to get the analytical results
# get data
fname_prefix = each_scenario[0] + each_fname_prefix[0] + "_"+str(NOC_H)+"_"+str(NOC_W)+"_"
fname = EXP_DATA_DIR + each_scenario[1] + "/" +fname_prefix + "test__schedtestresults.js"
json_data=open(fname)
gop_data = json.load(json_data)
sorted_gop_data_response_times = [x[1] for x in gop_data[-1]["current_stream_cp_wcet"]] # get the sched test as at the last VS admission
else:
# get data
fname_prefix = each_scenario[0] + each_fname_prefix[0] + "_"+str(NOC_H)+"_"+str(NOC_W)+"_"
fname = EXP_DATA_DIR + each_scenario[1] + "/" +fname_prefix + "test__gopsopbuffsumm.js"
json_data=open(fname)
gop_data = json.load(json_data)
# sort them according to id
sorted_gop_data = sorted(gop_data.values(), key=itemgetter('gop_unique_id'))
sorted_gop_data_response_times = [float(x['end_time']-x['start_time']) for x in sorted_gop_data]
if(len(sorted_gop_data_response_times)>0 and (None not in sorted_gop_data_response_times)):
all_distributions.append(sorted_gop_data_response_times)
else:
print each_fname_prefix
print each_scenario
sys.exit("Error!!")
#print each_fname_prefix
if (each_scenario not in all_scenario_all_actypes_distributions):
all_scenario_all_actypes_distributions[each_scenario] = OrderedDict()
all_scenario_all_actypes_distributions[each_scenario][each_fname_prefix[0]] = sorted_gop_data_response_times
else:
all_scenario_all_actypes_distributions[each_scenario][each_fname_prefix[0]] = sorted_gop_data_response_times
# plot all ac types flow-response-times for all ac-types
fig = plt.figure()
fig.canvas.set_window_title('plot_GoPResponseTime')
ax = plt.subplot(111)
dist_sizes = [len(x) for x in all_distributions]
pprint.pprint(dist_sizes)
dist_means = [np.mean(x) for x in all_distributions]
dist_max = [np.max(x) for x in all_distributions]
pprint.pprint(dist_means)
# plot a group of boxplots for each scenario
width=0.3
ind = np.arange(len(all_distributions))
ind_means = np.arange(len(dist_means))
ind_max = np.arange(len(dist_max))
pos_0 = ind
ax.boxplot(all_distributions,0,'', whis=3, positions=pos_0, widths=0.3)
ax.plot(ind_means, dist_means, marker='d', color='g', linewidth=0.3)
ax.plot(ind_max, dist_max, marker='x', color='r', linewidth=0.3)
plt.hold(True)
# put up vertical lines - separating the different scenarios
for ix, each_scenario in enumerate(SCENARIO_LIST):
line_xcord = (((len(FNAME_PREFIX_LIST)-1)*(ix+1))+(ix)+0.4)
plt.axvline(x=line_xcord, linewidth=2, color='k')
labels = [x[1] for x in FNAME_PREFIX_LIST] *len(SCENARIO_LIST)
xticks = range(0,(len(labels)))
xticks = [x for x in xticks]
ax.set_xticks(xticks)
ax.set_xticklabels(labels, rotation=75)
plt.subplots_adjust(bottom=0.25)
#def plot_GoP_Lateness():
#
# nonnn_all_nocsizes_distributions = []
# nn_all_nocsizes_distributions = []
# nonn_max_goplateness_allnocs = []
# nonn_max_goplateness_allnocs = []
#
# all_gl_variances = []
# for each_noc_size in NOC_SIZE:
# noc_h = each_noc_size[0]
# noc_w = each_noc_size[1]
#
# # get non-nn scheme data
# FNAME_PREFIX = "Mapping_10_4_0_"+str(noc_h)+"_"+str(noc_w)+"_"
# fname = EXP_DATA_DIR + "/" + FNAME_PREFIX + "test__gopsopbuffsumm.js"
# json_data=open(fname)
# nonnn_data = json.load(json_data)
#
# # get nn scheme data
# FNAME_PREFIX = "Mapping_0_0_831_"+str(noc_h)+"_"+str(noc_w)+"_"
# fname = EXP_DATA_DIR + "/" + FNAME_PREFIX + "test__gopsopbuffsumm.js"
# json_data=open(fname)
# nn_data = json.load(json_data)
#
# # sort them according to id
# sorted_nonnn_data = sorted(nonnn_data, key=itemgetter('id'))
# sorted_nn_data = sorted(nn_data, key=itemgetter('id'))
#
# if(len(sorted_nonnn_data) != len(sorted_nn_data)):
# sys.exit("invalid sizes")
#
# nonnn_data_goplateness_distribution = [each_task['gop_execution_lateness'] for each_task in sorted_nonnn_data]
# nn_data_goplateness_distribution = [each_task['gop_execution_lateness'] for each_task in sorted_nn_data]
#
# nonnn_all_nocsizes_distributions.append(nonnn_data_goplateness_distribution)
# nn_all_nocsizes_distributions.append(nn_data_goplateness_distribution)
#
# gl_variance_positive= [float(rt_nn-rt_nonnn) for rt_nonnn, rt_nn in zip(nonnn_data_goplateness_distribution, nn_data_goplateness_distribution)
# if float(rt_nn-rt_nonnn)>0.0] # means nn resptime is longer - bad
# gl_variance_negative= [float(rt_nn-rt_nonnn) for rt_nonnn, rt_nn in zip(nonnn_data_goplateness_distribution, nn_data_goplateness_distribution)
# if (float(rt_nn-rt_nonnn))<0.0] # means nn resptime is shorter - good
# gl_variance= [float(rt_nn-rt_nonnn) for rt_nonnn, rt_nn in zip(nonnn_data_goplateness_distribution, nn_data_goplateness_distribution)]
#
# print "---"
# print each_noc_size
# print "rt_variance_positive =" + str(len(gl_variance_positive))
# print "rt_variance_negative =" + str(len(gl_variance_negative))
# print "---"
# all_gl_variances.append(gl_variance)
#
#
# fig = plt.figure()
# fig.canvas.set_window_title('plot_GoP_Lateness - varmeans')
# ax = plt.subplot(111)
#
# width=0.3
# nonnn_means = [np.mean(x) for x in nonnn_all_nocsizes_distributions]
# nn_means = [np.mean(x) for x in nn_all_nocsizes_distributions]
# variance_means = [np.mean(x) for x in all_gl_variances]
# ind = np.arange(len(nonnn_means))
# pos_0 = ind
# pos_1 = ind+width
# rects_ol = ax.bar(pos_0, nonnn_means, width, color='r')
# rects_cl = ax.bar(pos_1, nn_means, width, color='b')
#
# fig = plt.figure()
# fig.canvas.set_window_title('plot_GoP_Lateness-variance')
# ax = plt.subplot(111)
# ind = np.arange(len(all_gl_variances))
# width=0.8
# pos_0 = ind
# print len(all_gl_variances)
# print pos_0
# box_rt_variance = ax.boxplot(nonnn_all_nocsizes_distributions,0,'', whis=1, positions=pos_0, widths=0.3)
# box_rt_variance = ax.boxplot(nn_all_nocsizes_distributions,0,'', whis=1, positions=pos_1, widths=0.3)
#
#def plot_task_executioncosts():
# ol_all_nocsizes_distributions = []
# cl_all_nocsizes_distributions = []
# all_rt_variances = []
# for each_noc_size in NOC_SIZE:
# noc_h = each_noc_size[0]
# noc_w = each_noc_size[1]
#
# # get ol data
# FNAME_PREFIX = "Mapping_12_4_0_"+str(noc_h)+"_"+str(noc_w)+"_"
# fname = EXP_DATA_DIR + "/" + FNAME_PREFIX + "test__obuff.js"
# json_data=open(fname)
# ol_data = json.load(json_data)
#
# # get cl data
# FNAME_PREFIX = "Mapping_0_0_830_"+str(noc_h)+"_"+str(noc_w)+"_"
# fname = EXP_DATA_DIR + "/" + FNAME_PREFIX + "test__obuff.js"
# json_data=open(fname)
# cl_data = json.load(json_data)
#
# ol_data_cc_distribution = [each_task['cc'] for each_task in ol_data]
# cl_data_cc_distribution = [each_task['cc'] for each_task in cl_data]
#
# ol_all_nocsizes_distributions.append(ol_data_cc_distribution)
# cl_all_nocsizes_distributions.append(cl_data_cc_distribution)
#
# fig = plt.figure()
# fig.canvas.set_window_title('plot_task_executioncosts')
# ax = plt.subplot(111)
#
# width=0.3
# ol_means = [np.mean(x) for x in ol_all_nocsizes_distributions]
# cl_means = [np.mean(x) for x in cl_all_nocsizes_distributions]
# ind = np.arange(len(ol_means))
# pos_0 = ind
# pos_1 = ind+width
# rects_ol = ax.boxplot(ol_all_nocsizes_distributions,0,'', whis=1, positions=pos_0, widths=0.3)
# rects_cl = ax.boxplot(cl_all_nocsizes_distributions,0,'', whis=1, positions=pos_1, widths=0.3)
# ax.plot(pos_0, ol_means)
# ax.plot(pos_1, cl_means)
###################################
# HELPERS
###################################
###################################
# MAIN
###################################
#plot_FlowsResponseTime(use_sched_test_results=True)
#plot_TaskResponseTime(use_sched_test_results=True)
plot_GoPResponseTime(use_sched_test_results=True)
#plot_task_executioncosts()
#plot_GoP_Lateness()
print "finished"
plt.show()
|
gpl-3.0
|
dingocuster/scikit-learn
|
examples/model_selection/plot_precision_recall.py
|
249
|
6150
|
"""
================
Precision-Recall
================
Example of Precision-Recall metric to evaluate classifier output quality.
In information retrieval, precision is a measure of result relevancy, while
recall is a measure of how many truly relevant results are returned. A high
area under the curve represents both high recall and high precision, where high
precision relates to a low false positive rate, and high recall relates to a
low false negative rate. High scores for both show that the classifier is
returning accurate results (high precision), as well as returning a majority of
all positive results (high recall).
A system with high recall but low precision returns many results, but most of
its predicted labels are incorrect when compared to the training labels. A
system with high precision but low recall is just the opposite, returning very
few results, but most of its predicted labels are correct when compared to the
training labels. An ideal system with high precision and high recall will
return many results, with all results labeled correctly.
Precision (:math:`P`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false positives
(:math:`F_p`).
:math:`P = \\frac{T_p}{T_p+F_p}`
Recall (:math:`R`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false negatives
(:math:`F_n`).
:math:`R = \\frac{T_p}{T_p + F_n}`
These quantities are also related to the (:math:`F_1`) score, which is defined
as the harmonic mean of precision and recall.
:math:`F1 = 2\\frac{P \\times R}{P+R}`
It is important to note that the precision may not decrease with recall. The
definition of precision (:math:`\\frac{T_p}{T_p + F_p}`) shows that lowering
the threshold of a classifier may increase the denominator, by increasing the
number of results returned. If the threshold was previously set too high, the
new results may all be true positives, which will increase precision. If the
previous threshold was about right or too low, further lowering the threshold
will introduce false positives, decreasing precision.
Recall is defined as :math:`\\frac{T_p}{T_p+F_n}`, where :math:`T_p+F_n` does
not depend on the classifier threshold. This means that lowering the classifier
threshold may increase recall, by increasing the number of true positive
results. It is also possible that lowering the threshold may leave recall
unchanged, while the precision fluctuates.
The relationship between recall and precision can be observed in the
stairstep area of the plot - at the edges of these steps a small change
in the threshold considerably reduces precision, with only a minor gain in
recall. See the corner at recall = .59, precision = .8 for an example of this
phenomenon.
Precision-recall curves are typically used in binary classification to study
the output of a classifier. In order to extend Precision-recall curve and
average precision to multi-class or multi-label classification, it is necessary
to binarize the output. One curve can be drawn per label, but one can also draw
a precision-recall curve by considering each element of the label indicator
matrix as a binary prediction (micro-averaging).
.. note::
See also :func:`sklearn.metrics.average_precision_score`,
:func:`sklearn.metrics.recall_score`,
:func:`sklearn.metrics.precision_score`,
:func:`sklearn.metrics.f1_score`
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn import svm, datasets
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# Split into training and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=random_state)
# Run classifier
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute Precision-Recall and plot curve
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
precision[i], recall[i], _ = precision_recall_curve(y_test[:, i],
y_score[:, i])
average_precision[i] = average_precision_score(y_test[:, i], y_score[:, i])
# Compute micro-average precision-recall curve and its average precision
precision["micro"], recall["micro"], _ = precision_recall_curve(y_test.ravel(),
y_score.ravel())
average_precision["micro"] = average_precision_score(y_test, y_score,
average="micro")
# Plot Precision-Recall curve
plt.clf()
plt.plot(recall[0], precision[0], label='Precision-Recall curve')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall example: AUC={0:0.2f}'.format(average_precision[0]))
plt.legend(loc="lower left")
plt.show()
# Plot Precision-Recall curve for each class
plt.clf()
plt.plot(recall["micro"], precision["micro"],
label='micro-average Precision-recall curve (area = {0:0.2f})'
''.format(average_precision["micro"]))
for i in range(n_classes):
plt.plot(recall[i], precision[i],
label='Precision-recall curve of class {0} (area = {1:0.2f})'
''.format(i, average_precision[i]))
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Extension of Precision-Recall curve to multi-class')
plt.legend(loc="lower right")
plt.show()
|
bsd-3-clause
|
festivalhopper/music-transcription
|
music_transcription/pitch_detection/read_data.py
|
1
|
8930
|
from sklearn.preprocessing import MultiLabelBinarizer
import soundfile
from warnings import warn
from xml.etree import ElementTree
from music_transcription.read_data import DATASET_CORRECTIONS
def read_data_y(wav_file_paths, truth_dataset_format_tuples,
sample_rate, subsampling_step,
min_pitch, max_pitch, onset_group_threshold_seconds=0.05, frame_rate_hz=None):
"""Read samples, onset times and labels.
Parameters
----------
wav_file_paths : list
List of WAV file paths.
truth_dataset_format_tuples : list
List of tuples (path_to_truth_file, dataset, format)
sample_rate : int
Expected sample rate of files
subsampling_step : int
If > 1: only take every nth sample.
min_pitch : int
Minimum expected pitch
max_pitch : int
Maximum expected pitch
onset_group_threshold_seconds : float
Onsets <= this threshold will be grouped together
frame_rate_hz : int, optional
Expected frame rate. If set, it is asserted that sample_rate % frame_rate_hz == 0.
Returns
-------
tuple
((list_of_samples, list_of_onset_times), y, wav_file_paths_valid, truth_dataset_format_tuples_valid)
list_of_samples: list of samples per valid WAV file with valid truth
list_of_onset_times: list of list of onset times per valid WAV file with valid truth
y: label matrix for multilabel classification
wav_file_paths_valid: list of valid WAV files with valid truth
All iterables in this tuple have the same length, except y.
len(y) = sum([len(onset_times) for onset_times in list_of_onset_times])
"""
list_of_samples = []
list_of_onset_times = []
list_of_pitches = []
wav_file_paths_valid = []
truth_dataset_format_tuples_valid = []
for path_to_wav, truth_dataset_format_tuple in zip(wav_file_paths, truth_dataset_format_tuples):
path_to_xml, dataset, truth_format = truth_dataset_format_tuple
if truth_format != 'xml':
raise ValueError('Unsupported format {}'.format(truth_format))
samples = read_samples(path_to_wav, sample_rate, subsampling_step, frame_rate_hz=frame_rate_hz)
onset_times_grouped, pitches_grouped = _read_onset_times_pitches(path_to_xml, min_pitch, max_pitch, dataset,
onset_group_threshold_seconds)
# Skip invalid WAV files and files with missing or invalid truth.
if samples is not None and onset_times_grouped is not None and pitches_grouped is not None:
list_of_samples.append(samples)
list_of_onset_times.append(onset_times_grouped)
list_of_pitches.append(pitches_grouped)
wav_file_paths_valid.append(path_to_wav)
truth_dataset_format_tuples_valid.append(truth_dataset_format_tuple)
label_binarizer = MultiLabelBinarizer(classes=range(min_pitch, max_pitch + 1))
# No-op since classes are already passed in the constructor
label_binarizer.fit(None)
pitch_groups_flat = [pitch_group for pitches_grouped in list_of_pitches for pitch_group in pitches_grouped]
assert len(pitch_groups_flat) == sum([len(onset_times) for onset_times in list_of_onset_times])
# List of sets to binary label matrix for multilabel classification
y = label_binarizer.transform(pitch_groups_flat)
return (list_of_samples, list_of_onset_times), y, wav_file_paths_valid, truth_dataset_format_tuples_valid
def read_samples(path_to_wav, expected_sample_rate, subsampling_step, frame_rate_hz=None):
"""Read samples of a WAV file. Apply subsampling if subsampling_step > 1.
Parameters
----------
path_to_wav : str
Path to WAV file
expected_sample_rate : int
Expected sample rate of files
subsampling_step : int
If > 1: only take every nth sample.
frame_rate_hz : int
Expected frame rate. If set, it is asserted that sample_rate % frame_rate_hz == 0.
Returns
-------
samples : ndarray
1D ndarray containing the samples
WARNING: Only use this function if you don't have labels for a file. Otherwise always use read_data_y, even
if you don't need the labels at this moment. read_data_y makes sure files without proper labels are filtered out.
When using read_samples directly, there's a chance samples and labels will be out of sync.
"""
samples, sample_rate = soundfile.read(path_to_wav)
if len(samples.shape) > 1:
warn('Skipping ' + path_to_wav + ', cannot handle stereo signal.')
return None
elif sample_rate != expected_sample_rate:
warn('Skipping ' + path_to_wav +
', sample rate ' + str(sample_rate) + ' != expected sample rate ' + str(expected_sample_rate) + '.')
return None
elif frame_rate_hz is not None and sample_rate % frame_rate_hz != 0:
raise ValueError('Sample rate ' + str(sample_rate) + ' % frame rate ' + str(frame_rate_hz) + ' != 0')
return samples[::subsampling_step]
def _read_onset_times_pitches(path_to_xml, min_pitch, max_pitch, dataset, onset_group_threshold_seconds):
"""Read truth (onset times and pitches).
Parameters
----------
path_to_xml : str
Path to truth XML file
min_pitch : int
Minimum expected pitch
max_pitch : int
Maximum expected pitch
dataset : int
Dataset label of this file.
Used to adjust the onset time using music_transcription.read_data.DATASET_CORRECTIONS.
onset_group_threshold_seconds : float
Consecutive onsets less than onset_group_threshold_seconds apart will be grouped together
Returns
-------
tuple
(onset_times_grouped, pitches_grouped)
onset_times_grouped: List of onset times, with consecutive onsets less than onset_group_threshold_seconds apart grouped together.
pitches_grouped: List of pitch groups = chords.
All iterables in this tuple have the same length
"""
tree = ElementTree.parse(path_to_xml)
root = tree.getroot()
onset_times = []
pitches = []
for root_child in root:
if root_child.tag == 'transcription':
for event in root_child:
if event.tag != 'event':
raise ValueError('Unexpected XML element, expected event, got ' + event.tag)
onset_time = None
pitch = None
for event_child in event:
if event_child.tag == 'onsetSec':
onset_time = float(event_child.text) + DATASET_CORRECTIONS[dataset]
elif event_child.tag == 'pitch':
pitch = int(event_child.text)
if onset_time is not None and pitch is not None:
if pitch >= min_pitch and pitch <= max_pitch:
onset_times.append(onset_time)
pitches.append(pitch)
else:
warn('Skipping {}, pitch {} is out of range.'.format(path_to_xml, pitch))
return None, None
else:
raise ValueError('File {} does not contain both onset and pitch information: onset_time={}, pitch={}'.format(path_to_xml, onset_time, pitch))
break
# Sort by onset time since this is not always the case in the files.
onset_pitch_tuples_sorted = sorted(zip(onset_times, pitches), key=lambda t: t[0])
onset_times = [t[0] for t in onset_pitch_tuples_sorted]
pitches = [t[1] for t in onset_pitch_tuples_sorted]
onset_times_grouped, pitches_grouped = _group_onsets(onset_times, pitches, onset_group_threshold_seconds)
return onset_times_grouped, pitches_grouped
def _group_onsets(onset_times, pitches, onset_group_threshold_seconds, epsilon=1e-6):
"""Group onsets and corresponding pitches in a way that onsets closer than onset_group_threshold_seconds belong to the same group.
Assumes onset times are sorted (pitch_detection.read_data._read_onset_times_pitches does that).
"""
onset_times_grouped = []
pitches_grouped = []
last_onset = None
onset_group_start = None
onset_group_pitches = set()
for onset_time, pitch in zip(onset_times, pitches):
if last_onset is not None and onset_time - last_onset > onset_group_threshold_seconds + epsilon:
onset_times_grouped.append(onset_group_start)
pitches_grouped.append(onset_group_pitches)
onset_group_start = None
onset_group_pitches = set()
last_onset = onset_time
if onset_group_start is None:
onset_group_start = onset_time
onset_group_pitches.add(pitch)
onset_times_grouped.append(onset_group_start)
pitches_grouped.append(onset_group_pitches)
return onset_times_grouped, pitches_grouped
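# Illustrative sketch (added for clarity, not part of the original module):
# onsets closer together than the threshold collapse into one group that
# keeps the first onset time and the union of the pitches. The numbers are
# made up.
def _example_group_onsets():
    grouped_times, grouped_pitches = _group_onsets(
        [0.00, 0.02, 0.50], [60, 64, 67], onset_group_threshold_seconds=0.05)
    assert grouped_times == [0.00, 0.50]
    assert grouped_pitches == [{60, 64}, {67}]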
|
gpl-3.0
|
rishikksh20/scikit-learn
|
sklearn/cluster/k_means_.py
|
19
|
59631
|
"""K-means clustering"""
# Authors: Gael Varoquaux <[email protected]>
# Thomas Rueckstiess <[email protected]>
# James Bergstra <[email protected]>
# Jan Schlueter <[email protected]>
# Nelle Varoquaux
# Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, ClusterMixin, TransformerMixin
from ..metrics.pairwise import euclidean_distances
from ..metrics.pairwise import pairwise_distances_argmin_min
from ..utils.extmath import row_norms, squared_norm, stable_cumsum
from ..utils.sparsefuncs_fast import assign_rows_csr
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.fixes import astype
from ..utils import check_array
from ..utils import check_random_state
from ..utils import as_float_array
from ..utils import gen_batches
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..utils.random import choice
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.six import string_types
from . import _k_means
from ._k_means_elkan import k_means_elkan
###############################################################################
# Initialization heuristic
def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
"""Init n_clusters seeds according to k-means++
Parameters
-----------
X : array or sparse matrix, shape (n_samples, n_features)
The data to pick seeds for. To avoid memory copy, the input data
should be double precision (dtype=np.float64).
n_clusters : integer
The number of seeds to choose
x_squared_norms : array, shape (n_samples,)
Squared Euclidean norm of each data point.
random_state : numpy.RandomState
The generator used to initialize the centers.
n_local_trials : integer, optional
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
Notes
-----
Selects initial cluster centers for k-means clustering in a smart way
to speed up convergence. see: Arthur, D. and Vassilvitskii, S.
"k-means++: the advantages of careful seeding". ACM-SIAM symposium
on Discrete algorithms. 2007
Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
which is the implementation used in the aforementioned paper.
"""
n_samples, n_features = X.shape
centers = np.empty((n_clusters, n_features), dtype=X.dtype)
assert x_squared_norms is not None, 'x_squared_norms None in _k_init'
# Set the number of local seeding trials if none is given
if n_local_trials is None:
# This is what Arthur/Vassilvitskii tried, but did not report
# specific results for other than mentioning in the conclusion
# that it helped.
n_local_trials = 2 + int(np.log(n_clusters))
# Pick first center randomly
center_id = random_state.randint(n_samples)
if sp.issparse(X):
centers[0] = X[center_id].toarray()
else:
centers[0] = X[center_id]
# Initialize list of closest distances and calculate current potential
closest_dist_sq = euclidean_distances(
centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms,
squared=True)
current_pot = closest_dist_sq.sum()
# Pick the remaining n_clusters-1 points
for c in range(1, n_clusters):
# Choose center candidates by sampling with probability proportional
# to the squared distance to the closest existing center
rand_vals = random_state.random_sample(n_local_trials) * current_pot
candidate_ids = np.searchsorted(stable_cumsum(closest_dist_sq),
rand_vals)
# Compute distances to center candidates
distance_to_candidates = euclidean_distances(
X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
# Decide which candidate is the best
best_candidate = None
best_pot = None
best_dist_sq = None
for trial in range(n_local_trials):
# Compute potential when including center candidate
new_dist_sq = np.minimum(closest_dist_sq,
distance_to_candidates[trial])
new_pot = new_dist_sq.sum()
# Store result if it is the best local trial so far
if (best_candidate is None) or (new_pot < best_pot):
best_candidate = candidate_ids[trial]
best_pot = new_pot
best_dist_sq = new_dist_sq
# Permanently add best center candidate found in local tries
if sp.issparse(X):
centers[c] = X[best_candidate].toarray()
else:
centers[c] = X[best_candidate]
current_pot = best_pot
closest_dist_sq = best_dist_sq
return centers
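# Illustrative sketch (added for clarity, not part of the original module):
# seeding three centers on random data with the private helper above;
# ``row_norms`` and ``check_random_state`` are already imported at module
# level. The data is made up.
def _example_k_init():
    rng = check_random_state(0)
    X_demo = rng.random_sample((20, 2))
    centers = _k_init(X_demo, 3, row_norms(X_demo, squared=True), rng)
    return centers  # ndarray of shape (3, 2)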
###############################################################################
# K-means batch estimation by EM (expectation maximization)
def _validate_center_shape(X, n_centers, centers):
"""Check if centers is compatible with X and n_centers"""
if len(centers) != n_centers:
raise ValueError('The shape of the initial centers (%s) '
'does not match the number of clusters %i'
% (centers.shape, n_centers))
if centers.shape[1] != X.shape[1]:
raise ValueError(
"The number of features of the initial centers %s "
"does not match the number of features of the data %s."
% (centers.shape[1], X.shape[1]))
def _tolerance(X, tol):
"""Return a tolerance which is independent of the dataset"""
if sp.issparse(X):
variances = mean_variance_axis(X, axis=0)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
def k_means(X, n_clusters, init='k-means++', precompute_distances='auto',
n_init=10, max_iter=300, verbose=False,
tol=1e-4, random_state=None, copy_x=True, n_jobs=1,
algorithm="auto", return_n_iter=False):
"""K-means clustering algorithm.
Read more in the :ref:`User Guide <k_means>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The observations to cluster.
n_clusters : int
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init : {'k-means++', 'random', or ndarray, or a callable}, optional
Method for initialization, default to 'k-means++':
'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': generate k centroids from a Gaussian with mean and
variance estimated from the data.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
If a callable is passed, it should take arguments X, k and
a random state and return an initialization.
algorithm : "auto", "full" or "elkan", default="auto"
K-means algorithm to use. The classical EM-style algorithm is "full".
The "elkan" variation is more efficient by using the triangle
inequality, but currently doesn't support sparse data. "auto" chooses
"elkan" for dense data and "full" for sparse data.
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
tol : float, optional
The relative increment in the results before declaring convergence.
verbose : boolean, optional
Verbosity mode.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
copy_x : boolean, optional
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
centroid : float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label : integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
best_n_iter : int
Number of iterations corresponding to the best results.
Returned only if `return_n_iter` is set to True.
"""
if n_init <= 0:
raise ValueError("Invalid number of initializations."
" n_init=%d must be bigger than zero." % n_init)
random_state = check_random_state(random_state)
if max_iter <= 0:
raise ValueError('Number of iterations should be a positive number,'
' got %d instead' % max_iter)
X = as_float_array(X, copy=copy_x)
tol = _tolerance(X, tol)
# If the distances are precomputed every job will create a matrix of shape
# (n_clusters, n_samples). To stop KMeans from eating up memory we only
# activate this if the created matrix is guaranteed to be under 100MB. 12
# million entries consume a little under 100MB if they are of type double.
if precompute_distances == 'auto':
n_samples = X.shape[0]
precompute_distances = (n_clusters * n_samples) < 12e6
elif isinstance(precompute_distances, bool):
pass
else:
raise ValueError("precompute_distances should be 'auto' or True/False"
", but a value of %r was passed" %
precompute_distances)
# Validate init array
if hasattr(init, '__array__'):
init = check_array(init, dtype=X.dtype.type, copy=True)
_validate_center_shape(X, n_clusters, init)
if n_init != 1:
warnings.warn(
'Explicit initial center position passed: '
'performing only one init in k-means instead of n_init=%d'
% n_init, RuntimeWarning, stacklevel=2)
n_init = 1
    # subtract mean of X for more accurate distance computations
if not sp.issparse(X):
X_mean = X.mean(axis=0)
# The copy was already done above
X -= X_mean
if hasattr(init, '__array__'):
init -= X_mean
# precompute squared norms of data points
x_squared_norms = row_norms(X, squared=True)
best_labels, best_inertia, best_centers = None, None, None
if n_clusters == 1:
# elkan doesn't make sense for a single cluster, full will produce
# the right result.
algorithm = "full"
if algorithm == "auto":
algorithm = "full" if sp.issparse(X) else 'elkan'
if algorithm == "full":
kmeans_single = _kmeans_single_lloyd
elif algorithm == "elkan":
kmeans_single = _kmeans_single_elkan
else:
raise ValueError("Algorithm must be 'auto', 'full' or 'elkan', got"
" %s" % str(algorithm))
if n_jobs == 1:
# For a single thread, less memory is needed if we just store one set
# of the best results (as opposed to one set per run per thread).
for it in range(n_init):
# run a k-means once
labels, inertia, centers, n_iter_ = kmeans_single(
X, n_clusters, max_iter=max_iter, init=init, verbose=verbose,
precompute_distances=precompute_distances, tol=tol,
x_squared_norms=x_squared_norms, random_state=random_state)
# determine if these results are the best so far
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
best_n_iter = n_iter_
else:
# parallelisation of k-means runs
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(kmeans_single)(X, n_clusters, max_iter=max_iter, init=init,
verbose=verbose, tol=tol,
precompute_distances=precompute_distances,
x_squared_norms=x_squared_norms,
# Change seed to ensure variety
random_state=seed)
for seed in seeds)
# Get results with the lowest inertia
labels, inertia, centers, n_iters = zip(*results)
best = np.argmin(inertia)
best_labels = labels[best]
best_inertia = inertia[best]
best_centers = centers[best]
best_n_iter = n_iters[best]
if not sp.issparse(X):
if not copy_x:
X += X_mean
best_centers += X_mean
if return_n_iter:
return best_centers, best_labels, best_inertia, best_n_iter
else:
return best_centers, best_labels, best_inertia
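# Hedged usage sketch (added for illustration): the functional interface mirrors the
# KMeans estimator defined further below, e.g.
#
#     >>> import numpy as np
#     >>> X = np.array([[1., 2.], [1., 4.], [1., 0.],
#     ...               [4., 2.], [4., 4.], [4., 0.]])
#     >>> centers, labels, inertia = k_means(X, n_clusters=2, random_state=0)
#     >>> centers.shape
#     (2, 2)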
def _kmeans_single_elkan(X, n_clusters, max_iter=300, init='k-means++',
verbose=False, x_squared_norms=None,
random_state=None, tol=1e-4,
precompute_distances=True):
if sp.issparse(X):
raise ValueError("algorithm='elkan' not supported for sparse input X")
X = check_array(X, order="C")
random_state = check_random_state(random_state)
if x_squared_norms is None:
x_squared_norms = row_norms(X, squared=True)
# init
centers = _init_centroids(X, n_clusters, init, random_state=random_state,
x_squared_norms=x_squared_norms)
centers = np.ascontiguousarray(centers)
if verbose:
print('Initialization complete')
centers, labels, n_iter = k_means_elkan(X, n_clusters, centers, tol=tol,
max_iter=max_iter, verbose=verbose)
inertia = np.sum((X - centers[labels]) ** 2, dtype=np.float64)
return labels, inertia, centers, n_iter
def _kmeans_single_lloyd(X, n_clusters, max_iter=300, init='k-means++',
verbose=False, x_squared_norms=None,
random_state=None, tol=1e-4,
precompute_distances=True):
"""A single run of k-means, assumes preparation completed prior.
Parameters
----------
X : array-like of floats, shape (n_samples, n_features)
The observations to cluster.
n_clusters : int
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
init : {'k-means++', 'random', or ndarray, or a callable}, optional
Method for initialization, default to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': choose k observations (rows) at random from data for
        the initial centroids.
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.
        If a callable is passed, it should take arguments X, k and
        a random state and return an initialization.
tol : float, optional
The relative increment in the results before declaring convergence.
verbose : boolean, optional
Verbosity mode
x_squared_norms : array
Precomputed x_squared_norms.
precompute_distances : boolean, default: True
Precompute distances (faster but takes more memory).
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Returns
-------
centroid : float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label : integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
n_iter : int
Number of iterations run.
"""
random_state = check_random_state(random_state)
best_labels, best_inertia, best_centers = None, None, None
# init
centers = _init_centroids(X, n_clusters, init, random_state=random_state,
x_squared_norms=x_squared_norms)
if verbose:
print("Initialization complete")
# Allocate memory to store the distances for each sample to its
    # closest center for reallocation in case of ties
distances = np.zeros(shape=(X.shape[0],), dtype=X.dtype)
# iterations
for i in range(max_iter):
centers_old = centers.copy()
# labels assignment is also called the E-step of EM
labels, inertia = \
_labels_inertia(X, x_squared_norms, centers,
precompute_distances=precompute_distances,
distances=distances)
# computation of the means is also called the M-step of EM
if sp.issparse(X):
centers = _k_means._centers_sparse(X, labels, n_clusters,
distances)
else:
centers = _k_means._centers_dense(X, labels, n_clusters, distances)
if verbose:
print("Iteration %2d, inertia %.3f" % (i, inertia))
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
center_shift_total = squared_norm(centers_old - centers)
if center_shift_total <= tol:
if verbose:
print("Converged at iteration %d: "
"center shift %e within tolerance %e"
% (i, center_shift_total, tol))
break
if center_shift_total > 0:
# rerun E-step in case of non-convergence so that predicted labels
# match cluster centers
best_labels, best_inertia = \
_labels_inertia(X, x_squared_norms, best_centers,
precompute_distances=precompute_distances,
distances=distances)
return best_labels, best_inertia, best_centers, i + 1
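# Worked micro-example (added; illustrative only): the convergence test above compares
# the squared Frobenius norm of the center shift against ``tol``. If two centers move
# by (0.01, 0.0) and (0.0, 0.02) in one iteration, the total shift is
# 0.01**2 + 0.02**2 = 5e-4, so the loop stops once that value falls below the
# absolute tolerance produced by _tolerance.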
def _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances):
"""Compute labels and inertia using a full distance matrix.
This will overwrite the 'distances' array in-place.
Parameters
----------
X : numpy array, shape (n_sample, n_features)
Input data.
x_squared_norms : numpy array, shape (n_samples,)
Precomputed squared norms of X.
centers : numpy array, shape (n_clusters, n_features)
Cluster centers which data is assigned to.
distances : numpy array, shape (n_samples,)
Pre-allocated array in which distances are stored.
Returns
-------
labels : numpy array, dtype=np.int, shape (n_samples,)
Indices of clusters that samples are assigned to.
inertia : float
Sum of distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
# Breakup nearest neighbor distance computation into batches to prevent
# memory blowup in the case of a large number of samples and clusters.
# TODO: Once PR #7383 is merged use check_inputs=False in metric_kwargs.
labels, mindist = pairwise_distances_argmin_min(
X=X, Y=centers, metric='euclidean', metric_kwargs={'squared': True})
# cython k-means code assumes int32 inputs
labels = labels.astype(np.int32)
if n_samples == distances.shape[0]:
# distances will be changed in-place
distances[:] = mindist
inertia = mindist.sum()
return labels, inertia
def _labels_inertia(X, x_squared_norms, centers,
precompute_distances=True, distances=None):
"""E step of the K-means EM algorithm.
Compute the labels and the inertia of the given samples and centers.
This will compute the distances in-place.
Parameters
----------
X : float64 array-like or CSR sparse matrix, shape (n_samples, n_features)
The input samples to assign to the labels.
x_squared_norms : array, shape (n_samples,)
Precomputed squared euclidean norm of each data point, to speed up
computations.
centers : float array, shape (k, n_features)
The cluster centers.
precompute_distances : boolean, default: True
Precompute distances (faster but takes more memory).
distances : float array, shape (n_samples,)
Pre-allocated array to be filled in with each sample's distance
to the closest center.
Returns
-------
    labels : int array of shape (n_samples,)
The resulting assignment
inertia : float
Sum of distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
    # set the default value of labels to -1 to be able to detect any anomaly
# easily
labels = -np.ones(n_samples, np.int32)
if distances is None:
distances = np.zeros(shape=(0,), dtype=X.dtype)
# distances will be changed in-place
if sp.issparse(X):
inertia = _k_means._assign_labels_csr(
X, x_squared_norms, centers, labels, distances=distances)
else:
if precompute_distances:
return _labels_inertia_precompute_dense(X, x_squared_norms,
centers, distances)
inertia = _k_means._assign_labels_array(
X, x_squared_norms, centers, labels, distances=distances)
return labels, inertia
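# Illustrative sketch (added): with centers held fixed, the E-step just assigns each
# sample to its nearest center and sums the squared distances, e.g.
#
#     >>> import numpy as np
#     >>> X = np.array([[0., 0.], [10., 10.]])
#     >>> centers = np.array([[0., 0.], [10., 10.]])
#     >>> labels, inertia = _labels_inertia(X, row_norms(X, squared=True), centers)
#     >>> labels.tolist(), float(inertia)
#     ([0, 1], 0.0)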
def _init_centroids(X, k, init, random_state=None, x_squared_norms=None,
init_size=None):
"""Compute the initial centroids
Parameters
----------
X : array, shape (n_samples, n_features)
k : int
number of centroids
init : {'k-means++', 'random' or ndarray or callable} optional
Method for initialization
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
x_squared_norms : array, shape (n_samples,), optional
Squared euclidean norm of each data point. Pass it if you have it at
hands already to avoid it being recomputed here. Default: None
init_size : int, optional
Number of samples to randomly sample for speeding up the
initialization (sometimes at the expense of accuracy): the
        algorithm is initialized by running a batch KMeans on a
random subset of the data. This needs to be larger than k.
Returns
-------
centers : array, shape(k, n_features)
"""
random_state = check_random_state(random_state)
n_samples = X.shape[0]
if x_squared_norms is None:
x_squared_norms = row_norms(X, squared=True)
if init_size is not None and init_size < n_samples:
if init_size < k:
warnings.warn(
"init_size=%d should be larger than k=%d. "
"Setting it to 3*k" % (init_size, k),
RuntimeWarning, stacklevel=2)
init_size = 3 * k
init_indices = random_state.randint(0, n_samples, init_size)
X = X[init_indices]
x_squared_norms = x_squared_norms[init_indices]
n_samples = X.shape[0]
elif n_samples < k:
raise ValueError(
"n_samples=%d should be larger than k=%d" % (n_samples, k))
if isinstance(init, string_types) and init == 'k-means++':
centers = _k_init(X, k, random_state=random_state,
x_squared_norms=x_squared_norms)
elif isinstance(init, string_types) and init == 'random':
seeds = random_state.permutation(n_samples)[:k]
centers = X[seeds]
elif hasattr(init, '__array__'):
# ensure that the centers have the same dtype as X
# this is a requirement of fused types of cython
centers = np.array(init, dtype=X.dtype)
elif callable(init):
centers = init(X, k, random_state=random_state)
centers = np.asarray(centers, dtype=X.dtype)
else:
raise ValueError("the init parameter for the k-means should "
"be 'k-means++' or 'random' or an ndarray, "
"'%s' (type '%s') was passed." % (init, type(init)))
if sp.issparse(centers):
centers = centers.toarray()
_validate_center_shape(X, k, centers)
return centers
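# Hedged example (added): with init='random' the centers are always actual rows of X,
# since the branch above just permutes the sample indices and keeps the first k:
#
#     >>> import numpy as np
#     >>> X = np.arange(10, dtype=np.float64).reshape(5, 2)
#     >>> c = _init_centroids(X, 2, 'random',
#     ...                     random_state=np.random.RandomState(0))
#     >>> c.shape
#     (2, 2)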
class KMeans(BaseEstimator, ClusterMixin, TransformerMixin):
"""K-Means clustering
Read more in the :ref:`User Guide <k_means>`.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
n_init : int, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init : {'k-means++', 'random' or an ndarray}
Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
algorithm : "auto", "full" or "elkan", default="auto"
K-means algorithm to use. The classical EM-style algorithm is "full".
The "elkan" variation is more efficient by using the triangle
inequality, but currently doesn't support sparse data. "auto" chooses
"elkan" for dense data and "full" for sparse data.
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
tol : float, default: 1e-4
Relative tolerance with regards to inertia to declare convergence
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
verbose : int, default 0
Verbosity mode.
copy_x : boolean, default True
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
inertia_ : float
Sum of distances of samples to their closest cluster center.
Examples
--------
>>> from sklearn.cluster import KMeans
>>> import numpy as np
>>> X = np.array([[1, 2], [1, 4], [1, 0],
... [4, 2], [4, 4], [4, 0]])
>>> kmeans = KMeans(n_clusters=2, random_state=0).fit(X)
>>> kmeans.labels_
array([0, 0, 0, 1, 1, 1], dtype=int32)
>>> kmeans.predict([[0, 0], [4, 4]])
array([0, 1], dtype=int32)
>>> kmeans.cluster_centers_
array([[ 1., 2.],
[ 4., 2.]])
See also
--------
MiniBatchKMeans
Alternative online implementation that does incremental updates
of the centers positions using mini-batches.
For large scale learning (say n_samples > 10k) MiniBatchKMeans is
probably much faster than the default batch implementation.
Notes
------
The k-means problem is solved using Lloyd's algorithm.
    The average complexity is given by O(k n T), where n is the number of
    samples and T is the number of iterations.
The worst case complexity is given by O(n^(k+2/p)) with
n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,
'How slow is the k-means method?' SoCG2006)
In practice, the k-means algorithm is very fast (one of the fastest
    clustering algorithms available), but it may converge to a local minimum. That's why
it can be useful to restart it several times.
"""
def __init__(self, n_clusters=8, init='k-means++', n_init=10,
max_iter=300, tol=1e-4, precompute_distances='auto',
verbose=0, random_state=None, copy_x=True,
n_jobs=1, algorithm='auto'):
self.n_clusters = n_clusters
self.init = init
self.max_iter = max_iter
self.tol = tol
self.precompute_distances = precompute_distances
self.n_init = n_init
self.verbose = verbose
self.random_state = random_state
self.copy_x = copy_x
self.n_jobs = n_jobs
self.algorithm = algorithm
def _check_fit_data(self, X):
"""Verify that the number of samples given is larger than k"""
X = check_array(X, accept_sparse='csr', dtype=[np.float64, np.float32])
if X.shape[0] < self.n_clusters:
raise ValueError("n_samples=%d should be >= n_clusters=%d" % (
X.shape[0], self.n_clusters))
return X
def _check_test_data(self, X):
X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
expected_n_features = self.cluster_centers_.shape[1]
if not n_features == expected_n_features:
raise ValueError("Incorrect number of features. "
"Got %d features, expected %d" % (
n_features, expected_n_features))
return X
def fit(self, X, y=None):
"""Compute k-means clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Training instances to cluster.
"""
random_state = check_random_state(self.random_state)
X = self._check_fit_data(X)
self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = \
k_means(
X, n_clusters=self.n_clusters, init=self.init,
n_init=self.n_init, max_iter=self.max_iter, verbose=self.verbose,
precompute_distances=self.precompute_distances,
tol=self.tol, random_state=random_state, copy_x=self.copy_x,
n_jobs=self.n_jobs, algorithm=self.algorithm,
return_n_iter=True)
return self
def fit_predict(self, X, y=None):
"""Compute cluster centers and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
"""
return self.fit(X).labels_
def fit_transform(self, X, y=None):
"""Compute clustering and transform X to cluster-distance space.
Equivalent to fit(X).transform(X), but more efficiently implemented.
"""
# Currently, this just skips a copy of the data if it is not in
# np.array or CSR format already.
# XXX This skips _check_test_data, which may change the dtype;
# we should refactor the input validation.
X = self._check_fit_data(X)
return self.fit(X)._transform(X)
def transform(self, X, y=None):
"""Transform X to a cluster-distance space.
In the new space, each dimension is the distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
return self._transform(X)
def _transform(self, X):
"""guts of transform method; no input validation"""
return euclidean_distances(X, self.cluster_centers_)
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0]
def score(self, X, y=None):
"""Opposite of the value of X on the K-means objective.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data.
Returns
-------
score : float
Opposite of the value of X on the K-means objective.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1]
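# Illustrative note (added): for the data used during fit, ``score`` is simply the
# negative of the stored inertia, e.g.
#
#     >>> import numpy as np
#     >>> X = np.array([[1., 2.], [1., 4.], [4., 2.], [4., 4.]])
#     >>> km = KMeans(n_clusters=2, random_state=0).fit(X)
#     >>> bool(np.isclose(km.score(X), -km.inertia_))
#     True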
def _mini_batch_step(X, x_squared_norms, centers, counts,
old_center_buffer, compute_squared_diff,
distances, random_reassign=False,
random_state=None, reassignment_ratio=.01,
verbose=False):
"""Incremental update of the centers for the Minibatch K-Means algorithm.
Parameters
----------
X : array, shape (n_samples, n_features)
The original data array.
x_squared_norms : array, shape (n_samples,)
Squared euclidean norm of each data point.
centers : array, shape (k, n_features)
The cluster centers. This array is MODIFIED IN PLACE
counts : array, shape (k,)
The vector in which we keep track of the numbers of elements in a
cluster. This array is MODIFIED IN PLACE
distances : array, dtype float, shape (n_samples), optional
If not None, should be a pre-allocated array that will be used to store
the distances of each sample to its closest center.
May not be None when random_reassign is True.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
random_reassign : boolean, optional
If True, centers with very low counts are randomly reassigned
to observations.
reassignment_ratio : float, optional
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more likely to be reassigned, which means that the
model will take longer to converge, but should converge in a
better clustering.
verbose : bool, optional, default False
Controls the verbosity.
compute_squared_diff : bool
If set to False, the squared diff computation is skipped.
old_center_buffer : int
Copy of old centers for monitoring convergence.
Returns
-------
inertia : float
Sum of distances of samples to their closest cluster center.
squared_diff : numpy array, shape (n_clusters,)
Squared distances between previous and updated cluster centers.
"""
# Perform label assignment to nearest centers
nearest_center, inertia = _labels_inertia(X, x_squared_norms, centers,
distances=distances)
if random_reassign and reassignment_ratio > 0:
random_state = check_random_state(random_state)
# Reassign clusters that have very low counts
to_reassign = counts < reassignment_ratio * counts.max()
# pick at most .5 * batch_size samples as new centers
if to_reassign.sum() > .5 * X.shape[0]:
indices_dont_reassign = np.argsort(counts)[int(.5 * X.shape[0]):]
to_reassign[indices_dont_reassign] = False
n_reassigns = to_reassign.sum()
if n_reassigns:
# Pick new clusters amongst observations with uniform probability
new_centers = choice(X.shape[0], replace=False, size=n_reassigns,
random_state=random_state)
if verbose:
print("[MiniBatchKMeans] Reassigning %i cluster centers."
% n_reassigns)
if sp.issparse(X) and not sp.issparse(centers):
assign_rows_csr(X,
astype(new_centers, np.intp),
astype(np.where(to_reassign)[0], np.intp),
centers)
else:
centers[to_reassign] = X[new_centers]
# reset counts of reassigned centers, but don't reset them too small
# to avoid instant reassignment. This is a pretty dirty hack as it
# also modifies the learning rates.
counts[to_reassign] = np.min(counts[~to_reassign])
# implementation for the sparse CSR representation completely written in
# cython
if sp.issparse(X):
return inertia, _k_means._mini_batch_update_csr(
X, x_squared_norms, centers, counts, nearest_center,
old_center_buffer, compute_squared_diff)
# dense variant in mostly numpy (not as memory efficient though)
k = centers.shape[0]
squared_diff = 0.0
for center_idx in range(k):
# find points from minibatch that are assigned to this center
center_mask = nearest_center == center_idx
count = center_mask.sum()
if count > 0:
if compute_squared_diff:
old_center_buffer[:] = centers[center_idx]
# inplace remove previous count scaling
centers[center_idx] *= counts[center_idx]
# inplace sum with new points members of this cluster
centers[center_idx] += np.sum(X[center_mask], axis=0)
# update the count statistics for this center
counts[center_idx] += count
# inplace rescale to compute mean of all points (old and new)
# Note: numpy >= 1.10 does not support '/=' for the following
# expression for a mixture of int and float (see numpy issue #6464)
centers[center_idx] = centers[center_idx] / counts[center_idx]
# update the squared diff if necessary
if compute_squared_diff:
diff = centers[center_idx].ravel() - old_center_buffer.ravel()
squared_diff += np.dot(diff, diff)
return inertia, squared_diff
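# Worked micro-example (added; illustrative only): the dense update above is a running
# mean whose step size shrinks with the per-center count. With a 1-d center currently
# at 0.0 that has already absorbed counts = 4 samples, a mini-batch contributing the
# points 2.0 and 2.0 moves it to (0.0 * 4 + 2.0 + 2.0) / (4 + 2) = 4.0 / 6 ~= 0.667.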
def _mini_batch_convergence(model, iteration_idx, n_iter, tol,
n_samples, centers_squared_diff, batch_inertia,
context, verbose=0):
"""Helper function to encapsulate the early stopping logic"""
# Normalize inertia to be able to compare values when
# batch_size changes
batch_inertia /= model.batch_size
centers_squared_diff /= model.batch_size
# Compute an Exponentially Weighted Average of the squared
# diff to monitor the convergence while discarding
# minibatch-local stochastic variability:
# https://en.wikipedia.org/wiki/Moving_average
ewa_diff = context.get('ewa_diff')
ewa_inertia = context.get('ewa_inertia')
if ewa_diff is None:
ewa_diff = centers_squared_diff
ewa_inertia = batch_inertia
else:
alpha = float(model.batch_size) * 2.0 / (n_samples + 1)
alpha = 1.0 if alpha > 1.0 else alpha
ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha
ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha
# Log progress to be able to monitor convergence
if verbose:
progress_msg = (
'Minibatch iteration %d/%d:'
' mean batch inertia: %f, ewa inertia: %f ' % (
iteration_idx + 1, n_iter, batch_inertia,
ewa_inertia))
print(progress_msg)
# Early stopping based on absolute tolerance on squared change of
# centers position (using EWA smoothing)
if tol > 0.0 and ewa_diff <= tol:
if verbose:
print('Converged (small centers change) at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# Early stopping heuristic due to lack of improvement on smoothed inertia
ewa_inertia_min = context.get('ewa_inertia_min')
no_improvement = context.get('no_improvement', 0)
if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min:
no_improvement = 0
ewa_inertia_min = ewa_inertia
else:
no_improvement += 1
if (model.max_no_improvement is not None
and no_improvement >= model.max_no_improvement):
if verbose:
print('Converged (lack of improvement in inertia)'
' at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# update the convergence context to maintain state across successive calls:
context['ewa_diff'] = ewa_diff
context['ewa_inertia'] = ewa_inertia
context['ewa_inertia_min'] = ewa_inertia_min
context['no_improvement'] = no_improvement
return False
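# Hedged sketch (added): the EWA used above follows the standard recursion
#     ewa_new = (1 - alpha) * ewa_old + alpha * batch_value,
# with alpha = min(1, 2 * batch_size / (n_samples + 1)). For batch_size=100 and
# n_samples=10000 this gives alpha ~= 0.02, i.e. an effective memory of roughly the
# last hundred mini-batches, which smooths out per-batch noise before the tol and
# max_no_improvement tests are applied.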
class MiniBatchKMeans(KMeans):
"""Mini-Batch K-Means clustering
Read more in the :ref:`User Guide <mini_batch_kmeans>`.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional
Maximum number of iterations over the complete dataset before
stopping independently of any early stopping criterion heuristics.
max_no_improvement : int, default: 10
        Control early stopping based on the number of consecutive mini
        batches that do not yield an improvement on the smoothed inertia.
To disable convergence detection based on inertia, set
max_no_improvement to None.
tol : float, default: 0.0
        Control early stopping based on the relative center changes as
        measured by a smoothed, variance-normalized estimate of the mean
        squared center position changes. This early stopping heuristic is
        closer to the one used for the batch variant of the algorithm
but induces a slight computational and memory overhead over the
inertia heuristic.
To disable convergence detection based on normalized center
change, set tol to 0.0 (default).
batch_size : int, optional, default: 100
Size of the mini batches.
init_size : int, optional, default: 3 * batch_size
Number of samples to randomly sample for speeding up the
initialization (sometimes at the expense of accuracy): the
        algorithm is initialized by running a batch KMeans on a
random subset of the data. This needs to be larger than n_clusters.
init : {'k-means++', 'random' or an ndarray}, default: 'k-means++'
Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
n_init : int, default=3
Number of random initializations that are tried.
In contrast to KMeans, the algorithm is only run once, using the
best of the ``n_init`` initializations as measured by inertia.
compute_labels : boolean, default=True
Compute label assignment and inertia for the complete dataset
once the minibatch optimization has converged in fit.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
reassignment_ratio : float, default: 0.01
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more easily reassigned, which means that the
model will take longer to converge, but should converge in a
better clustering.
verbose : boolean, optional
Verbosity mode.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point (if compute_labels is set to True).
inertia_ : float
The value of the inertia criterion associated with the chosen
partition (if compute_labels is set to True). The inertia is
        defined as the sum of squared distances of samples to their
        closest cluster center.
See also
--------
KMeans
The classic implementation of the clustering method based on the
Lloyd's algorithm. It consumes the whole set of input data at each
iteration.
Notes
-----
See http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf
"""
def __init__(self, n_clusters=8, init='k-means++', max_iter=100,
batch_size=100, verbose=0, compute_labels=True,
random_state=None, tol=0.0, max_no_improvement=10,
init_size=None, n_init=3, reassignment_ratio=0.01):
super(MiniBatchKMeans, self).__init__(
n_clusters=n_clusters, init=init, max_iter=max_iter,
verbose=verbose, random_state=random_state, tol=tol, n_init=n_init)
self.max_no_improvement = max_no_improvement
self.batch_size = batch_size
self.compute_labels = compute_labels
self.init_size = init_size
self.reassignment_ratio = reassignment_ratio
def fit(self, X, y=None):
"""Compute the centroids on X by chunking it into mini-batches.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Training instances to cluster.
"""
random_state = check_random_state(self.random_state)
X = check_array(X, accept_sparse="csr", order='C',
dtype=[np.float64, np.float32])
n_samples, n_features = X.shape
if n_samples < self.n_clusters:
raise ValueError("Number of samples smaller than number "
"of clusters.")
n_init = self.n_init
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=X.dtype)
if n_init != 1:
warnings.warn(
'Explicit initial center position passed: '
'performing only one init in MiniBatchKMeans instead of '
'n_init=%d'
% self.n_init, RuntimeWarning, stacklevel=2)
n_init = 1
x_squared_norms = row_norms(X, squared=True)
if self.tol > 0.0:
tol = _tolerance(X, self.tol)
# using tol-based early stopping needs the allocation of a
            # dedicated buffer, which can be expensive for high dim data:
# hence we allocate it outside of the main loop
old_center_buffer = np.zeros(n_features, dtype=X.dtype)
else:
tol = 0.0
# no need for the center buffer if tol-based early stopping is
# disabled
old_center_buffer = np.zeros(0, dtype=X.dtype)
distances = np.zeros(self.batch_size, dtype=X.dtype)
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
n_iter = int(self.max_iter * n_batches)
init_size = self.init_size
if init_size is None:
init_size = 3 * self.batch_size
if init_size > n_samples:
init_size = n_samples
self.init_size_ = init_size
validation_indices = random_state.randint(0, n_samples, init_size)
X_valid = X[validation_indices]
x_squared_norms_valid = x_squared_norms[validation_indices]
# perform several inits with random sub-sets
best_inertia = None
for init_idx in range(n_init):
if self.verbose:
print("Init %d/%d with method: %s"
% (init_idx + 1, n_init, self.init))
counts = np.zeros(self.n_clusters, dtype=np.int32)
# TODO: once the `k_means` function works with sparse input we
# should refactor the following init to use it instead.
# Initialize the centers using only a fraction of the data as we
# expect n_samples to be very large when using MiniBatchKMeans
cluster_centers = _init_centroids(
X, self.n_clusters, self.init,
random_state=random_state,
x_squared_norms=x_squared_norms,
init_size=init_size)
# Compute the label assignment on the init dataset
batch_inertia, centers_squared_diff = _mini_batch_step(
X_valid, x_squared_norms[validation_indices],
cluster_centers, counts, old_center_buffer, False,
distances=None, verbose=self.verbose)
# Keep only the best cluster centers across independent inits on
# the common validation set
_, inertia = _labels_inertia(X_valid, x_squared_norms_valid,
cluster_centers)
if self.verbose:
print("Inertia for init %d/%d: %f"
% (init_idx + 1, n_init, inertia))
if best_inertia is None or inertia < best_inertia:
self.cluster_centers_ = cluster_centers
self.counts_ = counts
best_inertia = inertia
# Empty context to be used inplace by the convergence check routine
convergence_context = {}
# Perform the iterative optimization until the final convergence
# criterion
for iteration_idx in range(n_iter):
# Sample a minibatch from the full dataset
minibatch_indices = random_state.randint(
0, n_samples, self.batch_size)
# Perform the actual update step on the minibatch data
batch_inertia, centers_squared_diff = _mini_batch_step(
X[minibatch_indices], x_squared_norms[minibatch_indices],
self.cluster_centers_, self.counts_,
old_center_buffer, tol > 0.0, distances=distances,
# Here we randomly choose whether to perform
# random reassignment: the choice is done as a function
# of the iteration index, and the minimum number of
# counts, in order to force this reassignment to happen
# every once in a while
random_reassign=((iteration_idx + 1)
% (10 + self.counts_.min()) == 0),
random_state=random_state,
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose)
# Monitor convergence and do early stopping if necessary
if _mini_batch_convergence(
self, iteration_idx, n_iter, tol, n_samples,
centers_squared_diff, batch_inertia, convergence_context,
verbose=self.verbose):
break
self.n_iter_ = iteration_idx + 1
if self.compute_labels:
self.labels_, self.inertia_ = self._labels_inertia_minibatch(X)
return self
def _labels_inertia_minibatch(self, X):
"""Compute labels and inertia using mini batches.
        This is slightly slower than doing everything at once but prevents
memory errors / segfaults.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
        labels : array, shape (n_samples,)
Cluster labels for each point.
inertia : float
Sum of squared distances of points to nearest cluster.
"""
if self.verbose:
print('Computing label assignment and total inertia')
x_squared_norms = row_norms(X, squared=True)
slices = gen_batches(X.shape[0], self.batch_size)
results = [_labels_inertia(X[s], x_squared_norms[s],
self.cluster_centers_) for s in slices]
labels, inertia = zip(*results)
return np.hstack(labels), np.sum(inertia)
def partial_fit(self, X, y=None):
"""Update k means estimate on a single mini-batch X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Coordinates of the data points to cluster.
"""
X = check_array(X, accept_sparse="csr")
n_samples, n_features = X.shape
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=X.dtype)
if n_samples == 0:
return self
x_squared_norms = row_norms(X, squared=True)
self.random_state_ = getattr(self, "random_state_",
check_random_state(self.random_state))
if (not hasattr(self, 'counts_')
or not hasattr(self, 'cluster_centers_')):
# this is the first call partial_fit on this object:
# initialize the cluster centers
self.cluster_centers_ = _init_centroids(
X, self.n_clusters, self.init,
random_state=self.random_state_,
x_squared_norms=x_squared_norms, init_size=self.init_size)
self.counts_ = np.zeros(self.n_clusters, dtype=np.int32)
random_reassign = False
distances = None
else:
# The lower the minimum count is, the more we do random
# reassignment, however, we don't want to do random
# reassignment too often, to allow for building up counts
random_reassign = self.random_state_.randint(
10 * (1 + self.counts_.min())) == 0
distances = np.zeros(X.shape[0], dtype=X.dtype)
_mini_batch_step(X, x_squared_norms, self.cluster_centers_,
self.counts_, np.zeros(0, dtype=X.dtype), 0,
random_reassign=random_reassign, distances=distances,
random_state=self.random_state_,
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose)
if self.compute_labels:
self.labels_, self.inertia_ = _labels_inertia(
X, x_squared_norms, self.cluster_centers_)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
return self._labels_inertia_minibatch(X)[0]
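# Hedged usage sketch (added for illustration): the estimator can also be fed data in
# chunks through partial_fit, e.g.
#
#     >>> import numpy as np
#     >>> rng = np.random.RandomState(0)
#     >>> mbk = MiniBatchKMeans(n_clusters=3, random_state=0)
#     >>> for _ in range(5):
#     ...     _ = mbk.partial_fit(rng.rand(50, 2))
#     >>> mbk.cluster_centers_.shape
#     (3, 2)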
|
bsd-3-clause
|
lcharleux/oscillators
|
oscillators/example_code/linear_oscillator_energy.py
|
1
|
1849
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import integrate, misc, fftpack, ndimage
from oscillators.oscillators import Oscillator, FindSteadyState
# Inputs
a = .01 # damping / mass
omega0 = 1. # resonance pulsation
omegad = 2. # drive pulsation
def ep_func(x): return .5 * omega0**2 * x**2
def ddotxd_func(t): return np.cos(omegad * t)
#def ddotxd_func(t): return 0.*t
Td = 2. * np.pi / omegad
time = np.linspace(0., 100. * omegad, 1024)
tav = 15.
o = Oscillator(
a = a,
ep_func = ep_func,
ddotxd_func = ddotxd_func,
time = time,
X0 = np.array([0., 0.]))
#o.solve()
FindSteadyState(o)
filter_core = Td * 5.
fig = plt.figure(0)
plt.clf()
fig.add_subplot(3,1,1)
plt.title(r"Linear oscillator: $\omega_d/\omega_0={0}$, $a = {1}$".format(omegad / omega0, a))
plt.plot(time, o.x, "b-", linewidth = 1.5)
#plt.xlabel("Time, $t$")
plt.ylabel("Position, $x$")
plt.grid()
fig.add_subplot(3,1,2)
#plt.plot(time, o.pa, "b-", linewidth = 1., label = "$p_a$")
#plt.plot(time, o.ph, "r-", linewidth = 1., label = "$p_h$")
plt.plot(time, ndimage.gaussian_filter(o.pa, filter_core), "c-", linewidth = 1., label = r"$\bar p_a$")
plt.plot(time, ndimage.gaussian_filter(o.ph, filter_core), "m-", linewidth = 1., label = r"$\bar p_h$")
#plt.xlabel("Time, $t$")
#plt.xlabel("Time, $t$")
plt.ylabel("Power, $p$")
plt.legend(loc = "upper left", ncol = 2)
plt.grid()
fig.add_subplot(3,1,3)
plt.plot(time, o.ep, "r-", linewidth = 1., label = "$e_p$")
plt.plot(time, o.ec, "g-", linewidth = 1., label = "$e_c$" )
plt.plot(time, o.e, "b-", linewidth = 1., label = "$e$")
plt.plot(time, ndimage.gaussian_filter(o.e, filter_core), "b--", linewidth = 2., label = r"$\bar e$")
plt.xlabel("Time, $t$")
plt.ylabel("Energy, $e$")
plt.grid()
plt.legend(loc = "upper left", ncol = 2)
plt.show()
plt.savefig("linear_oscillator_energy.pdf")
|
gpl-2.0
|
DonBeo/statsmodels
|
statsmodels/sandbox/infotheo.py
|
33
|
16417
|
"""
Information Theoretic and Entropy Measures
References
----------
Golan, As. 2008. "Information and Entropy Econometrics -- A Review and
Synthesis." Foundations And Trends in Econometrics 2(1-2), 1-145.
Golan, A., Judge, G., and Miller, D. 1996. Maximum Entropy Econometrics.
Wiley & Sons, Chichester.
"""
#For MillerMadow correction
#Miller, G. 1955. Note on the bias of information estimates. Info. Theory
# Psychol. Prob. Methods II-B:95-100.
#For ChaoShen method
#Chao, A., and T.-J. Shen. 2003. Nonparametric estimation of Shannon's index of diversity when
#there are unseen species in sample. Environ. Ecol. Stat. 10:429-443.
#Good, I. J. 1953. The population frequencies of species and the estimation of population parameters.
#Biometrika 40:237-264.
#Horvitz, D.G., and D. J. Thompson. 1952. A generalization of sampling without replacement from a finute universe. J. Am. Stat. Assoc. 47:663-685.
#For NSB method
#Nemenman, I., F. Shafee, and W. Bialek. 2002. Entropy and inference, revisited. In: Dietterich, T.,
#S. Becker, Z. Gharamani, eds. Advances in Neural Information Processing Systems 14: 471-478.
#Cambridge (Massachusetts): MIT Press.
#For shrinkage method
#Dougherty, J., Kohavi, R., and Sahami, M. (1995). Supervised and unsupervised discretization of
#continuous features. In International Conference on Machine Learning.
#Yang, Y. and Webb, G. I. (2003). Discretization for naive-bayes learning: managing discretization
#bias and variance. Technical Report 2003/131 School of Computer Science and Software Engineer-
#ing, Monash University.
from statsmodels.compat.python import range, lzip, lmap
from scipy import stats
import numpy as np
from matplotlib import pyplot as plt
from scipy.misc import logsumexp as sp_logsumexp
#TODO: change these to use maxentutils so that over/underflow is handled
#with the logsumexp.
def logsumexp(a, axis=None):
"""
Compute the log of the sum of exponentials log(e^{a_1}+...e^{a_n}) of a
Avoids numerical overflow.
Parameters
----------
a : array-like
The vector to exponentiate and sum
axis : int, optional
The axis along which to apply the operation. Defaults is None.
Returns
-------
sum(log(exp(a)))
Notes
-----
This function was taken from the mailing list
http://mail.scipy.org/pipermail/scipy-user/2009-October/022931.html
This should be superceded by the ufunc when it is finished.
"""
if axis is None:
# Use the scipy.maxentropy version.
return sp_logsumexp(a)
a = np.asarray(a)
shp = list(a.shape)
shp[axis] = 1
a_max = a.max(axis=axis)
s = np.log(np.exp(a - a_max.reshape(shp)).sum(axis=axis))
lse = a_max + s
return lse
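# Illustrative check (added): the max-shift above keeps the exponentials in range, so
# e.g. logsumexp(np.array([[1000., 1000.]]), axis=1) returns 1000 + log(2) ~= 1000.693
# instead of overflowing in np.exp. (With axis=None the call is simply delegated to
# the scipy implementation.)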
def _isproperdist(X):
"""
Checks to see if `X` is a proper probability distribution
"""
X = np.asarray(X)
if not np.allclose(np.sum(X), 1) or not np.all(X>=0) or not np.all(X<=1):
return False
else:
return True
def discretize(X, method="ef", nbins=None):
"""
Discretize `X`
Parameters
----------
    nbins : int, optional
        Number of bins. Default is floor(sqrt(nobs))
method : string
"ef" is equal-frequency binning
"ew" is equal-width binning
Examples
--------
"""
nobs = len(X)
    if nbins is None:
nbins = np.floor(np.sqrt(nobs))
if method == "ef":
discrete = np.ceil(nbins * stats.rankdata(X)/nobs)
if method == "ew":
width = np.max(X) - np.min(X)
width = np.floor(width/nbins)
svec, ivec = stats.fastsort(X)
discrete = np.zeros(nobs)
binnum = 1
base = svec[0]
discrete[ivec[0]] = binnum
for i in range(1,nobs):
if svec[i] < base + width:
discrete[ivec[i]] = binnum
else:
base = svec[i]
binnum += 1
discrete[ivec[i]] = binnum
return discrete
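# Hedged example (added): with method="ef" the ranks are split into equally populated
# bins, e.g. discretize(np.arange(16.), nbins=4) returns the bin labels
# 1, 1, 1, 1, 2, ..., 4 with exactly four samples per bin.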
#TODO: looks okay but needs more robust tests for corner cases
def logbasechange(a,b):
"""
There is a one-to-one transformation of the entropy value from
a log base b to a log base a :
H_{b}(X)=log_{b}(a)[H_{a}(X)]
Returns
-------
log_{b}(a)
"""
return np.log(b)/np.log(a)
def natstobits(X):
"""
Converts from nats to bits
"""
return logbasechange(np.e, 2) * X
def bitstonats(X):
"""
Converts from bits to nats
"""
return logbasechange(2, np.e) * X
#TODO: make this entropy, and then have different measures as
#a method
def shannonentropy(px, logbase=2):
"""
This is Shannon's entropy
Parameters
-----------
    logbase : int or np.e
The base of the log
px : 1d or 2d array_like
Can be a discrete probability distribution, a 2d joint distribution,
or a sequence of probabilities.
Returns
    -------
    For log base 2 (bits) given a discrete distribution
        H(p) = sum(px * log2(1/px)) = -sum(px * log2(px)) = E[log2(1/p(X))]
    For log base 2 (bits) given a joint distribution
        H(px, py) = -sum_{k,j} w_{kj} * log2(w_{kj})
Notes
-----
shannonentropy(0) is defined as 0
"""
#TODO: haven't defined the px,py case?
px = np.asarray(px)
if not np.all(px <= 1) or not np.all(px >= 0):
raise ValueError("px does not define proper distribution")
entropy = -np.sum(np.nan_to_num(px*np.log2(px)))
if logbase != 2:
return logbasechange(2,logbase) * entropy
else:
return entropy
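# Illustrative check (added): a fair coin carries exactly one bit,
#
#     >>> float(shannonentropy([0.5, 0.5]))
#     1.0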
# Shannon's information content
def shannoninfo(px, logbase=2):
"""
Shannon's information
Parameters
----------
px : float or array-like
`px` is a discrete probability distribution
Returns
-------
For logbase = 2
    -np.log2(px)
"""
px = np.asarray(px)
if not np.all(px <= 1) or not np.all(px >= 0):
raise ValueError("px does not define proper distribution")
if logbase != 2:
return - logbasechange(2,logbase) * np.log2(px)
else:
return - np.log2(px)
def condentropy(px, py, pxpy=None, logbase=2):
"""
Return the conditional entropy of X given Y.
Parameters
----------
px : array-like
py : array-like
pxpy : array-like, optional
If pxpy is None, the distributions are assumed to be independent
        and condentropy(px, py) = shannonentropy(px)
logbase : int or np.e
Returns
-------
    sum_{kj} w_{kj} * log2(q_{j}/w_{kj})
    where q_{j} = Y[j]
    and w_{kj} = X[k,j]
"""
if not _isproperdist(px) or not _isproperdist(py):
raise ValueError("px or py is not a proper probability distribution")
    if pxpy is not None and not _isproperdist(pxpy):
        raise ValueError("pxpy is not a proper joint distribution")
    if pxpy is None:
        pxpy = np.outer(py, px)
condent = np.sum(pxpy * np.nan_to_num(np.log2(py/pxpy)))
if logbase == 2:
return condent
else:
return logbasechange(2, logbase) * condent
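# Illustrative check (added): when pxpy is omitted the joint is built as the outer
# product, so conditioning changes nothing and H(X|Y) == H(X), e.g.
#
#     >>> float(condentropy([0.5, 0.5], [0.5, 0.5]))
#     1.0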
def mutualinfo(px,py,pxpy, logbase=2):
"""
Returns the mutual information between X and Y.
Parameters
----------
px : array-like
Discrete probability distribution of random variable X
py : array-like
Discrete probability distribution of random variable Y
pxpy : 2d array-like
The joint probability distribution of random variables X and Y.
Note that if X and Y are independent then the mutual information
is zero.
logbase : int or np.e, optional
Default is 2 (bits)
Returns
-------
shannonentropy(px) - condentropy(px,py,pxpy)
"""
if not _isproperdist(px) or not _isproperdist(py):
raise ValueError("px or py is not a proper probability distribution")
    if pxpy is not None and not _isproperdist(pxpy):
        raise ValueError("pxpy is not a proper joint distribution")
    if pxpy is None:
        pxpy = np.outer(py, px)
return shannonentropy(px, logbase=logbase) - condentropy(px,py,pxpy,
logbase=logbase)
def corrent(px,py,pxpy,logbase=2):
"""
An information theoretic correlation measure.
Reflects linear and nonlinear correlation between two random variables
X and Y, characterized by the discrete probability distributions px and py
respectively.
Parameters
----------
px : array-like
Discrete probability distribution of random variable X
py : array-like
Discrete probability distribution of random variable Y
pxpy : 2d array-like, optional
Joint probability distribution of X and Y. If pxpy is None, X and Y
are assumed to be independent.
logbase : int or np.e, optional
Default is 2 (bits)
Returns
-------
mutualinfo(px,py,pxpy,logbase=logbase)/shannonentropy(py,logbase=logbase)
Notes
-----
This is also equivalent to
    corrent(px, py, pxpy) = 1 - condentropy(px, py, pxpy)/shannonentropy(py)
"""
if not _isproperdist(px) or not _isproperdist(py):
raise ValueError("px or py is not a proper probability distribution")
    if pxpy is not None and not _isproperdist(pxpy):
        raise ValueError("pxpy is not a proper joint distribution")
    if pxpy is None:
        pxpy = np.outer(py, px)
return mutualinfo(px,py,pxpy,logbase=logbase)/shannonentropy(py,
logbase=logbase)
def covent(px,py,pxpy,logbase=2):
"""
An information theoretic covariance measure.
Reflects linear and nonlinear correlation between two random variables
X and Y, characterized by the discrete probability distributions px and py
respectively.
Parameters
----------
px : array-like
Discrete probability distribution of random variable X
py : array-like
Discrete probability distribution of random variable Y
pxpy : 2d array-like, optional
Joint probability distribution of X and Y. If pxpy is None, X and Y
are assumed to be independent.
logbase : int or np.e, optional
Default is 2 (bits)
Returns
-------
    condentropy(px, py, pxpy, logbase=logbase) + condentropy(py, px, pxpy,
        logbase=logbase)
Notes
-----
This is also equivalent to
    covent(px, py, pxpy) = condentropy(px, py, pxpy) + condentropy(py, px, pxpy)
"""
if not _isproperdist(px) or not _isproperdist(py):
raise ValueError("px or py is not a proper probability distribution")
    if pxpy is not None and not _isproperdist(pxpy):
        raise ValueError("pxpy is not a proper joint distribution")
    if pxpy is None:
        pxpy = np.outer(py, px)
    return (condentropy(px, py, pxpy, logbase=logbase) +
            condentropy(py, px, pxpy, logbase=logbase))
#### Generalized Entropies ####
def renyientropy(px,alpha=1,logbase=2,measure='R'):
"""
Renyi's generalized entropy
Parameters
----------
px : array-like
Discrete probability distribution of random variable X. Note that
px is assumed to be a proper probability distribution.
logbase : int or np.e, optional
Default is 2 (bits)
alpha : float or inf
The order of the entropy. The default is 1, which in the limit
is just Shannon's entropy. 2 is Renyi (Collision) entropy. If
the string "inf" or numpy.inf is specified the min-entropy is returned.
measure : str, optional
The type of entropy measure desired. 'R' returns Renyi entropy
measure. 'T' returns the Tsallis entropy measure.
Returns
-------
1/(1-alpha)*log(sum(px**alpha))
In the limit as alpha -> 1, Shannon's entropy is returned.
In the limit as alpha -> inf, min-entropy is returned.
"""
#TODO:finish returns
#TODO:add checks for measure
if not _isproperdist(px):
raise ValueError("px is not a proper probability distribution")
alpha = float(alpha)
if alpha == 1:
genent = shannonentropy(px)
if logbase != 2:
return logbasechange(2, logbase) * genent
return genent
    elif 'inf' in str(alpha).lower() or alpha == np.inf:
        # min-entropy; computed in base 2 and converted like the other branches
        genent = -np.log2(np.max(px))
        if logbase != 2:
            return logbasechange(2, logbase) * genent
        return genent
    # gets here if alpha != (1 or inf)
    px = px**alpha
    genent = np.log2(px.sum())
    if logbase == 2:
        return 1/(1-alpha) * genent
    else:
        return 1/(1-alpha) * logbasechange(2, logbase) * genent
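# Illustrative check (added, assuming the base-2 convention documented above): for a
# uniform distribution every order of the Renyi entropy coincides with the Shannon
# value, e.g.
#
#     >>> import numpy as np
#     >>> float(renyientropy(np.array([0.5, 0.5]), alpha=2))  # collision entropy of a fair coin
#     1.0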
#TODO: before completing this, need to rethink the organization of
# (relative) entropy measures, ie., all put into one function
# and have kwdargs, etc.?
def gencrossentropy(px,py,pxpy,alpha=1,logbase=2, measure='T'):
"""
Generalized cross-entropy measures.
Parameters
----------
px : array-like
Discrete probability distribution of random variable X
py : array-like
Discrete probability distribution of random variable Y
pxpy : 2d array-like, optional
Joint probability distribution of X and Y. If pxpy is None, X and Y
are assumed to be independent.
logbase : int or np.e, optional
Default is 2 (bits)
measure : str, optional
The measure is the type of generalized cross-entropy desired. 'T' is
the cross-entropy version of the Tsallis measure. 'CR' is Cressie-Read
measure.
"""
if __name__ == "__main__":
print("From Golan (2008) \"Information and Entropy Econometrics -- A Review \
and Synthesis")
print("Table 3.1")
# Examples from Golan (2008)
X = [.2,.2,.2,.2,.2]
Y = [.322,.072,.511,.091,.004]
for i in X:
print(shannoninfo(i))
for i in Y:
print(shannoninfo(i))
print(shannonentropy(X))
print(shannonentropy(Y))
p = [1e-5,1e-4,.001,.01,.1,.15,.2,.25,.3,.35,.4,.45,.5]
plt.subplot(111)
plt.ylabel("Information")
plt.xlabel("Probability")
x = np.linspace(0,1,100001)
plt.plot(x, shannoninfo(x))
# plt.show()
plt.subplot(111)
plt.ylabel("Entropy")
plt.xlabel("Probability")
x = np.linspace(0,1,101)
plt.plot(x, lmap(shannonentropy, lzip(x,1-x)))
# plt.show()
# define a joint probability distribution
# from Golan (2008) table 3.3
w = np.array([[0,0,1./3],[1/9.,1/9.,1/9.],[1/18.,1/9.,1/6.]])
# table 3.4
px = w.sum(0)
py = w.sum(1)
H_X = shannonentropy(px)
H_Y = shannonentropy(py)
H_XY = shannonentropy(w)
H_XgivenY = condentropy(px,py,w)
H_YgivenX = condentropy(py,px,w)
# note that cross-entropy is not a distance measure as the following shows
D_YX = logbasechange(2,np.e)*stats.entropy(px, py)
D_XY = logbasechange(2,np.e)*stats.entropy(py, px)
I_XY = mutualinfo(px,py,w)
print("Table 3.3")
print(H_X,H_Y, H_XY, H_XgivenY, H_YgivenX, D_YX, D_XY, I_XY)
print("discretize functions")
X=np.array([21.2,44.5,31.0,19.5,40.6,38.7,11.1,15.8,31.9,25.8,20.2,14.2,
24.0,21.0,11.3,18.0,16.3,22.2,7.8,27.8,16.3,35.1,14.9,17.1,28.2,16.4,
16.5,46.0,9.5,18.8,32.1,26.1,16.1,7.3,21.4,20.0,29.3,14.9,8.3,22.5,
12.8,26.9,25.5,22.9,11.2,20.7,26.2,9.3,10.8,15.6])
discX = discretize(X)
#CF: R's infotheo
#TODO: compare to pyentropy quantize?
    print()
print("Example in section 3.6 of Golan, using table 3.3")
print("Bounding errors using Fano's inequality")
print("H(P_{e}) + P_{e}log(K-1) >= H(X|Y)")
print("or, a weaker inequality")
print("P_{e} >= [H(X|Y) - 1]/log(K)")
print("P(x) = %s" % px)
print("X = 3 has the highest probability, so this is the estimate Xhat")
pe = 1 - px[2]
print("The probability of error Pe is 1 - p(X=3) = %0.4g" % pe)
H_pe = shannonentropy([pe,1-pe])
print("H(Pe) = %0.4g and K=3" % H_pe)
print("H(Pe) + Pe*log(K-1) = %0.4g >= H(X|Y) = %0.4g" % \
(H_pe+pe*np.log2(2), H_XgivenY))
print("or using the weaker inequality")
print("Pe = %0.4g >= [H(X) - 1]/log(K) = %0.4g" % (pe, (H_X - 1)/np.log2(3)))
print("Consider now, table 3.5, where there is additional information")
print("The conditional probabilities of P(X|Y=y) are ")
w2 = np.array([[0.,0.,1.],[1/3.,1/3.,1/3.],[1/6.,1/3.,1/2.]])
print(w2)
# not a proper distribution?
print("The probability of error given this information is")
print("Pe = [H(X|Y) -1]/log(K) = %0.4g" % ((np.mean([0,shannonentropy(w2[1]),shannonentropy(w2[2])])-1)/np.log2(3)))
print("such that more information lowers the error")
### Stochastic processes
markovchain = np.array([[.553,.284,.163],[.465,.312,.223],[.420,.322,.258]])
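    # Illustrative sketch: the entropy rate of this Markov chain is
    # H = sum_i pi_i * H(P[i, :]), where pi is the stationary distribution
    # solving pi = pi * P (P is row-stochastic). shannonentropy is assumed to
    # accept a 1d probability vector, as it is used above, with its default
    # log base 2 (bits).
    eigvals, eigvecs = np.linalg.eig(markovchain.T)
    stationary = np.real(eigvecs[:, np.argmin(np.abs(eigvals - 1))])
    stationary = stationary / stationary.sum()
    entropy_rate = sum(p_i * shannonentropy(row)
                       for p_i, row in zip(stationary, markovchain))
    print("Markov chain stationary distribution: %s" % stationary)
    print("Markov chain entropy rate: %0.4g bits" % entropy_rate)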
|
bsd-3-clause
|
openEduConnect/eduextractor
|
setup.py
|
1
|
3100
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='eduextractor',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.0.3',
description='Open Extraction Tools for Education',
long_description=long_description,
# The project's main homepage.
url='https://github.com/openEduConnect/eduextractor',
# Author details
author='Hunter Owens',
author_email='[email protected]',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
keywords='sample setuptools development',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['pandas', 'selenium', 'pyyaml',
'requests', 'click', 'sphinx', 'lxml',
'sqlalchemy', 'tqdm'],
# Entry points for command line integration
entry_points="""
[console_scripts]
eduextractor=eduextractor.cli:cli
""",
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
},
)
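# Usage sketch (illustrative): with the console_scripts entry point above,
# an editable install exposes the `eduextractor` command, e.g.
#
#   $ pip install -e .[dev,test]
#   $ eduextractor --help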
|
mit
|
ashhher3/scikit-learn
|
sklearn/tests/test_common.py
|
2
|
15087
|
"""
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <[email protected]>
# Gael Varoquaux [email protected]
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import pkgutil
from sklearn.externals.six import PY3
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_classification
from sklearn.cross_validation import train_test_split
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
check_parameters_default_constructible,
check_estimator_sparse_data,
check_transformer,
check_clustering,
check_clusterer_compute_labels_predict,
check_regressors_int,
check_regressors_train,
check_regressors_pickle,
check_transformer_pickle,
check_transformers_unfitted,
check_estimators_nan_inf,
check_estimators_unfitted,
check_classifiers_one_label,
check_classifiers_train,
check_classifiers_classes,
check_classifiers_input_shapes,
check_classifiers_pickle,
check_class_weight_classifiers,
check_class_weight_auto_classifiers,
check_class_weight_auto_linear_classifier,
check_estimators_overwrite_params,
check_estimators_partial_fit_n_features,
check_sparsify_coefficients,
check_classifier_data_not_an_array,
check_regressor_data_not_an_array,
check_transformer_data_not_an_array,
check_transformer_n_iter,
check_non_transformer_estimators_n_iter,
CROSS_DECOMPOSITION)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, clonable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield check_parameters_default_constructible, name, Estimator
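# Note: these estimator checks are generator ("yield") tests; the test runner
# invokes each yielded (check_function, *args) tuple as a separate test case.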
def test_non_meta_estimators():
# input validation etc for non-meta estimators
# FIXME these should be done also for non-mixin estimators!
estimators = all_estimators(type_filter=['classifier', 'regressor',
'transformer', 'cluster'])
for name, Estimator in estimators:
if name not in CROSS_DECOMPOSITION + ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf, name, Estimator
if (name not in ['CCA', '_CCA', 'PLSCanonical', 'PLSRegression',
'PLSSVD', 'GaussianProcess']):
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params, name, Estimator
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients, name, Estimator
yield check_estimator_sparse_data, name, Estimator
def test_transformers():
# test if transformers do something sensible on training set
# also test all shapes / shape errors
transformers = all_estimators(type_filter='transformer')
for name, Transformer in transformers:
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
yield check_transformer_pickle, name, Transformer
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array, name, Transformer
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer']:
# basic tests
yield check_transformer, name, Transformer
yield check_transformers_unfitted, name, Transformer
def test_clustering():
# test if clustering algorithms do something sensible
# also test all shapes / shape errors
clustering = all_estimators(type_filter='cluster')
for name, Alg in clustering:
        # test whether any clusterer overwrites its init parameters during fit
yield check_clusterer_compute_labels_predict, name, Alg
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering, name, Alg
yield check_estimators_partial_fit_n_features, name, Alg
def test_classifiers():
# test if classifiers can cope with non-consecutive classes
classifiers = all_estimators(type_filter='classifier')
for name, Classifier in classifiers:
        # test classifiers can handle non-array data
yield check_classifier_data_not_an_array, name, Classifier
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label, name, Classifier
yield check_classifiers_classes, name, Classifier
yield check_classifiers_pickle, name, Classifier
yield check_estimators_partial_fit_n_features, name, Classifier
# basic consistency testing
yield check_classifiers_train, name, Classifier
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
# test if classifiers can cope with y.shape = (n_samples, 1)
yield check_classifiers_input_shapes, name, Classifier
# test if NotFittedError is raised
yield check_estimators_unfitted, name, Classifier
def test_regressors():
regressors = all_estimators(type_filter='regressor')
# TODO: test with intercept
# TODO: test with multiple responses
for name, Regressor in regressors:
# basic testing
yield check_regressors_train, name, Regressor
yield check_regressor_data_not_an_array, name, Regressor
yield check_estimators_partial_fit_n_features, name, Regressor
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_regressors_pickle, name, Regressor
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int, name, Regressor
# Test if NotFittedError is raised
yield check_estimators_unfitted, name, Regressor
def test_configure():
    # Smoke test the 'configure' step of setup; this tests all the
    # 'configure' functions in the setup.py files in the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_classifiers():
# test that class_weight works and that the semantics are consistent
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
classifiers = [c for c in classifiers
if 'class_weight' in c[1]().get_params().keys()]
for name, Classifier in classifiers:
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
continue
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
continue
yield check_class_weight_classifiers, name, Classifier
def test_class_weight_auto_classifiers():
"""Test that class_weight="auto" improves f1-score"""
# This test is broken; its success depends on:
# * a rare fortuitous RNG seed for make_classification; and
# * the use of binary F1 over a seemingly arbitrary positive class for two
# datasets, and weighted average F1 for the third.
# Its expectations need to be clarified and reimplemented.
raise SkipTest('This test requires redefinition')
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
classifiers = [c for c in classifiers
if 'class_weight' in c[1]().get_params().keys()]
for n_classes, weights in zip([2, 3], [[.8, .2], [.8, .1, .1]]):
# create unbalanced dataset
X, y = make_classification(n_classes=n_classes, n_samples=200,
n_features=10, weights=weights,
random_state=0, n_informative=n_classes)
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
for name, Classifier in classifiers:
if (name != "NuSVC"
# the sparse version has a parameter that doesn't do anything
and not name.startswith("RidgeClassifier")
                # RidgeClassifier behaves unexpectedly
# FIXME!
and not name.endswith("NB")):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
yield (check_class_weight_auto_classifiers, name, Classifier,
X_train, y_train, X_test, y_test, weights)
def test_class_weight_auto_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if 'class_weight' in clazz().get_params().keys()
and issubclass(clazz, LinearClassifierMixin)]
for name, Classifier in linear_classifiers:
if name == "LogisticRegressionCV":
            # Contrary to RidgeClassifierCV, LogisticRegressionCV uses actual
            # CV folds and fits a model for each CV iteration before averaging
            # the coefs. Therefore it is not expected to behave exactly like
            # the other linear models.
continue
yield check_class_weight_auto_linear_classifier, name, Classifier
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_non_transformer_estimators_n_iter():
    # Test that all non-transformer estimators which have a max_iter
    # attribute expose an n_iter attribute of at least 1.
for est_type in ['regressor', 'classifier', 'cluster']:
regressors = all_estimators(type_filter=est_type)
for name, Estimator in regressors:
# LassoLars stops early for the default alpha=1.0 for
# the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, "max_iter"):
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV']):
continue
# Tested in test_transformer_n_iter below
elif (name in CROSS_DECOMPOSITION or
name in ['LinearSVC', 'LogisticRegression']):
continue
else:
                    # Multitask models related to ENet cannot handle
                    # mono-output y.
yield (check_non_transformer_estimators_n_iter,
name, estimator, 'Multi' in name)
def test_transformer_n_iter():
transformers = all_estimators(type_filter='transformer')
for name, Estimator in transformers:
estimator = Estimator()
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if hasattr(estimator, "max_iter") and name not in external_solver:
yield check_transformer_n_iter, name, estimator
|
bsd-3-clause
|
marcocaccin/scikit-learn
|
examples/ensemble/plot_forest_importances.py
|
168
|
1793
|
"""
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-tree variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
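# np.argsort returns ascending order, so [::-1] reverses it to rank features
# from most to least important.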
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.show()
|
bsd-3-clause
|
ashariati/rpg_svo
|
svo_analysis/scripts/compare_results.py
|
17
|
6127
|
#!/usr/bin/python
import os
import sys
import time
import rospkg
import numpy as np
import matplotlib.pyplot as plt
import yaml
import argparse
from matplotlib import rc
# tell matplotlib to use latex font
rc('font',**{'family':'serif','serif':['Cardo']})
rc('text', usetex=True)
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
def plot_trajectory(ax, filename, label, color, linewidth):
file = open(filename)
data = file.read()
lines = data.replace(","," ").replace("\t"," ").split("\n")
trajectory = np.array([[v.strip() for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"], dtype=np.float64)
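    # Columns are assumed to be (timestamp, x, y, z, ...), so columns 1 and 2
    # give the planar (x, y) trajectory plotted below.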
ax.plot(trajectory[:,1], trajectory[:,2], label=label, color=color, linewidth=linewidth)
def compare_results(experiments, results_dir, comparison_dir,
plot_scale_drift = False):
# ------------------------------------------------------------------------------
# position error
fig_poserr = plt.figure(figsize=(8,6))
ax_poserr_x = fig_poserr.add_subplot(311, ylabel='x-error [m]')
ax_poserr_y = fig_poserr.add_subplot(312, ylabel='y-error [m]')
ax_poserr_z = fig_poserr.add_subplot(313, ylabel='z-error [m]', xlabel='time [s]')
for exp in experiments:
# load dataset parameters
params_stream = open(os.path.join(results_dir, exp, 'params.yaml'))
params = yaml.load(params_stream)
# plot translation error
trans_error = np.loadtxt(os.path.join(results_dir, exp, 'translation_error.txt'))
trans_error[:,0] = trans_error[:,0]-trans_error[0,0]
ax_poserr_x.plot(trans_error[:,0], trans_error[:,1], label=params['experiment_label'])
ax_poserr_y.plot(trans_error[:,0], trans_error[:,2])
ax_poserr_z.plot(trans_error[:,0], trans_error[:,3])
ax_poserr_x.set_xlim([0, trans_error[-1,0]+4])
ax_poserr_y.set_xlim([0, trans_error[-1,0]+4])
ax_poserr_z.set_xlim([0, trans_error[-1,0]+4])
ax_poserr_x.legend(bbox_to_anchor=[0, 0], loc='lower left', ncol=3)
ax_poserr_x.grid()
ax_poserr_y.grid()
ax_poserr_z.grid()
fig_poserr.tight_layout()
fig_poserr.savefig(os.path.join(comparison_dir, 'translation_error.pdf'))
# ------------------------------------------------------------------------------
# orientation error
fig_roterr = plt.figure(figsize=(8,6))
ax_roterr_r = fig_roterr.add_subplot(311, ylabel='roll-error [rad]')
ax_roterr_p = fig_roterr.add_subplot(312, ylabel='pitch-error [rad]')
ax_roterr_y = fig_roterr.add_subplot(313, ylabel='yaw-error [rad]', xlabel='time [s]')
for exp in experiments:
# load dataset parameters
params_stream = open(os.path.join(results_dir, exp, 'params.yaml'))
params = yaml.load(params_stream)
        # plot orientation error
rot_error = np.loadtxt(os.path.join(results_dir, exp, 'orientation_error.txt'))
rot_error[:,0] = rot_error[:,0]-rot_error[0,0]
ax_roterr_r.plot(rot_error[:,0], rot_error[:,3], label=params['experiment_label'])
ax_roterr_p.plot(rot_error[:,0], rot_error[:,2])
ax_roterr_y.plot(rot_error[:,0], rot_error[:,1])
ax_roterr_r.set_xlim([0, rot_error[-1,0]+4])
ax_roterr_p.set_xlim([0, rot_error[-1,0]+4])
ax_roterr_y.set_xlim([0, rot_error[-1,0]+4])
ax_roterr_r.legend(bbox_to_anchor=[0, 1], loc='upper left', ncol=3)
ax_roterr_r.grid()
ax_roterr_p.grid()
ax_roterr_y.grid()
fig_roterr.tight_layout()
fig_roterr.savefig(os.path.join(comparison_dir, 'orientation_error.pdf'))
# ------------------------------------------------------------------------------
# scale error
if plot_scale_drift:
fig_scale = plt.figure(figsize=(8,2.5))
ax_scale = fig_scale.add_subplot(111, xlabel='time [s]', ylabel='scale change [\%]')
for exp in experiments:
# load dataset parameters
params = yaml.load(open(os.path.join(results_dir, exp, 'params.yaml')))
            # plot scale drift
            scale_drift = np.loadtxt(os.path.join(results_dir, exp, 'scale_drift.txt'))
scale_drift[:,0] = scale_drift[:,0]-scale_drift[0,0]
ax_scale.plot(scale_drift[:,0], scale_drift[:,1], label=params['experiment_label'])
        ax_scale.set_xlim([0, scale_drift[-1,0]+4])
ax_scale.legend(bbox_to_anchor=[0, 1], loc='upper left', ncol=3)
ax_scale.grid()
fig_scale.tight_layout()
fig_scale.savefig(os.path.join(comparison_dir, 'scale_drift.pdf'))
# ------------------------------------------------------------------------------
# trajectory
# fig_traj = plt.figure(figsize=(8,4.8))
# ax_traj = fig_traj.add_subplot(111, xlabel='x [m]', ylabel='y [m]', aspect='equal', xlim=[-3.1, 4], ylim=[-1.5, 2.6])
#
# plotTrajectory(ax_traj, '/home/cforster/Datasets/asl_vicon_d2/groundtruth_filtered.txt', 'Groundtruth', 'k', 1.5)
# plotTrajectory(ax_traj, results_dir+'/20130911_2229_nslam_i7_asl2_fast/traj_estimate_rotated.txt', 'Fast', 'g', 1)
# plotTrajectory(ax_traj, results_dir+'/20130906_2149_ptam_i7_asl2/traj_estimate_rotated.txt', 'PTAM', 'r', 1)
#
# mark_inset(ax_traj, axins, loc1=2, loc2=4, fc="none", ec='b')
# plt.draw()
# plt.show()
# ax_traj.legend(bbox_to_anchor=[1, 0], loc='lower right', ncol=3)
# ax_traj.grid()
# fig_traj.tight_layout()
# fig_traj.savefig('../results/trajectory_asl.pdf')
if __name__ == '__main__':
default_name = time.strftime("%Y%m%d_%H%M", time.localtime())+'_comparison'
parser = argparse.ArgumentParser(description='Compare results.')
parser.add_argument('result_directories', nargs='+', help='list of result directories to compare')
parser.add_argument('--name', help='name of the comparison', default=default_name)
args = parser.parse_args()
# create folder for comparison results
results_dir = os.path.join(rospkg.RosPack().get_path('svo_analysis'), 'results')
comparison_dir = os.path.join(results_dir, args.name)
if not os.path.exists(comparison_dir):
os.makedirs(comparison_dir)
# run comparison
compare_results(args.result_directories, results_dir, comparison_dir)
|
gpl-3.0
|
RCollins13/Holmes
|
readpaircluster/inheritance/plot.py
|
2
|
2181
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2015 msto <[email protected]>
#
# Distributed under terms of the MIT license.
"""
"""
# from sys import maxsize
import matplotlib.pyplot as plt
# from inheritance import plot_inheritance
import pandas as pd
flierprops = dict(marker='o', markerfacecolor='green', markersize=7,
linestyle='none')
# Plot counts
# for sv in 'del dup inv tloc'.split():
# for caller in 'lumpy delly rpc'.split():
# calls = pd.DataFrame({})
# for freq in [0.1, 0.25, 0.5, 0.75, 0.9, 1.0]:
# counts = pd.read_table(
# 'all.{0}.{1}.{2}.counts.txt'.format(caller, sv, freq),
# index_col=0)
# try:
# calls[freq] = counts[['denovo', 'uniparental', 'biparental']].sum(axis=1)
# except:
# import pdb
# pdb.set_trace()
# if caller != 'rpc':
# bp500 = pd.read_table('raw_counts/probands.raw_counts.500bp.{0}.{1}.txt'.format(caller, sv), header=None, names=('sample', 'count'), index_col=0)
# calls['500bp'] = bp500['count']
# raw = pd.read_table('raw_counts/probands.raw_counts.{0}.{1}.txt'.format(caller, sv), header=None, names=('sample', 'count'), index_col=0)
# calls['raw'] = raw['count']
# calls.plot(kind='box', title='{0} {1}'.format(caller, sv),
# flierprops=flierprops)
# plt.savefig('{0}.{1}.box_counts.full.png'.format(caller, sv))
for sv in 'del dup inv tloc'.split():
for caller in 'lumpy delly rpc'.split():
rates = pd.DataFrame({})
for freq in [0.1, 0.25, 0.5, 0.75, 0.9, 1.0]:
counts = pd.read_table(
'all.{0}.{1}.{2}.counts.txt'.format(caller, sv, freq),
index_col=0)
counts.drop('absent', axis=1, inplace=True)
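            # De novo rate: de novo calls as a fraction of all de novo +
            # inherited (uniparental + biparental) calls, now that the
            # 'absent' column has been dropped.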
rates[freq] = counts['denovo'] / counts.sum(axis=1)
rates.plot(kind='box', ylim=(0, 1), title='{0} {1}'.format(caller, sv),
flierprops=flierprops)
plt.savefig('{0}.{1}.family_rates.png'.format(caller, sv))
|
mit
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/openassessment/assessment/worker/algorithm.py
|
10
|
12616
|
"""
Define the ML algorithms used to train text classifiers.
"""
try:
import cPickle as pickle
except ImportError:
import pickle
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import importlib
import traceback
import base64
from django.conf import settings
DEFAULT_AI_ALGORITHMS = {
'fake': 'openassessment.assessment.worker.algorithm.FakeAIAlgorithm',
'ease': 'openassessment.assessment.worker.algorithm.EaseAIAlgorithm'
}
class AIAlgorithmError(Exception):
"""
An error occurred when using an AI algorithm.
Superclass for more specific errors below.
"""
pass
class UnknownAlgorithm(AIAlgorithmError):
"""
Algorithm ID not found in the configuration.
"""
def __init__(self, algorithm_id):
msg = u"Could not find algorithm \"{}\" in the configuration.".format(algorithm_id)
super(UnknownAlgorithm, self).__init__(msg)
class AlgorithmLoadError(AIAlgorithmError):
"""
Unable to load the algorithm class.
"""
def __init__(self, algorithm_id, algorithm_path):
msg = (
u"Could not load algorithm \"{algorithm_id}\" from \"{path}\""
).format(algorithm_id=algorithm_id, path=algorithm_path)
super(AlgorithmLoadError, self).__init__(msg)
class TrainingError(AIAlgorithmError):
"""
An error occurred while training a classifier from example essays.
"""
pass
class ScoreError(AIAlgorithmError):
"""
An error occurred while scoring an essay.
"""
pass
class InvalidClassifier(ScoreError):
"""
The classifier could not be used by this algorithm to score an essay.
"""
pass
class AIAlgorithm(object):
"""
Abstract base class for a supervised ML text classification algorithm.
"""
__metaclass__ = ABCMeta
# Example essay used as input to the training algorithm
# `text` is a unicode string representing a student essay submission.
# `score` is an integer score.
# Note that `score` is used as an arbitrary label, so you could
# have a set of examples with non-adjacent scores.
ExampleEssay = namedtuple('ExampleEssay', ['text', 'score'])
@abstractmethod
def train_classifier(self, examples):
"""
Train a classifier based on example essays and scores.
Args:
examples (list of AIAlgorithm.ExampleEssay): Example essays and scores.
Returns:
JSON-serializable: The trained classifier. This MUST be JSON-serializable.
Raises:
TrainingError: The classifier could not be trained successfully.
"""
pass
@abstractmethod
def score(self, text, classifier, cache):
"""
Score an essay using a classifier.
Args:
text (unicode): The text to classify.
classifier (JSON-serializable): A classifier, using the same format
as `train_classifier()`.
cache (dict): An in-memory cache that persists until all criteria
in the rubric have been scored.
Raises:
InvalidClassifier: The provided classifier cannot be used by this algorithm.
ScoreError: An error occurred while scoring.
"""
pass
@classmethod
def algorithm_for_id(cls, algorithm_id):
"""
Load an algorithm based on Django settings configuration.
Args:
algorithm_id (unicode): The identifier for the algorithm,
which should be specified in Django settings.
Returns:
AIAlgorithm
Raises:
UnknownAlgorithm
"""
algorithms = getattr(settings, "ORA2_AI_ALGORITHMS", DEFAULT_AI_ALGORITHMS)
cls_path = algorithms.get(algorithm_id)
if cls_path is None:
raise UnknownAlgorithm(algorithm_id)
else:
module_path, _, name = cls_path.rpartition('.')
try:
algorithm_cls = getattr(importlib.import_module(module_path), name)
return algorithm_cls()
except (ImportError, ValueError, AttributeError):
raise AlgorithmLoadError(algorithm_id, cls_path)
class FakeAIAlgorithm(AIAlgorithm):
"""
Fake AI algorithm implementation that assigns scores randomly.
We use this for testing the pipeline independently of EASE.
"""
def train_classifier(self, examples):
"""
Store the possible score labels, which will allow
us to deterministically choose scores for other essays.
"""
unique_sorted_scores = sorted(list(set(example.score for example in examples)))
return {'scores': unique_sorted_scores}
def score(self, text, classifier, cache):
"""
Choose a score for the essay deterministically based on its length.
"""
if 'scores' not in classifier or len(classifier['scores']) == 0:
raise InvalidClassifier("Classifier must provide score labels")
else:
score_index = len(text) % len(classifier['scores'])
return classifier['scores'][score_index]
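# Usage sketch (illustrative): the fake algorithm can be exercised directly,
# without Django settings or EASE installed, e.g.
#
#   algorithm = FakeAIAlgorithm()
#   examples = [AIAlgorithm.ExampleEssay(text=u'first essay', score=0),
#               AIAlgorithm.ExampleEssay(text=u'second essay', score=2)]
#   classifier = algorithm.train_classifier(examples)   # {'scores': [0, 2]}
#   algorithm.score(u'another essay', classifier, cache={})  # returns 0 or 2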
class EaseAIAlgorithm(AIAlgorithm):
"""
Wrapper for the EASE library.
See https://github.com/edx/ease for more information.
Since EASE has many system dependencies, we don't include it explicitly
in edx-ora2 requirements. When testing locally, we use the fake
algorithm implementation instead.
"""
def train_classifier(self, examples):
"""
Train a text classifier using the EASE library.
The classifier is serialized as a dictionary with keys:
* 'feature_extractor': The pickled feature extractor (transforms text into a numeric feature vector).
* 'score_classifier': The pickled classifier (uses the feature vector to assign scores to essays).
Because we are using `pickle`, the serialized classifiers are unfortunately
tied to the particular version of ease/scikit-learn/numpy/scipy/nltk that we
have installed at the time of training.
Args:
examples (list of AIAlgorithm.ExampleEssay): Example essays and scores.
Returns:
dict: The serializable classifier.
Raises:
TrainingError: The classifier could not be trained successfully.
"""
feature_ext, classifier = self._train_classifiers(examples)
return self._serialize_classifiers(feature_ext, classifier)
def score(self, text, classifier, cache):
"""
Score essays using EASE.
Args:
text (unicode): The essay text to score.
classifier (dict): The serialized classifiers created during training.
cache (dict): An in-memory cache that persists until all criteria
in the rubric have been scored.
Returns:
int
Raises:
InvalidClassifier
ScoreError
"""
try:
from ease.essay_set import EssaySet # pylint:disable=F0401
except ImportError:
msg = u"Could not import EASE to grade essays."
raise ScoreError(msg)
feature_extractor, score_classifier = self._deserialize_classifiers(classifier)
# The following is a modified version of `ease.grade.grade()`,
# skipping things we don't use (cross-validation, feedback)
# and caching essay sets across criteria. This allows us to
# avoid some expensive NLTK operations, particularly tagging
# parts of speech.
try:
# Get the essay set from the cache or create it.
# Since all essays to be graded are assigned a dummy
# score of "0", we can safely re-use the essay set
# for each criterion in the rubric.
# EASE can't handle non-ASCII unicode, so we need
# to strip out non-ASCII chars.
essay_set = cache.get('grading_essay_set')
if essay_set is None:
essay_set = EssaySet(essaytype="test")
essay_set.add_essay(text.encode('ascii', 'ignore'), 0)
cache['grading_essay_set'] = essay_set
# Extract features from the text
features = feature_extractor.gen_feats(essay_set)
# Predict a score
return int(score_classifier.predict(features)[0])
except:
msg = (
u"An unexpected error occurred while using "
u"EASE to score an essay: {traceback}"
).format(traceback=traceback.format_exc())
raise ScoreError(msg)
def _train_classifiers(self, examples):
"""
Use EASE to train classifiers.
Args:
examples (list of AIAlgorithm.ExampleEssay): Example essays and scores.
Returns:
tuple of `feature_extractor` (an `ease.feature_extractor.FeatureExtractor` object)
and `classifier` (a `sklearn.ensemble.GradientBoostingClassifier` object).
Raises:
TrainingError: Could not load EASE or could not complete training.
"""
try:
from ease.create import create # pylint: disable=F0401
except ImportError:
msg = u"Could not import EASE to perform training."
raise TrainingError(msg)
input_essays = [example.text for example in examples]
input_scores = [example.score for example in examples]
try:
# Train the classifiers
# The third argument is the essay prompt, which EASE uses
# to check if an input essay is too similar to the prompt.
# Since we're not using this feature, we pass in an empty string.
results = create(input_essays, input_scores, "")
except:
msg = (
u"An unexpected error occurred while using "
u"EASE to train classifiers: {traceback}"
).format(traceback=traceback.format_exc())
raise TrainingError(msg)
if not results.get('success', False):
msg = (
u"Errors occurred while training classifiers "
u"using EASE: {errors}"
).format(errors=results.get('errors', []))
raise TrainingError(msg)
return results.get('feature_ext'), results.get('classifier')
def _serialize_classifiers(self, feature_ext, classifier):
"""
Serialize the classifier objects.
Args:
feature_extractor (ease.feature_extractor.FeatureExtractor)
classifier (sklearn.ensemble.GradientBoostingClassifier)
Returns:
dict containing the pickled classifiers
Raises:
TrainingError: Could not serialize the classifiers.
"""
try:
return {
'feature_extractor': base64.b64encode(pickle.dumps(feature_ext)),
'score_classifier': base64.b64encode(pickle.dumps(classifier)),
}
except Exception as ex:
msg = (
u"An error occurred while serializing the classifiers "
u"created by EASE: {ex}"
).format(ex=ex)
raise TrainingError(msg)
def _deserialize_classifiers(self, classifier_data):
"""
Deserialize the classifier objects.
Args:
classifier_data (dict): The serialized classifiers.
Returns:
tuple of `(feature_extractor, score_classifier)`
Raises:
InvalidClassifier
"""
if not isinstance(classifier_data, dict):
raise InvalidClassifier("Classifier must be a dictionary.")
try:
classifier_str = classifier_data.get('feature_extractor').encode('utf-8')
feature_extractor = pickle.loads(base64.b64decode(classifier_str))
except Exception as ex:
msg = (
u"An error occurred while deserializing the "
u"EASE feature extractor: {ex}"
).format(ex=ex)
raise InvalidClassifier(msg)
try:
score_classifier_str = classifier_data.get('score_classifier').encode('utf-8')
score_classifier = pickle.loads(base64.b64decode(score_classifier_str))
except Exception as ex:
msg = (
u"An error occurred while deserializing the "
u"EASE score classifier: {ex}"
).format(ex=ex)
raise InvalidClassifier(msg)
return feature_extractor, score_classifier
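# Round-trip sketch (illustrative): the base64 + pickle scheme used above is
# symmetric, so any picklable object survives serialization, e.g.
#
#   blob = base64.b64encode(pickle.dumps({'example': 1}))
#   pickle.loads(base64.b64decode(blob))   # -> {'example': 1}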
|
agpl-3.0
|
NicWayand/xray
|
xarray/test/test_variable.py
|
1
|
46316
|
from collections import namedtuple
from copy import copy, deepcopy
from datetime import datetime, timedelta
from textwrap import dedent
from distutils.version import LooseVersion
import numpy as np
import pytz
import pandas as pd
from xarray import Variable, Dataset, DataArray
from xarray.core import indexing
from xarray.core.variable import (Coordinate, as_variable, as_compatible_data)
from xarray.core.indexing import PandasIndexAdapter, LazilyIndexedArray
from xarray.core.pycompat import PY3, OrderedDict
from . import TestCase, source_ndarray
class VariableSubclassTestCases(object):
def test_properties(self):
data = 0.5 * np.arange(10)
v = self.cls(['time'], data, {'foo': 'bar'})
self.assertEqual(v.dims, ('time',))
self.assertArrayEqual(v.values, data)
self.assertEqual(v.dtype, float)
self.assertEqual(v.shape, (10,))
self.assertEqual(v.size, 10)
self.assertEqual(v.nbytes, 80)
self.assertEqual(v.ndim, 1)
self.assertEqual(len(v), 10)
self.assertEqual(v.attrs, {'foo': u'bar'})
def test_attrs(self):
v = self.cls(['time'], 0.5 * np.arange(10))
self.assertEqual(v.attrs, {})
attrs = {'foo': 'bar'}
v.attrs = attrs
self.assertEqual(v.attrs, attrs)
self.assertIsInstance(v.attrs, OrderedDict)
v.attrs['foo'] = 'baz'
self.assertEqual(v.attrs['foo'], 'baz')
def test_getitem_dict(self):
v = self.cls(['x'], np.random.randn(5))
actual = v[{'x': 0}]
expected = v[0]
self.assertVariableIdentical(expected, actual)
def _assertIndexedLikeNDArray(self, variable, expected_value0,
expected_dtype=None):
"""Given a 1-dimensional variable, verify that the variable is indexed
like a numpy.ndarray.
"""
self.assertEqual(variable[0].shape, ())
self.assertEqual(variable[0].ndim, 0)
self.assertEqual(variable[0].size, 1)
# test identity
self.assertTrue(variable.equals(variable.copy()))
self.assertTrue(variable.identical(variable.copy()))
# check value is equal for both ndarray and Variable
self.assertEqual(variable.values[0], expected_value0)
self.assertEqual(variable[0].values, expected_value0)
# check type or dtype is consistent for both ndarray and Variable
if expected_dtype is None:
# check output type instead of array dtype
self.assertEqual(type(variable.values[0]), type(expected_value0))
self.assertEqual(type(variable[0].values), type(expected_value0))
elif expected_dtype is not False:
self.assertEqual(variable.values[0].dtype, expected_dtype)
self.assertEqual(variable[0].values.dtype, expected_dtype)
def test_index_0d_int(self):
for value, dtype in [(0, np.int_),
(np.int32(0), np.int32)]:
x = self.cls(['x'], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_float(self):
for value, dtype in [(0.5, np.float_),
(np.float32(0.5), np.float32)]:
x = self.cls(['x'], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_string(self):
for value, dtype in [('foo', np.dtype('U3' if PY3 else 'S3')),
(u'foo', np.dtype('U3'))]:
x = self.cls(['x'], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_datetime(self):
d = datetime(2000, 1, 1)
x = self.cls(['x'], [d])
self._assertIndexedLikeNDArray(x, np.datetime64(d))
x = self.cls(['x'], [np.datetime64(d)])
self._assertIndexedLikeNDArray(x, np.datetime64(d), 'datetime64[ns]')
x = self.cls(['x'], pd.DatetimeIndex([d]))
self._assertIndexedLikeNDArray(x, np.datetime64(d), 'datetime64[ns]')
def test_index_0d_timedelta64(self):
td = timedelta(hours=1)
x = self.cls(['x'], [np.timedelta64(td)])
self._assertIndexedLikeNDArray(x, np.timedelta64(td), 'timedelta64[ns]')
x = self.cls(['x'], pd.to_timedelta([td]))
self._assertIndexedLikeNDArray(x, np.timedelta64(td), 'timedelta64[ns]')
def test_index_0d_not_a_time(self):
d = np.datetime64('NaT', 'ns')
x = self.cls(['x'], [d])
self._assertIndexedLikeNDArray(x, d)
def test_index_0d_object(self):
class HashableItemWrapper(object):
def __init__(self, item):
self.item = item
def __eq__(self, other):
return self.item == other.item
def __hash__(self):
return hash(self.item)
def __repr__(self):
return '%s(item=%r)' % (type(self).__name__, self.item)
item = HashableItemWrapper((1, 2, 3))
x = self.cls('x', [item])
self._assertIndexedLikeNDArray(x, item, expected_dtype=False)
def test_0d_object_array_with_list(self):
listarray = np.empty((1,), dtype=object)
listarray[0] = [1, 2, 3]
x = self.cls('x', listarray)
assert x.data == listarray
assert x[0].data == listarray.squeeze()
assert x.squeeze().data == listarray.squeeze()
def test_index_and_concat_datetime(self):
# regression test for #125
date_range = pd.date_range('2011-09-01', periods=10)
for dates in [date_range, date_range.values,
date_range.to_pydatetime()]:
expected = self.cls('t', dates)
for times in [[expected[i] for i in range(10)],
[expected[i:(i + 1)] for i in range(10)],
[expected[[i]] for i in range(10)]]:
actual = Variable.concat(times, 't')
self.assertEqual(expected.dtype, actual.dtype)
self.assertArrayEqual(expected, actual)
def test_0d_time_data(self):
# regression test for #105
x = self.cls('time', pd.date_range('2000-01-01', periods=5))
expected = np.datetime64('2000-01-01T00Z', 'ns')
self.assertEqual(x[0].values, expected)
def test_datetime64_conversion(self):
times = pd.date_range('2000-01-01', periods=3)
for values, preserve_source in [
(times, True),
(times.values, True),
(times.values.astype('datetime64[s]'), False),
(times.to_pydatetime(), False),
]:
v = self.cls(['t'], values)
self.assertEqual(v.dtype, np.dtype('datetime64[ns]'))
self.assertArrayEqual(v.values, times.values)
self.assertEqual(v.values.dtype, np.dtype('datetime64[ns]'))
same_source = source_ndarray(v.values) is source_ndarray(values)
assert preserve_source == same_source
def test_timedelta64_conversion(self):
times = pd.timedelta_range(start=0, periods=3)
for values, preserve_source in [
(times, True),
(times.values, True),
(times.values.astype('timedelta64[s]'), False),
(times.to_pytimedelta(), False),
]:
v = self.cls(['t'], values)
self.assertEqual(v.dtype, np.dtype('timedelta64[ns]'))
self.assertArrayEqual(v.values, times.values)
self.assertEqual(v.values.dtype, np.dtype('timedelta64[ns]'))
same_source = source_ndarray(v.values) is source_ndarray(values)
assert preserve_source == same_source
def test_object_conversion(self):
data = np.arange(5).astype(str).astype(object)
actual = self.cls('x', data)
self.assertEqual(actual.dtype, data.dtype)
def test_pandas_data(self):
v = self.cls(['x'], pd.Series([0, 1, 2], index=[3, 2, 1]))
self.assertVariableIdentical(v, v[[0, 1, 2]])
v = self.cls(['x'], pd.Index([0, 1, 2]))
self.assertEqual(v[0].values, v.values[0])
def test_pandas_period_index(self):
v = self.cls(['x'], pd.period_range(start='2000', periods=20, freq='B'))
self.assertEqual(v[0], pd.Period('2000', freq='B'))
assert "Period('2000-01-03', 'B')" in repr(v)
def test_1d_math(self):
x = 1.0 * np.arange(5)
y = np.ones(5)
v = self.cls(['x'], x)
# unary ops
self.assertVariableIdentical(v, +v)
self.assertVariableIdentical(v, abs(v))
self.assertArrayEqual((-v).values, -x)
# binary ops with numbers
self.assertVariableIdentical(v, v + 0)
self.assertVariableIdentical(v, 0 + v)
self.assertVariableIdentical(v, v * 1)
self.assertArrayEqual((v > 2).values, x > 2)
self.assertArrayEqual((0 == v).values, 0 == x)
self.assertArrayEqual((v - 1).values, x - 1)
self.assertArrayEqual((1 - v).values, 1 - x)
# binary ops with numpy arrays
self.assertArrayEqual((v * x).values, x ** 2)
self.assertArrayEqual((x * v).values, x ** 2)
self.assertArrayEqual(v - y, v - 1)
self.assertArrayEqual(y - v, 1 - v)
# verify attributes are dropped
v2 = self.cls(['x'], x, {'units': 'meters'})
self.assertVariableIdentical(v, +v2)
# binary ops with all variables
self.assertArrayEqual(v + v, 2 * v)
w = self.cls(['x'], y, {'foo': 'bar'})
self.assertVariableIdentical(v + w, self.cls(['x'], x + y))
self.assertArrayEqual((v * w).values, x * y)
# something complicated
self.assertArrayEqual((v ** 2 * w - 1 + x).values, x ** 2 * y - 1 + x)
# make sure dtype is preserved (for Index objects)
self.assertEqual(float, (+v).dtype)
self.assertEqual(float, (+v).values.dtype)
self.assertEqual(float, (0 + v).dtype)
self.assertEqual(float, (0 + v).values.dtype)
# check types of returned data
self.assertIsInstance(+v, Variable)
self.assertNotIsInstance(+v, Coordinate)
self.assertIsInstance(0 + v, Variable)
self.assertNotIsInstance(0 + v, Coordinate)
def test_1d_reduce(self):
x = np.arange(5)
v = self.cls(['x'], x)
actual = v.sum()
expected = Variable((), 10)
self.assertVariableIdentical(expected, actual)
self.assertIs(type(actual), Variable)
def test_array_interface(self):
x = np.arange(5)
v = self.cls(['x'], x)
self.assertArrayEqual(np.asarray(v), x)
# test patched in methods
self.assertArrayEqual(v.astype(float), x.astype(float))
self.assertVariableIdentical(v.argsort(), v)
self.assertVariableIdentical(v.clip(2, 3), self.cls('x', x.clip(2, 3)))
# test ufuncs
self.assertVariableIdentical(np.sin(v), self.cls(['x'], np.sin(x)))
self.assertIsInstance(np.sin(v), Variable)
self.assertNotIsInstance(np.sin(v), Coordinate)
def example_1d_objects(self):
for data in [range(3),
0.5 * np.arange(3),
0.5 * np.arange(3, dtype=np.float32),
pd.date_range('2000-01-01', periods=3),
np.array(['a', 'b', 'c'], dtype=object)]:
yield (self.cls('x', data), data)
def test___array__(self):
for v, data in self.example_1d_objects():
self.assertArrayEqual(v.values, np.asarray(data))
self.assertArrayEqual(np.asarray(v), np.asarray(data))
self.assertEqual(v[0].values, np.asarray(data)[0])
self.assertEqual(np.asarray(v[0]), np.asarray(data)[0])
def test_equals_all_dtypes(self):
for v, _ in self.example_1d_objects():
v2 = v.copy()
self.assertTrue(v.equals(v2))
self.assertTrue(v.identical(v2))
self.assertTrue(v[0].equals(v2[0]))
self.assertTrue(v[0].identical(v2[0]))
self.assertTrue(v[:2].equals(v2[:2]))
self.assertTrue(v[:2].identical(v2[:2]))
def test_eq_all_dtypes(self):
# ensure that we don't choke on comparisons for which numpy returns
# scalars
expected = self.cls('x', 3 * [False])
for v, _ in self.example_1d_objects():
actual = 'z' == v
self.assertVariableIdentical(expected, actual)
actual = ~('z' != v)
self.assertVariableIdentical(expected, actual)
def test_encoding_preserved(self):
expected = self.cls('x', range(3), {'foo': 1}, {'bar': 2})
for actual in [expected.T,
expected[...],
expected.squeeze(),
expected.isel(x=slice(None)),
expected.expand_dims({'x': 3}),
expected.copy(deep=True),
expected.copy(deep=False)]:
self.assertVariableIdentical(expected, actual)
self.assertEqual(expected.encoding, actual.encoding)
def test_concat(self):
x = np.arange(5)
y = np.arange(5, 10)
v = self.cls(['a'], x)
w = self.cls(['a'], y)
self.assertVariableIdentical(Variable(['b', 'a'], np.array([x, y])),
Variable.concat([v, w], 'b'))
self.assertVariableIdentical(Variable(['b', 'a'], np.array([x, y])),
Variable.concat((v, w), 'b'))
self.assertVariableIdentical(Variable(['b', 'a'], np.array([x, y])),
Variable.concat((v, w), 'b'))
with self.assertRaisesRegexp(ValueError, 'inconsistent dimensions'):
Variable.concat([v, Variable(['c'], y)], 'b')
# test indexers
actual = Variable.concat(
[v, w],
positions=[np.arange(0, 10, 2), np.arange(1, 10, 2)],
dim='a')
expected = Variable('a', np.array([x, y]).ravel(order='F'))
self.assertVariableIdentical(expected, actual)
# test concatenating along a dimension
v = Variable(['time', 'x'], np.random.random((10, 8)))
self.assertVariableIdentical(v, Variable.concat([v[:5], v[5:]], 'time'))
self.assertVariableIdentical(v, Variable.concat([v[:5], v[5:6], v[6:]], 'time'))
self.assertVariableIdentical(v, Variable.concat([v[:1], v[1:]], 'time'))
# test dimension order
self.assertVariableIdentical(v, Variable.concat([v[:, :5], v[:, 5:]], 'x'))
with self.assertRaisesRegexp(ValueError, 'all input arrays must have'):
Variable.concat([v[:, 0], v[:, 1:]], 'x')
def test_concat_attrs(self):
# different or conflicting attributes should be removed
v = self.cls('a', np.arange(5), {'foo': 'bar'})
w = self.cls('a', np.ones(5))
expected = self.cls('a', np.concatenate([np.arange(5), np.ones(5)]))
self.assertVariableIdentical(expected, Variable.concat([v, w], 'a'))
w.attrs['foo'] = 2
self.assertVariableIdentical(expected, Variable.concat([v, w], 'a'))
w.attrs['foo'] = 'bar'
expected.attrs['foo'] = 'bar'
self.assertVariableIdentical(expected, Variable.concat([v, w], 'a'))
def test_concat_fixed_len_str(self):
# regression test for #217
for kind in ['S', 'U']:
x = self.cls('animal', np.array(['horse'], dtype=kind))
y = self.cls('animal', np.array(['aardvark'], dtype=kind))
actual = Variable.concat([x, y], 'animal')
expected = Variable(
'animal', np.array(['horse', 'aardvark'], dtype=kind))
self.assertVariableEqual(expected, actual)
def test_concat_number_strings(self):
# regression test for #305
a = self.cls('x', ['0', '1', '2'])
b = self.cls('x', ['3', '4'])
actual = Variable.concat([a, b], dim='x')
expected = Variable('x', np.arange(5).astype(str).astype(object))
self.assertVariableIdentical(expected, actual)
self.assertEqual(expected.dtype, object)
self.assertEqual(type(expected.values[0]), str)
def test_copy(self):
v = self.cls('x', 0.5 * np.arange(10), {'foo': 'bar'})
for deep in [True, False]:
w = v.copy(deep=deep)
self.assertIs(type(v), type(w))
self.assertVariableIdentical(v, w)
self.assertEqual(v.dtype, w.dtype)
if self.cls is Variable:
if deep:
self.assertIsNot(source_ndarray(v.values),
source_ndarray(w.values))
else:
self.assertIs(source_ndarray(v.values),
source_ndarray(w.values))
self.assertVariableIdentical(v, copy(v))
def test_copy_index(self):
midx = pd.MultiIndex.from_product([['a', 'b'], [1, 2], [-1, -2]],
names=('one', 'two', 'three'))
v = self.cls('x', midx)
for deep in [True, False]:
w = v.copy(deep=deep)
self.assertIsInstance(w._data, PandasIndexAdapter)
self.assertIsInstance(w.to_index(), pd.MultiIndex)
self.assertArrayEqual(v._data.array, w._data.array)
def test_real_and_imag(self):
v = self.cls('x', np.arange(3) - 1j * np.arange(3), {'foo': 'bar'})
expected_re = self.cls('x', np.arange(3), {'foo': 'bar'})
self.assertVariableIdentical(v.real, expected_re)
expected_im = self.cls('x', -np.arange(3), {'foo': 'bar'})
self.assertVariableIdentical(v.imag, expected_im)
expected_abs = self.cls('x', np.sqrt(2 * np.arange(3) ** 2))
self.assertVariableAllClose(abs(v), expected_abs)
def test_aggregate_complex(self):
# should skip NaNs
v = self.cls('x', [1, 2j, np.nan])
expected = Variable((), 0.5 + 1j)
self.assertVariableAllClose(v.mean(), expected)
    def test_pandas_categorical_dtype(self):
data = pd.Categorical(np.arange(10, dtype='int64'))
v = self.cls('x', data)
print(v) # should not error
assert v.dtype == 'int64'
def test_pandas_datetime64_with_tz(self):
data = pd.date_range(start='2000-01-01',
tz=pytz.timezone('America/New_York'),
periods=10, freq='1h')
v = self.cls('x', data)
print(v) # should not error
if 'America/New_York' in str(data.dtype):
# pandas is new enough that it has datetime64 with timezone dtype
assert v.dtype == 'object'
def test_multiindex(self):
idx = pd.MultiIndex.from_product([list('abc'), [0, 1]])
v = self.cls('x', idx)
self.assertVariableIdentical(Variable((), ('a', 0)), v[0])
self.assertVariableIdentical(v, v[:])
class TestVariable(TestCase, VariableSubclassTestCases):
cls = staticmethod(Variable)
def setUp(self):
self.d = np.random.random((10, 3)).astype(np.float64)
def test_data_and_values(self):
v = Variable(['time', 'x'], self.d)
self.assertArrayEqual(v.data, self.d)
self.assertArrayEqual(v.values, self.d)
self.assertIs(source_ndarray(v.values), self.d)
with self.assertRaises(ValueError):
# wrong size
v.values = np.random.random(5)
d2 = np.random.random((10, 3))
v.values = d2
self.assertIs(source_ndarray(v.values), d2)
d3 = np.random.random((10, 3))
v.data = d3
self.assertIs(source_ndarray(v.data), d3)
def test_numpy_same_methods(self):
v = Variable([], np.float32(0.0))
self.assertEqual(v.item(), 0)
self.assertIs(type(v.item()), float)
v = Coordinate('x', np.arange(5))
self.assertEqual(2, v.searchsorted(2))
def test_datetime64_conversion_scalar(self):
expected = np.datetime64('2000-01-01T00:00:00Z', 'ns')
for values in [
np.datetime64('2000-01-01T00Z'),
pd.Timestamp('2000-01-01T00'),
datetime(2000, 1, 1),
]:
v = Variable([], values)
self.assertEqual(v.dtype, np.dtype('datetime64[ns]'))
self.assertEqual(v.values, expected)
self.assertEqual(v.values.dtype, np.dtype('datetime64[ns]'))
def test_timedelta64_conversion_scalar(self):
expected = np.timedelta64(24 * 60 * 60 * 10 ** 9, 'ns')
for values in [
np.timedelta64(1, 'D'),
pd.Timedelta('1 day'),
timedelta(days=1),
]:
v = Variable([], values)
self.assertEqual(v.dtype, np.dtype('timedelta64[ns]'))
self.assertEqual(v.values, expected)
self.assertEqual(v.values.dtype, np.dtype('timedelta64[ns]'))
def test_0d_str(self):
v = Variable([], u'foo')
self.assertEqual(v.dtype, np.dtype('U3'))
self.assertEqual(v.values, 'foo')
v = Variable([], np.string_('foo'))
self.assertEqual(v.dtype, np.dtype('S3'))
self.assertEqual(v.values, bytes('foo', 'ascii') if PY3 else 'foo')
def test_0d_datetime(self):
v = Variable([], pd.Timestamp('2000-01-01'))
self.assertEqual(v.dtype, np.dtype('datetime64[ns]'))
self.assertEqual(v.values, np.datetime64('2000-01-01T00Z', 'ns'))
def test_0d_timedelta(self):
for td in [pd.to_timedelta('1s'), np.timedelta64(1, 's')]:
v = Variable([], td)
self.assertEqual(v.dtype, np.dtype('timedelta64[ns]'))
self.assertEqual(v.values, np.timedelta64(10 ** 9, 'ns'))
def test_equals_and_identical(self):
d = np.random.rand(10, 3)
d[0, 0] = np.nan
v1 = Variable(('dim1', 'dim2'), data=d,
attrs={'att1': 3, 'att2': [1, 2, 3]})
v2 = Variable(('dim1', 'dim2'), data=d,
attrs={'att1': 3, 'att2': [1, 2, 3]})
self.assertTrue(v1.equals(v2))
self.assertTrue(v1.identical(v2))
v3 = Variable(('dim1', 'dim3'), data=d)
self.assertFalse(v1.equals(v3))
v4 = Variable(('dim1', 'dim2'), data=d)
self.assertTrue(v1.equals(v4))
self.assertFalse(v1.identical(v4))
v5 = deepcopy(v1)
v5.values[:] = np.random.rand(10, 3)
self.assertFalse(v1.equals(v5))
self.assertFalse(v1.equals(None))
self.assertFalse(v1.equals(d))
self.assertFalse(v1.identical(None))
self.assertFalse(v1.identical(d))
def test_broadcast_equals(self):
v1 = Variable((), np.nan)
v2 = Variable(('x'), [np.nan, np.nan])
self.assertTrue(v1.broadcast_equals(v2))
self.assertFalse(v1.equals(v2))
self.assertFalse(v1.identical(v2))
v3 = Variable(('x'), [np.nan])
self.assertTrue(v1.broadcast_equals(v3))
self.assertFalse(v1.equals(v3))
self.assertFalse(v1.identical(v3))
self.assertFalse(v1.broadcast_equals(None))
v4 = Variable(('x'), [np.nan] * 3)
self.assertFalse(v2.broadcast_equals(v4))
def test_as_variable(self):
data = np.arange(10)
expected = Variable('x', data)
self.assertVariableIdentical(expected, as_variable(expected))
ds = Dataset({'x': expected})
self.assertVariableIdentical(expected, as_variable(ds['x']))
self.assertNotIsInstance(ds['x'], Variable)
self.assertIsInstance(as_variable(ds['x']), Variable)
FakeVariable = namedtuple('FakeVariable', 'values dims')
fake_xarray = FakeVariable(expected.values, expected.dims)
self.assertVariableIdentical(expected, as_variable(fake_xarray))
xarray_tuple = (expected.dims, expected.values)
self.assertVariableIdentical(expected, as_variable(xarray_tuple))
with self.assertRaisesRegexp(TypeError, 'tuples to convert'):
as_variable(tuple(data))
with self.assertRaisesRegexp(
TypeError, 'without an explicit list of dimensions'):
as_variable(data)
actual = as_variable(data, name='x')
self.assertVariableIdentical(expected, actual)
self.assertIsInstance(actual, Coordinate)
actual = as_variable(0)
expected = Variable([], 0)
self.assertVariableIdentical(expected, actual)
def test_repr(self):
v = Variable(['time', 'x'], [[1, 2, 3], [4, 5, 6]], {'foo': 'bar'})
expected = dedent("""
<xarray.Variable (time: 2, x: 3)>
array([[1, 2, 3],
[4, 5, 6]])
Attributes:
foo: bar
""").strip()
self.assertEqual(expected, repr(v))
def test_repr_lazy_data(self):
v = Variable('x', LazilyIndexedArray(np.arange(2e5)))
self.assertIn('200000 values with dtype', repr(v))
self.assertIsInstance(v._data, LazilyIndexedArray)
def test_items(self):
data = np.random.random((10, 11))
v = Variable(['x', 'y'], data)
# test slicing
self.assertVariableIdentical(v, v[:])
self.assertVariableIdentical(v, v[...])
self.assertVariableIdentical(Variable(['y'], data[0]), v[0])
self.assertVariableIdentical(Variable(['x'], data[:, 0]), v[:, 0])
self.assertVariableIdentical(Variable(['x', 'y'], data[:3, :2]),
v[:3, :2])
# test array indexing
x = Variable(['x'], np.arange(10))
y = Variable(['y'], np.arange(11))
self.assertVariableIdentical(v, v[x.values])
self.assertVariableIdentical(v, v[x])
self.assertVariableIdentical(v[:3], v[x < 3])
self.assertVariableIdentical(v[:, 3:], v[:, y >= 3])
self.assertVariableIdentical(v[:3, 3:], v[x < 3, y >= 3])
self.assertVariableIdentical(v[:3, :2], v[x[:3], y[:2]])
self.assertVariableIdentical(v[:3, :2], v[range(3), range(2)])
# test iteration
for n, item in enumerate(v):
self.assertVariableIdentical(Variable(['y'], data[n]), item)
with self.assertRaisesRegexp(TypeError, 'iteration over a 0-d'):
iter(Variable([], 0))
# test setting
v.values[:] = 0
self.assertTrue(np.all(v.values == 0))
# test orthogonal setting
v[range(10), range(11)] = 1
self.assertArrayEqual(v.values, np.ones((10, 11)))
def test_isel(self):
v = Variable(['time', 'x'], self.d)
self.assertVariableIdentical(v.isel(time=slice(None)), v)
self.assertVariableIdentical(v.isel(time=0), v[0])
self.assertVariableIdentical(v.isel(time=slice(0, 3)), v[:3])
self.assertVariableIdentical(v.isel(x=0), v[:, 0])
with self.assertRaisesRegexp(ValueError, 'do not exist'):
v.isel(not_a_dim=0)
def test_index_0d_numpy_string(self):
        # regression test to verify our workaround for indexing 0d strings
v = Variable([], np.string_('asdf'))
self.assertVariableIdentical(v[()], v)
v = Variable([], np.unicode_(u'asdf'))
self.assertVariableIdentical(v[()], v)
def test_indexing_0d_unicode(self):
# regression test for GH568
actual = Variable(('x'), [u'tmax'])[0][()]
expected = Variable((), u'tmax')
self.assertVariableIdentical(actual, expected)
def test_shift(self):
v = Variable('x', [1, 2, 3, 4, 5])
self.assertVariableIdentical(v, v.shift(x=0))
self.assertIsNot(v, v.shift(x=0))
expected = Variable('x', [np.nan, 1, 2, 3, 4])
self.assertVariableIdentical(expected, v.shift(x=1))
expected = Variable('x', [np.nan, np.nan, 1, 2, 3])
self.assertVariableIdentical(expected, v.shift(x=2))
expected = Variable('x', [2, 3, 4, 5, np.nan])
self.assertVariableIdentical(expected, v.shift(x=-1))
expected = Variable('x', [np.nan] * 5)
self.assertVariableIdentical(expected, v.shift(x=5))
self.assertVariableIdentical(expected, v.shift(x=6))
with self.assertRaisesRegexp(ValueError, 'dimension'):
v.shift(z=0)
v = Variable('x', [1, 2, 3, 4, 5], {'foo': 'bar'})
self.assertVariableIdentical(v, v.shift(x=0))
expected = Variable('x', [np.nan, 1, 2, 3, 4], {'foo': 'bar'})
self.assertVariableIdentical(expected, v.shift(x=1))
def test_shift2d(self):
v = Variable(('x', 'y'), [[1, 2], [3, 4]])
expected = Variable(('x', 'y'), [[np.nan, np.nan], [np.nan, 1]])
self.assertVariableIdentical(expected, v.shift(x=1, y=1))
def test_roll(self):
v = Variable('x', [1, 2, 3, 4, 5])
self.assertVariableIdentical(v, v.roll(x=0))
self.assertIsNot(v, v.roll(x=0))
expected = Variable('x', [5, 1, 2, 3, 4])
self.assertVariableIdentical(expected, v.roll(x=1))
self.assertVariableIdentical(expected, v.roll(x=-4))
self.assertVariableIdentical(expected, v.roll(x=6))
expected = Variable('x', [4, 5, 1, 2, 3])
self.assertVariableIdentical(expected, v.roll(x=2))
self.assertVariableIdentical(expected, v.roll(x=-3))
with self.assertRaisesRegexp(ValueError, 'dimension'):
v.roll(z=0)
def test_roll_consistency(self):
v = Variable(('x', 'y'), np.random.randn(5, 6))
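        # Variable.roll along a named dimension should agree with np.roll along
        # the corresponding axis, for negative, zero and oversized shifts.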
for axis, dim in [(0, 'x'), (1, 'y')]:
for shift in [-3, 0, 1, 7, 11]:
expected = np.roll(v.values, shift, axis=axis)
actual = v.roll(**{dim: shift}).values
self.assertArrayEqual(expected, actual)
def test_transpose(self):
v = Variable(['time', 'x'], self.d)
v2 = Variable(['x', 'time'], self.d.T)
self.assertVariableIdentical(v, v2.transpose())
self.assertVariableIdentical(v.transpose(), v.T)
x = np.random.randn(2, 3, 4, 5)
w = Variable(['a', 'b', 'c', 'd'], x)
w2 = Variable(['d', 'b', 'c', 'a'], np.einsum('abcd->dbca', x))
self.assertEqual(w2.shape, (5, 3, 4, 2))
self.assertVariableIdentical(w2, w.transpose('d', 'b', 'c', 'a'))
self.assertVariableIdentical(w, w2.transpose('a', 'b', 'c', 'd'))
w3 = Variable(['b', 'c', 'd', 'a'], np.einsum('abcd->bcda', x))
self.assertVariableIdentical(w, w3.transpose('a', 'b', 'c', 'd'))
def test_transpose_0d(self):
for value in [
3.5,
('a', 1),
np.datetime64('2000-01-01'),
np.timedelta64(1, 'h'),
None,
object(),
]:
variable = Variable([], value)
actual = variable.transpose()
assert actual.identical(variable)
def test_squeeze(self):
v = Variable(['x', 'y'], [[1]])
self.assertVariableIdentical(Variable([], 1), v.squeeze())
self.assertVariableIdentical(Variable(['y'], [1]), v.squeeze('x'))
self.assertVariableIdentical(Variable(['y'], [1]), v.squeeze(['x']))
self.assertVariableIdentical(Variable(['x'], [1]), v.squeeze('y'))
self.assertVariableIdentical(Variable([], 1), v.squeeze(['x', 'y']))
v = Variable(['x', 'y'], [[1, 2]])
self.assertVariableIdentical(Variable(['y'], [1, 2]), v.squeeze())
self.assertVariableIdentical(Variable(['y'], [1, 2]), v.squeeze('x'))
with self.assertRaisesRegexp(ValueError, 'cannot select a dimension'):
v.squeeze('y')
def test_get_axis_num(self):
v = Variable(['x', 'y', 'z'], np.random.randn(2, 3, 4))
self.assertEqual(v.get_axis_num('x'), 0)
self.assertEqual(v.get_axis_num(['x']), (0,))
self.assertEqual(v.get_axis_num(['x', 'y']), (0, 1))
self.assertEqual(v.get_axis_num(['z', 'y', 'x']), (2, 1, 0))
with self.assertRaisesRegexp(ValueError, 'not found in array dim'):
v.get_axis_num('foobar')
def test_expand_dims(self):
v = Variable(['x'], [0, 1])
actual = v.expand_dims(['x', 'y'])
expected = Variable(['x', 'y'], [[0], [1]])
self.assertVariableIdentical(actual, expected)
actual = v.expand_dims(['y', 'x'])
self.assertVariableIdentical(actual, expected.T)
actual = v.expand_dims(OrderedDict([('x', 2), ('y', 2)]))
expected = Variable(['x', 'y'], [[0, 0], [1, 1]])
self.assertVariableIdentical(actual, expected)
v = Variable(['foo'], [0, 1])
actual = v.expand_dims('foo')
expected = v
self.assertVariableIdentical(actual, expected)
with self.assertRaisesRegexp(ValueError, 'must be a superset'):
v.expand_dims(['z'])
def test_expand_dims_object_dtype(self):
v = Variable([], ('a', 1))
actual = v.expand_dims(('x',), (3,))
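        # Build the expected values as an object array element by element so each
        # entry stays a tuple (np.array would otherwise unpack the tuples).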
exp_values = np.empty((3,), dtype=object)
for i in range(3):
exp_values[i] = ('a', 1)
expected = Variable(['x'], exp_values)
assert actual.identical(expected)
def test_stack(self):
v = Variable(['x', 'y'], [[0, 1], [2, 3]], {'foo': 'bar'})
actual = v.stack(z=('x', 'y'))
expected = Variable('z', [0, 1, 2, 3], v.attrs)
self.assertVariableIdentical(actual, expected)
actual = v.stack(z=('x',))
expected = Variable(('y', 'z'), v.data.T, v.attrs)
self.assertVariableIdentical(actual, expected)
actual = v.stack(z=(),)
self.assertVariableIdentical(actual, v)
actual = v.stack(X=('x',), Y=('y',)).transpose('X', 'Y')
expected = Variable(('X', 'Y'), v.data, v.attrs)
self.assertVariableIdentical(actual, expected)
def test_stack_errors(self):
v = Variable(['x', 'y'], [[0, 1], [2, 3]], {'foo': 'bar'})
with self.assertRaisesRegexp(ValueError, 'invalid existing dim'):
v.stack(z=('x1',))
with self.assertRaisesRegexp(ValueError, 'cannot create a new dim'):
v.stack(x=('x',))
def test_unstack(self):
v = Variable('z', [0, 1, 2, 3], {'foo': 'bar'})
actual = v.unstack(z=OrderedDict([('x', 2), ('y', 2)]))
expected = Variable(('x', 'y'), [[0, 1], [2, 3]], v.attrs)
self.assertVariableIdentical(actual, expected)
actual = v.unstack(z=OrderedDict([('x', 4), ('y', 1)]))
expected = Variable(('x', 'y'), [[0], [1], [2], [3]], v.attrs)
self.assertVariableIdentical(actual, expected)
actual = v.unstack(z=OrderedDict([('x', 4)]))
expected = Variable('x', [0, 1, 2, 3], v.attrs)
self.assertVariableIdentical(actual, expected)
def test_unstack_errors(self):
v = Variable('z', [0, 1, 2, 3])
with self.assertRaisesRegexp(ValueError, 'invalid existing dim'):
v.unstack(foo={'x': 4})
with self.assertRaisesRegexp(ValueError, 'cannot create a new dim'):
v.stack(z=('z',))
with self.assertRaisesRegexp(ValueError, 'the product of the new dim'):
v.unstack(z={'x': 5})
def test_unstack_2d(self):
v = Variable(['x', 'y'], [[0, 1], [2, 3]])
actual = v.unstack(y={'z': 2})
expected = Variable(['x', 'z'], v.data)
self.assertVariableIdentical(actual, expected)
actual = v.unstack(x={'z': 2})
expected = Variable(['y', 'z'], v.data.T)
self.assertVariableIdentical(actual, expected)
def test_stack_unstack_consistency(self):
v = Variable(['x', 'y'], [[0, 1], [2, 3]])
actual = (v.stack(z=('x', 'y'))
.unstack(z=OrderedDict([('x', 2), ('y', 2)])))
self.assertVariableIdentical(actual, v)
def test_broadcasting_math(self):
x = np.random.randn(2, 3)
v = Variable(['a', 'b'], x)
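        # np.einsum provides an independent reference for the expected broadcast results.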
# 1d to 2d broadcasting
self.assertVariableIdentical(
v * v,
Variable(['a', 'b'], np.einsum('ab,ab->ab', x, x)))
self.assertVariableIdentical(
v * v[0],
Variable(['a', 'b'], np.einsum('ab,b->ab', x, x[0])))
self.assertVariableIdentical(
v[0] * v,
Variable(['b', 'a'], np.einsum('b,ab->ba', x[0], x)))
self.assertVariableIdentical(
v[0] * v[:, 0],
Variable(['b', 'a'], np.einsum('b,a->ba', x[0], x[:, 0])))
# higher dim broadcasting
y = np.random.randn(3, 4, 5)
w = Variable(['b', 'c', 'd'], y)
self.assertVariableIdentical(
v * w, Variable(['a', 'b', 'c', 'd'],
np.einsum('ab,bcd->abcd', x, y)))
self.assertVariableIdentical(
w * v, Variable(['b', 'c', 'd', 'a'],
np.einsum('bcd,ab->bcda', y, x)))
self.assertVariableIdentical(
v * w[0], Variable(['a', 'b', 'c', 'd'],
np.einsum('ab,cd->abcd', x, y[0])))
def test_broadcasting_failures(self):
a = Variable(['x'], np.arange(10))
b = Variable(['x'], np.arange(5))
c = Variable(['x', 'x'], np.arange(100).reshape(10, 10))
with self.assertRaisesRegexp(ValueError, 'mismatched lengths'):
a + b
with self.assertRaisesRegexp(ValueError, 'duplicate dimensions'):
a + c
def test_inplace_math(self):
x = np.arange(5)
v = Variable(['x'], x)
v2 = v
v2 += 1
self.assertIs(v, v2)
# since we provided an ndarray for data, it is also modified in-place
self.assertIs(source_ndarray(v.values), x)
self.assertArrayEqual(v.values, np.arange(5) + 1)
with self.assertRaisesRegexp(ValueError, 'dimensions cannot change'):
v += Variable('y', np.arange(5))
def test_reduce(self):
v = Variable(['x', 'y'], self.d, {'ignored': 'attributes'})
self.assertVariableIdentical(v.reduce(np.std, 'x'),
Variable(['y'], self.d.std(axis=0)))
self.assertVariableIdentical(v.reduce(np.std, axis=0),
v.reduce(np.std, dim='x'))
self.assertVariableIdentical(v.reduce(np.std, ['y', 'x']),
Variable([], self.d.std(axis=(0, 1))))
self.assertVariableIdentical(v.reduce(np.std),
Variable([], self.d.std()))
self.assertVariableIdentical(
v.reduce(np.mean, 'x').reduce(np.std, 'y'),
Variable([], self.d.mean(axis=0).std()))
self.assertVariableAllClose(v.mean('x'), v.reduce(np.mean, 'x'))
with self.assertRaisesRegexp(ValueError, 'cannot supply both'):
v.mean(dim='x', axis=0)
def test_big_endian_reduce(self):
# regression test for GH489
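        # '>f4' is big-endian float32, so this exercises reductions on data with
        # non-native byte order.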
data = np.ones(5, dtype='>f4')
v = Variable(['x'], data)
expected = Variable([], 5)
self.assertVariableIdentical(expected, v.sum())
def test_reduce_funcs(self):
v = Variable('x', np.array([1, np.nan, 2, 3]))
self.assertVariableIdentical(v.mean(), Variable([], 2))
self.assertVariableIdentical(v.mean(skipna=True), Variable([], 2))
self.assertVariableIdentical(v.mean(skipna=False), Variable([], np.nan))
self.assertVariableIdentical(np.mean(v), Variable([], 2))
self.assertVariableIdentical(v.prod(), Variable([], 6))
self.assertVariableIdentical(v.var(), Variable([], 2.0 / 3))
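        # numpy only gained nanmedian in 1.9, so median() is unavailable on older versions.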
if LooseVersion(np.__version__) < '1.9':
with self.assertRaises(NotImplementedError):
v.median()
else:
self.assertVariableIdentical(v.median(), Variable([], 2))
v = Variable('x', [True, False, False])
self.assertVariableIdentical(v.any(), Variable([], True))
self.assertVariableIdentical(v.all(dim='x'), Variable([], False))
v = Variable('t', pd.date_range('2000-01-01', periods=3))
with self.assertRaises(NotImplementedError):
v.max(skipna=True)
self.assertVariableIdentical(
v.max(), Variable([], pd.Timestamp('2000-01-03')))
def test_reduce_keep_attrs(self):
_attrs = {'units': 'test', 'long_name': 'testing'}
v = Variable(['x', 'y'], self.d, _attrs)
# Test dropped attrs
vm = v.mean()
self.assertEqual(len(vm.attrs), 0)
self.assertEqual(vm.attrs, OrderedDict())
# Test kept attrs
vm = v.mean(keep_attrs=True)
self.assertEqual(len(vm.attrs), len(_attrs))
self.assertEqual(vm.attrs, _attrs)
def test_count(self):
expected = Variable([], 3)
actual = Variable(['x'], [1, 2, 3, np.nan]).count()
self.assertVariableIdentical(expected, actual)
v = Variable(['x'], np.array(['1', '2', '3', np.nan], dtype=object))
actual = v.count()
self.assertVariableIdentical(expected, actual)
actual = Variable(['x'], [True, False, True]).count()
self.assertVariableIdentical(expected, actual)
self.assertEqual(actual.dtype, int)
expected = Variable(['x'], [2, 3])
actual = Variable(['x', 'y'], [[1, 0, np.nan], [1, 1, 1]]).count('y')
self.assertVariableIdentical(expected, actual)
class TestCoordinate(TestCase, VariableSubclassTestCases):
cls = staticmethod(Coordinate)
def test_init(self):
with self.assertRaisesRegexp(ValueError, 'must be 1-dimensional'):
Coordinate((), 0)
def test_to_index(self):
data = 0.5 * np.arange(10)
v = Coordinate(['time'], data, {'foo': 'bar'})
self.assertTrue(pd.Index(data, name='time').identical(v.to_index()))
def test_multiindex_default_level_names(self):
midx = pd.MultiIndex.from_product([['a', 'b'], [1, 2]])
v = Coordinate(['x'], midx, {'foo': 'bar'})
self.assertEqual(v.to_index().names, ('x_level_0', 'x_level_1'))
def test_data(self):
x = Coordinate('x', np.arange(3.0))
# data should be initially saved as an ndarray
self.assertIs(type(x._data), np.ndarray)
self.assertEqual(float, x.dtype)
self.assertArrayEqual(np.arange(3), x)
self.assertEqual(float, x.values.dtype)
# after inspecting x.values, the Coordinate value will be saved as an Index
self.assertIsInstance(x._data, PandasIndexAdapter)
with self.assertRaisesRegexp(TypeError, 'cannot be modified'):
x[:] = 0
def test_name(self):
coord = Coordinate('x', [10.0])
self.assertEqual(coord.name, 'x')
with self.assertRaises(AttributeError):
coord.name = 'y'
def test_concat_periods(self):
periods = pd.period_range('2000-01-01', periods=10)
coords = [Coordinate('t', periods[:5]), Coordinate('t', periods[5:])]
expected = Coordinate('t', periods)
actual = Coordinate.concat(coords, dim='t')
assert actual.identical(expected)
assert isinstance(actual.to_index(), pd.PeriodIndex)
positions = [list(range(5)), list(range(5, 10))]
actual = Coordinate.concat(coords, dim='t', positions=positions)
assert actual.identical(expected)
assert isinstance(actual.to_index(), pd.PeriodIndex)
def test_concat_multiindex(self):
idx = pd.MultiIndex.from_product([[0, 1, 2], ['a', 'b']])
coords = [Coordinate('x', idx[:2]), Coordinate('x', idx[2:])]
expected = Coordinate('x', idx)
actual = Coordinate.concat(coords, dim='x')
assert actual.identical(expected)
assert isinstance(actual.to_index(), pd.MultiIndex)
class TestAsCompatibleData(TestCase):
def test_unchanged_types(self):
types = (np.asarray, PandasIndexAdapter, indexing.LazilyIndexedArray)
for t in types:
for data in [np.arange(3),
pd.date_range('2000-01-01', periods=3),
pd.date_range('2000-01-01', periods=3).values]:
x = t(data)
self.assertIs(source_ndarray(x),
source_ndarray(as_compatible_data(x)))
def test_converted_types(self):
for input_array in [[[0, 1, 2]], pd.DataFrame([[0, 1, 2]])]:
actual = as_compatible_data(input_array)
self.assertArrayEqual(np.asarray(input_array), actual)
self.assertEqual(np.ndarray, type(actual))
self.assertEqual(np.asarray(input_array).dtype, actual.dtype)
def test_masked_array(self):
original = np.ma.MaskedArray(np.arange(5))
expected = np.arange(5)
actual = as_compatible_data(original)
self.assertArrayEqual(expected, actual)
self.assertEqual(np.dtype(int), actual.dtype)
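        # When entries are actually masked they should come back as NaN,
        # which promotes the integer data to float.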
original = np.ma.MaskedArray(np.arange(5), mask=4 * [False] + [True])
expected = np.arange(5.0)
expected[-1] = np.nan
actual = as_compatible_data(original)
self.assertArrayEqual(expected, actual)
self.assertEqual(np.dtype(float), actual.dtype)
def test_datetime(self):
expected = np.datetime64('2000-01-01T00Z')
actual = as_compatible_data(expected)
self.assertEqual(expected, actual)
self.assertEqual(np.ndarray, type(actual))
self.assertEqual(np.dtype('datetime64[ns]'), actual.dtype)
expected = np.array([np.datetime64('2000-01-01T00Z')])
actual = as_compatible_data(expected)
self.assertEqual(np.asarray(expected), actual)
self.assertEqual(np.ndarray, type(actual))
self.assertEqual(np.dtype('datetime64[ns]'), actual.dtype)
expected = np.array([np.datetime64('2000-01-01T00Z', 'ns')])
actual = as_compatible_data(expected)
self.assertEqual(np.asarray(expected), actual)
self.assertEqual(np.ndarray, type(actual))
self.assertEqual(np.dtype('datetime64[ns]'), actual.dtype)
self.assertIs(expected, source_ndarray(np.asarray(actual)))
expected = np.datetime64('2000-01-01T00Z', 'ns')
actual = as_compatible_data(datetime(2000, 1, 1))
self.assertEqual(np.asarray(expected), actual)
self.assertEqual(np.ndarray, type(actual))
self.assertEqual(np.dtype('datetime64[ns]'), actual.dtype)
|
apache-2.0
|
shanot/imp
|
modules/pmi/test/test_tools.py
|
1
|
32163
|
from __future__ import print_function
import IMP
import os
import IMP.test
import IMP.core
import IMP.container
import IMP.pmi
import IMP.pmi.topology
import IMP.pmi.dof
import IMP.pmi.io
import IMP.pmi.io.crosslink
import IMP.pmi.representation
import IMP.pmi.restraints
import IMP.pmi.restraints.em
import IMP.pmi.restraints.crosslinking
import IMP.pmi.macros
import RMF
import IMP.rmf
from math import *
class Tests(IMP.test.TestCase):
def test_particle_to_sample_filter(self):
"""Test ParticleToSampleFilter"""
class MockRestraint(object):
def __init__(self, sos):
self.sos = sos
def get_particles_to_sample(self):
return self.sos
r1 = MockRestraint({'Nuisances_Sigma': ('p0', 'p1'),
'Nuisances_Psi': ('p2', 'p3')})
r2 = MockRestraint({'Nuisances_Sigma': ('p0', 'p4')})
with IMP.allow_deprecated():
p = IMP.pmi.tools.ParticleToSampleFilter([r1, r2])
p.add_filter('Sigma')
ps = p.get_particles_to_sample()
self.assertEqual(list(ps.keys()), ['Nuisances_Sigma'])
val = ps['Nuisances_Sigma']
self.assertEqual(sorted(val), ['p0', 'p0', 'p1', 'p4'])
def test_particle_to_sample_list(self):
"""Test ParticleToSampleList"""
p = IMP.pmi.tools.ParticleToSampleList()
self.assertEqual(p.label, 'None')
self.assertRaises(TypeError, p.add_particle, 'P0', 'bad_type', 1, 'foo')
p.add_particle('RB1', 'Rigid_Bodies', (1., 2.), 'myRB1')
# Test bad rigid body transformation
self.assertRaises(TypeError, p.add_particle,
'RB1', 'Rigid_Bodies', [1., 2.], 'myRB1')
p.add_particle('S1', 'Surfaces', (1., 2., 3.), 'myS1')
self.assertRaises(TypeError, p.add_particle,
'S1', 'Surfaces', [1., 2.], 'myS1')
p.add_particle('F1', 'Floppy_Bodies', 1., 'myF1')
self.assertRaises(TypeError, p.add_particle,
'F1', 'Floppy_Bodies', 'badtransform', 'myF1')
self.assertEqual(p.get_particles_to_sample(),
{'SurfacesParticleToSampleList_myS1_None':
(['S1'], (1.0, 2.0, 3.0)),
'Rigid_BodiesParticleToSampleList_myRB1_None':
(['RB1'], (1.0, 2.0)),
'Floppy_BodiesParticleToSampleList_myF1_None': (['F1'], 1.0)})
def test_shuffle(self):
"""Test moving rbs, fbs"""
mdl = IMP.Model()
s = IMP.pmi.topology.System(mdl)
seqs = IMP.pmi.topology.Sequences(self.get_input_file_name('chainA.fasta'))
st1 = s.create_state()
mol = st1.create_molecule("GCP2_YEAST",sequence=seqs["GCP2_YEAST"][:100],chain_id='A')
atomic_res = mol.add_structure(self.get_input_file_name('chainA.pdb'),
chain_id='A',
res_range=(1,100))
mol.add_representation(mol.get_atomic_residues(),resolutions=[10])
mol.add_representation(mol.get_non_atomic_residues(), resolutions=[10])
mol2 = mol.create_clone('B')
hier = s.build()
dof = IMP.pmi.dof.DegreesOfFreedom(mdl)
mv,rb1 = dof.create_rigid_body(mol, nonrigid_parts=mol.get_non_atomic_residues())
mv,rb2 = dof.create_rigid_body(mol2, nonrigid_parts=mol2.get_non_atomic_residues())
results = IMP.pmi.tools.shuffle_configuration(hier,return_debug=True)
ps1 = IMP.get_indexes(IMP.core.get_leaves(mol.get_hierarchy()))
ps2 = IMP.get_indexes(IMP.core.get_leaves(mol2.get_hierarchy()))
self.assertEqual(len(results),16)
self.assertEqual(results[0],[rb1,set(ps2)])
self.assertEqual(results[1],[rb2,set(ps1)])
for r in results[2:]:
self.assertFalse(r[1])
# test it works if you pass particles
r2 = IMP.pmi.tools.shuffle_configuration(IMP.core.get_leaves(hier),return_debug=True)
self.assertEqual(len(r2),16)
self.assertEqual(r2[0],[rb1,set(ps2)])
self.assertEqual(r2[1],[rb2,set(ps1)])
for r in r2[2:]:
self.assertFalse(r[1])
def test_shuffle_box(self):
"""Test shuffling rbs, fbs with bounding box"""
mdl = IMP.Model()
s = IMP.pmi.topology.System(mdl)
seqs = IMP.pmi.topology.Sequences(self.get_input_file_name('chainA.fasta'))
st1 = s.create_state()
mol = st1.create_molecule("GCP2_YEAST",sequence=seqs["GCP2_YEAST"][:100],chain_id='A')
atomic_res = mol.add_structure(self.get_input_file_name('chainA.pdb'),
chain_id='A',
res_range=(1,100))
mol.add_representation(mol.get_atomic_residues(),resolutions=[10])
mol.add_representation(mol.get_non_atomic_residues(), resolutions=[10])
mol2 = mol.create_clone('B')
mol3 = st1.create_molecule("GCP2_YEAST_BEADS",sequence=seqs["GCP2_YEAST"][:100],chain_id='C')
mol3.add_representation(mol3.get_non_atomic_residues(), resolutions=[10])
hier = s.build()
dof = IMP.pmi.dof.DegreesOfFreedom(mdl)
mv,rb1 = dof.create_rigid_body(mol, nonrigid_parts=mol.get_non_atomic_residues())
mv,rb2 = dof.create_rigid_body(mol2, nonrigid_parts=mol2.get_non_atomic_residues())
results = IMP.pmi.tools.shuffle_configuration(hier,
bounding_box=((100,100,100),
(200,200,200)),return_debug=True)
rbs_trans_after={}
fbs_position_after={}
rbs,fbs = IMP.pmi.tools.get_rbs_and_beads([hier])
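        # After shuffling with a bounding box, every rigid body and flexible bead
        # should end up inside the requested (100,100,100)-(200,200,200) box.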
for rb in rbs:
coor_rb = IMP.core.XYZ(rb).get_coordinates()
self.assertTrue(100.0 <coor_rb[0]< 200.0)
self.assertTrue(100.0 <coor_rb[1]< 200.0)
self.assertTrue(100.0 <coor_rb[2]< 200.0)
for fb in fbs:
if IMP.core.NonRigidMember.get_is_setup(fb):
coor_fb=IMP.algebra.Vector3D([fb.get_value(IMP.FloatKey(4)),
fb.get_value(IMP.FloatKey(5)),
fb.get_value(IMP.FloatKey(6))])
self.assertTrue(100.0 <coor_fb[0]< 200.0)
self.assertTrue(100.0 <coor_fb[1]< 200.0)
self.assertTrue(100.0 <coor_fb[2]< 200.0)
else:
coor_fb=IMP.core.XYZ(fb).get_coordinates()
self.assertTrue(100.0 <coor_fb[0]< 200.0)
self.assertTrue(100.0 <coor_fb[1]< 200.0)
self.assertTrue(100.0 <coor_fb[2]< 200.0)
def test_shuffle_deep(self):
"""Test moving rbs, fbs"""
mdl = IMP.Model()
s = IMP.pmi.topology.System(mdl)
seqs = IMP.pmi.topology.Sequences(self.get_input_file_name('chainA.fasta'))
st1 = s.create_state()
mol = st1.create_molecule("GCP2_YEAST",sequence=seqs["GCP2_YEAST"][:100],chain_id='A')
atomic_res = mol.add_structure(self.get_input_file_name('chainA.pdb'),
chain_id='A',
res_range=(1,100))
mol.add_representation(mol.get_atomic_residues(),resolutions=[10])
mol.add_representation(mol.get_non_atomic_residues(), resolutions=[10])
mol2 = mol.create_clone('B')
mol3 = st1.create_molecule("GCP2_YEAST_BEADS",sequence=seqs["GCP2_YEAST"][:100],chain_id='C')
mol3.add_representation(mol3.get_non_atomic_residues(), resolutions=[10])
hier = s.build()
dof = IMP.pmi.dof.DegreesOfFreedom(mdl)
dof.create_rigid_body(mol, nonrigid_parts=mol.get_non_atomic_residues())
dof.create_rigid_body(mol2, nonrigid_parts=mol2.get_non_atomic_residues())
dof.create_flexible_beads(mol3.get_non_atomic_residues(),max_trans=1.0)
rbs,fbs = IMP.pmi.tools.get_rbs_and_beads([hier])
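        # Record rigid-body transformations and bead coordinates before and after
        # shuffling so we can check that everything actually moved.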
rbs_trans_before={}
fbs_position_before={}
rbs_trans_after={}
fbs_position_after={}
for rb in rbs:
rbs_trans_before[rb]=rb.get_reference_frame().get_transformation_to()
for fb in fbs:
if IMP.core.NonRigidMember.get_is_setup(fb):
fbs_position_before[fb]=IMP.algebra.Vector3D(
[fb.get_value(IMP.FloatKey(4)),
fb.get_value(IMP.FloatKey(5)),
fb.get_value(IMP.FloatKey(6))])
else:
fbs_position_before[fb]=IMP.core.XYZ(fb).get_coordinates()
IMP.pmi.tools.shuffle_configuration(hier)
for rb in rbs:
rbs_trans_after[rb]=rb.get_reference_frame().get_transformation_to()
for fb in fbs:
if IMP.core.NonRigidMember.get_is_setup(fb):
fbs_position_after[fb]=IMP.algebra.Vector3D(
[fb.get_value(IMP.FloatKey(4)),
fb.get_value(IMP.FloatKey(5)),
fb.get_value(IMP.FloatKey(6))])
else:
fbs_position_after[fb]=IMP.core.XYZ(fb).get_coordinates()
for fb in fbs:
position_after=fbs_position_after[fb]
position_before=fbs_position_before[fb]
for i in [0,1,2]:
self.assertNotEqual(position_after[i],position_before[i])
for rb in rbs:
position_after=rbs_trans_after[rb].get_translation()
position_before=rbs_trans_before[rb].get_translation()
rotation_after=rbs_trans_after[rb].get_rotation()*IMP.algebra.Vector3D(1,1,1)
rotation_before=rbs_trans_before[rb].get_rotation()*IMP.algebra.Vector3D(1,1,1)
for i in [0,1,2]:
self.assertNotEqual(position_after[i],position_before[i])
self.assertNotEqual(rotation_after[i],rotation_before[i])
def test_select_all_resolutions_and_densities_sklearn(self):
"""Test this actually check selec_all_resoltions and get_densities"""
try:
import sklearn
        except ImportError:
            self.skipTest("sklearn is required for this test")
mdl = IMP.Model()
s = IMP.pmi.topology.System(mdl)
seqs = IMP.pmi.topology.Sequences(self.get_input_file_name('chainA.fasta'))
st1 = s.create_state()
mol = st1.create_molecule("GCP2_YEAST",sequence=seqs["GCP2_YEAST"][:100],chain_id='A')
atomic_res = mol.add_structure(self.get_input_file_name('chainA.pdb'),
chain_id='A',
res_range=(1,100))
rs=[0,10]
mol.add_representation(mol.get_atomic_residues(),
resolutions=rs,
density_prefix='testselect',
density_voxel_size=0,
density_residues_per_component=10)
mol.add_representation(mol.get_non_atomic_residues(),
resolutions=[10],
setup_particles_as_densities=True)
hier = s.build()
ps = IMP.pmi.tools.select_at_all_resolutions(mol.get_hierarchy(),residue_index=93)
self.assertEqual(len(ps),14) #should get res0, res10, and ALL densities
leaves=[]
for r in rs:
leaves+=IMP.atom.Selection(hier,resolution=r).get_selected_particles()
dens=IMP.atom.Selection(hier,representation_type=IMP.atom.DENSITIES).get_selected_particles()
leaves+=dens
ps = IMP.pmi.tools.select_at_all_resolutions(mol.get_hierarchy())
inds1=sorted(list(set([p.get_index() for p in leaves])))
inds2=sorted(p.get_index() for p in ps)
self.assertEqual(inds1,inds2)
#check densities
dens_test=IMP.pmi.tools.get_densities(hier)
inds1=sorted(p.get_index() for p in dens)
inds2=sorted(p.get_index() for p in dens_test)
self.assertEqual(inds1,inds2)
dens_test=IMP.pmi.tools.get_densities([mol])
inds1=sorted(p.get_index() for p in dens)
inds2=sorted(p.get_index() for p in dens_test)
self.assertEqual(inds1,inds2)
os.unlink('testselect.txt')
def test_get_molecules(self):
"""Test that get_molecules correctly selected IMP.atom.Molecules"""
mdl = IMP.Model()
s = IMP.pmi.topology.System(mdl)
seqs = IMP.pmi.topology.Sequences(self.get_input_file_name('seqs.fasta'))
st1 = s.create_state()
m1 = st1.create_molecule("Prot1",sequence=seqs["Protein_1"])
m2 = st1.create_molecule("Prot2",sequence=seqs["Protein_2"])
m3 = st1.create_molecule("Prot3",sequence=seqs["Protein_3"])
a1 = m1.add_structure(self.get_input_file_name('prot.pdb'),
chain_id='A',res_range=(55,63),offset=-54)
a2 = m2.add_structure(self.get_input_file_name('prot.pdb'),
chain_id='B',res_range=(180,192),offset=-179)
a3 = m3.add_structure(self.get_input_file_name('prot.pdb'),
chain_id='G',res_range=(55,63),offset=-54)
m1.add_representation(a1,resolutions=[0,1])
m1.add_representation(m1.get_non_atomic_residues(),resolutions=[1])
m2.add_representation(a2,resolutions=[0,1]) # m2 only has atoms
m3.add_representation(a3,resolutions=[1,10])
m3.add_representation(m3.get_non_atomic_residues(),resolutions=[1], setup_particles_as_densities=True)
hier = s.build()
ind1=m1.hier.get_particle_index()
ind2=m2.hier.get_particle_index()
ind3=m3.hier.get_particle_index()
mols=IMP.pmi.tools.get_molecules(hier)
inds=sorted(p.get_particle_index() for p in mols)
self.assertEqual(inds,[ind1,ind2,ind3])
mols=IMP.pmi.tools.get_molecules([m1,m2,m3])
inds=sorted(p.get_particle_index() for p in mols)
self.assertEqual(inds,[ind1,ind2,ind3])
mols=IMP.pmi.tools.get_molecules([m1,m2])
inds=sorted(p.get_particle_index() for p in mols)
self.assertEqual(inds,[ind1,ind2])
mols=IMP.pmi.tools.get_molecules(IMP.atom.get_leaves(m1.hier))
inds=sorted(p.get_particle_index() for p in mols)
self.assertEqual(inds,[ind1])
mols=IMP.pmi.tools.get_molecules(IMP.atom.get_leaves(hier))
inds=sorted(p.get_particle_index() for p in mols)
self.assertEqual(inds,[ind1,ind2,ind3])
mols=IMP.pmi.tools.get_molecules([IMP.atom.get_leaves(m1.hier)[0],IMP.atom.get_leaves(m3.hier)[1]])
inds=sorted(p.get_particle_index() for p in mols)
self.assertEqual(inds,[ind1,ind3])
def test_select_at_all_resolutions_no_density(self):
"""More stringent and runs without sklearn"""
mdl = IMP.Model()
s = IMP.pmi.topology.System(mdl)
seqs = IMP.pmi.topology.Sequences(self.get_input_file_name('chainA.fasta'))
st1 = s.create_state()
mol = st1.create_molecule("GCP2_YEAST",sequence=seqs["GCP2_YEAST"][:100],chain_id='A')
rs=[0,1,3,10]
atomic_res = mol.add_structure(self.get_input_file_name('chainA.pdb'),
chain_id='A',
res_range=(1,100))
mol.add_representation(mol.get_atomic_residues(),
resolutions=rs,
density_residues_per_component=0)
mol.add_representation(mol.get_non_atomic_residues(),
resolutions=[10],
setup_particles_as_densities=True)
hier = s.build()
leaves=[]
for r in rs:
leaves+=IMP.atom.Selection(hier,resolution=r).get_selected_particles()
ps = IMP.pmi.tools.select_at_all_resolutions(mol.get_hierarchy())
inds1=sorted(list(set([p.get_index() for p in leaves])))
inds2=sorted(p.get_index() for p in ps)
self.assertEqual(inds1,inds2)
def test_get_name(self):
"""Test pmi::get_molecule_name_and_copy()"""
mdl = IMP.Model()
s = IMP.pmi.topology.System(mdl)
seqs = IMP.pmi.topology.Sequences(self.get_input_file_name('seqs.fasta'))
st1 = s.create_state()
m1 = st1.create_molecule("Prot1",sequence=seqs["Protein_1"])
a1 = m1.add_structure(self.get_input_file_name('prot.pdb'),
chain_id='A',res_range=(55,63),offset=-54)
m1.add_representation(a1,resolutions=[0,1])
m2 = m1.create_clone('B')
hier = s.build()
sel0 = IMP.atom.Selection(hier,resolution=1,copy_index=0).get_selected_particles()
self.assertEqual(IMP.pmi.get_molecule_name_and_copy(sel0[0]),"Prot1.0")
sel1 = IMP.atom.Selection(hier,resolution=1,copy_index=1).get_selected_particles()
self.assertEqual(IMP.pmi.get_molecule_name_and_copy(sel1[0]),"Prot1.1")
def test_get_densities(self):
mdl = IMP.Model()
s = IMP.pmi.topology.System(mdl)
seqs = IMP.pmi.topology.Sequences(self.get_input_file_name('seqs.fasta'))
st1 = s.create_state()
m3 = st1.create_molecule("Prot3",sequence=seqs["Protein_3"])
a3 = m3.add_structure(self.get_input_file_name('prot.pdb'),
chain_id='G',res_range=(55,63),offset=-54)
m3.add_representation(a3,resolutions=[1,10])
m3.add_representation(m3.get_non_atomic_residues(),resolutions=[1], setup_particles_as_densities=True)
hier = s.build()
densities = IMP.pmi.tools.get_densities(m3)
densities_test=IMP.atom.Selection(hier,representation_type=IMP.atom.DENSITIES).get_selected_particles()
self.assertEqual(densities,densities_test)
densities = IMP.pmi.tools.get_densities(m3.get_hierarchy())
self.assertEqual(densities,densities_test)
densities = IMP.pmi.tools.get_densities(hier)
self.assertEqual(densities,densities_test)
densities = IMP.pmi.tools.get_densities(IMP.atom.get_leaves(hier))
self.assertEqual(densities,densities_test)
def test_input_adaptor_pmi(self):
"""Test that input adaptor correctly performs selection"""
mdl = IMP.Model()
s = IMP.pmi.topology.System(mdl)
seqs = IMP.pmi.topology.Sequences(self.get_input_file_name('seqs.fasta'))
st1 = s.create_state()
m1 = st1.create_molecule("Prot1",sequence=seqs["Protein_1"])
m2 = st1.create_molecule("Prot2",sequence=seqs["Protein_2"])
m3 = st1.create_molecule("Prot3",sequence=seqs["Protein_3"])
a1 = m1.add_structure(self.get_input_file_name('prot.pdb'),
chain_id='A',res_range=(55,63),offset=-54)
a2 = m2.add_structure(self.get_input_file_name('prot.pdb'),
chain_id='B',res_range=(180,192),offset=-179)
a3 = m3.add_structure(self.get_input_file_name('prot.pdb'),
chain_id='G',res_range=(55,63),offset=-54)
m1.add_representation(a1,resolutions=[0,1])
m1.add_representation(m1.get_non_atomic_residues(),resolutions=[1])
m2.add_representation(a2,resolutions=[0,1]) # m2 only has atoms
m3.add_representation(a3,resolutions=[1,10])
m3.add_representation(m3.get_non_atomic_residues(),resolutions=[1], setup_particles_as_densities=True)
hier = s.build()
densities = [r.get_hierarchy() for r in m3.get_non_atomic_residues()]
#set up GMM particles
gemt = IMP.pmi.restraints.em.GaussianEMRestraint(densities,
self.get_input_file_name('prot_gmm.txt'),
target_is_rigid_body=True)
gmm_hier = gemt.get_density_as_hierarchy()
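        # A plain hierarchy (here the GMM density) is returned as a single flat
        # group of its leaves.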
test0 = IMP.pmi.tools.input_adaptor(gmm_hier)
self.assertEqual(test0, [IMP.atom.get_leaves(gmm_hier)])
# get one resolution
test1 = IMP.pmi.tools.input_adaptor(m1,pmi_resolution=0)
self.assertEqual(test1,[IMP.atom.Selection(m1.get_hierarchy(),
resolution=0).get_selected_particles()])
# get all resolutions
test1all = IMP.pmi.tools.input_adaptor(m1,pmi_resolution='all')
compare1all = set(IMP.atom.Selection(m1.get_hierarchy(),
resolution=0).get_selected_particles()+
IMP.atom.Selection(m1.get_hierarchy(),
resolution=1).get_selected_particles())
self.assertEqual(set(test1all[0]),compare1all)
# list of set of TempResidue
test3 = IMP.pmi.tools.input_adaptor([m1[0:3],m2[:],m3[0:1]],
pmi_resolution=1)
compare3 = [IMP.atom.Selection(m1.get_hierarchy(),
residue_indexes=[1,2,3],
resolution=1).get_selected_particles(),
IMP.atom.Selection(m2.get_hierarchy(),
resolution=1).get_selected_particles(),
IMP.atom.Selection(m3.get_hierarchy(),
residue_index=1,
resolution=1).get_selected_particles()]
self.assertEqual([set(l) for l in test3],[set(l) for l in compare3])
# check robustness and consistency TempResidue + Hierarchy
test4 = IMP.pmi.tools.input_adaptor([m1[0:3],m2[:],m3[0:1]],flatten=True,pmi_resolution=1)
compare4=IMP.pmi.tools.input_adaptor(compare3,pmi_resolution=1,flatten=True)
compare5=IMP.pmi.tools.input_adaptor(compare3,flatten=True)
compare6=IMP.pmi.tools.input_adaptor(compare3,pmi_resolution='all',flatten=True)
self.assertEqual(test4,compare4)
self.assertEqual(test4,compare5)
self.assertEqual(test4,compare6)
# check input is list or list of lists
test5 = IMP.pmi.tools.input_adaptor([m1,m2,m3],
pmi_resolution=1)
test6 = IMP.pmi.tools.input_adaptor([[m1,m2],[m3]],
pmi_resolution=1)
self.assertEqual(test5,test6)
# test input particles and input hierarchies
compare7 = [IMP.atom.Hierarchy(p) for sublist in compare3 for p in sublist ]
test7 = IMP.pmi.tools.input_adaptor(compare7,flatten=True,pmi_resolution=1)
compare8 = [p for sublist in compare3 for p in sublist]
test8 = IMP.pmi.tools.input_adaptor(compare8,flatten=True,pmi_resolution=1)
self.assertEqual(test7,test8)
# raises if passing an uneven list
with self.assertRaises(Exception):
IMP.pmi.tools.input_adaptor([[m1,m2],m3],pmi_resolution=1)
#raises if passing mixed lists
with self.assertRaises(Exception):
IMP.pmi.tools.input_adaptor([m1,s,m3],pmi_resolution=1)
# compare hierarchies and pmi molecules:
m1s_pmi = IMP.pmi.tools.input_adaptor(m1,pmi_resolution=1)
m1s_hiers = IMP.pmi.tools.input_adaptor(m1.hier,pmi_resolution=1)
self.assertEqual([set(l) for l in m1s_pmi],[set(l) for l in m1s_hiers])
m1s_pmi = IMP.pmi.tools.input_adaptor(m1,pmi_resolution=10)
m1s_hiers = IMP.pmi.tools.input_adaptor(m1.hier,pmi_resolution=10)
self.assertEqual([set(l) for l in m1s_pmi],[set(l) for l in m1s_hiers])
m1s_pmi = IMP.pmi.tools.input_adaptor(m1,pmi_resolution='all')
m1s_hiers = IMP.pmi.tools.input_adaptor(m1.hier,pmi_resolution='all')
self.assertEqual([set(l) for l in m1s_pmi],[set(l) for l in m1s_hiers])
# compare hierarchies and pmi states:
st1s_pmi = IMP.pmi.tools.input_adaptor(st1,pmi_resolution=1,flatten=True)
st1s_hiers = IMP.pmi.tools.input_adaptor(st1.hier,pmi_resolution=1,flatten=True)
self.assertEqual(st1s_pmi,st1s_hiers)
st1s_pmi = IMP.pmi.tools.input_adaptor(st1,pmi_resolution=10,flatten=True)
st1s_hiers = IMP.pmi.tools.input_adaptor(st1.hier,pmi_resolution=10,flatten=True)
self.assertEqual(st1s_pmi,st1s_hiers)
st1s_pmi = IMP.pmi.tools.input_adaptor(st1,pmi_resolution='all',flatten=True)
st1s_hiers = IMP.pmi.tools.input_adaptor(st1.hier,pmi_resolution='all',flatten=True)
self.assertEqual(st1s_pmi,st1s_hiers)
# compare hierarchies and pmi system:
sys_pmi = IMP.pmi.tools.input_adaptor(s,pmi_resolution=1,flatten=True)
sys_hiers = IMP.pmi.tools.input_adaptor(s.hier,pmi_resolution=1,flatten=True)
self.assertEqual(sys_pmi,sys_hiers)
sys_pmi = IMP.pmi.tools.input_adaptor(s,pmi_resolution=10,flatten=True)
sys_hiers = IMP.pmi.tools.input_adaptor(s.hier,pmi_resolution=10,flatten=True)
self.assertEqual(sys_pmi,sys_hiers)
sys_pmi = IMP.pmi.tools.input_adaptor(s,pmi_resolution='all',flatten=True)
sys_hiers = IMP.pmi.tools.input_adaptor(s.hier,pmi_resolution='all',flatten=True)
self.assertEqual(sys_pmi,sys_hiers)
# nothing changes to hierarchy
p=IMP.Particle(mdl)
h=IMP.atom.Hierarchy.setup_particle(p)
IMP.atom.Mass.setup_particle(p,1.0)
xyzr=IMP.core.XYZR.setup_particle(p)
xyzr.set_coordinates((0,0,0))
xyzr.set_radius(1.0)
tH = [h]
testH = IMP.pmi.tools.input_adaptor(tH)
self.assertEqual(testH,[tH])
# check passing system,state
testSystem = [set(l) for l in
IMP.pmi.tools.input_adaptor(s,pmi_resolution=0)]
testState = [set(l) for l in
IMP.pmi.tools.input_adaptor(st1,pmi_resolution=0)]
compareAll = [set(IMP.atom.Selection(m.get_hierarchy(),
resolution=0).get_selected_particles()) for m in [m1,m2,m3]]
# get_molecules() returns a dict, so the order of testSystem
# and testState is not guaranteed
self.assertEqualUnordered(testSystem, compareAll)
self.assertEqualUnordered(testState, compareAll)
def test_input_adaptor_non_pmi(self):
mdl = IMP.Model()
root=IMP.atom.Hierarchy(IMP.Particle(mdl))
for i in range(10):
p=IMP.Particle(mdl)
h=IMP.atom.Hierarchy.setup_particle(p)
IMP.atom.Mass.setup_particle(p,1.0)
xyzr=IMP.core.XYZR.setup_particle(p)
xyzr.set_coordinates((0,0,0))
xyzr.set_radius(1.0)
root.add_child(h)
hs=IMP.pmi.tools.input_adaptor(root)
self.assertEqual([IMP.atom.get_leaves(root)],hs)
hs=IMP.pmi.tools.input_adaptor(root,pmi_resolution=1)
self.assertEqual([IMP.atom.get_leaves(root)],hs)
def test_Segments(self):
s=IMP.pmi.tools.Segments(1)
self.assertEqual(s.segs,[[1]])
s=IMP.pmi.tools.Segments([1])
self.assertEqual(s.segs,[[1]])
s=IMP.pmi.tools.Segments([1,1])
self.assertEqual(s.segs,[[1]])
s=IMP.pmi.tools.Segments([1,2])
self.assertEqual(s.segs,[[1,2]])
s=IMP.pmi.tools.Segments([1,2,3])
self.assertEqual(s.segs,[[1,2,3]])
s=IMP.pmi.tools.Segments([1,2,3,5])
self.assertEqual(s.segs,[[1,2,3],[5]])
s.add(6)
self.assertEqual(s.segs,[[1,2,3],[5,6]])
s.add(0)
self.assertEqual(s.segs,[[0,1,2,3],[5,6]])
s.add(3)
self.assertEqual(s.segs,[[0,1,2,3],[5,6]])
s.add(4)
self.assertEqual(s.segs,[[0,1,2,3,4,5,6]])
s.add([-3,-4])
self.assertEqual(s.segs,[[-4,-3],[0,1,2,3,4,5,6]])
s.remove(2)
self.assertEqual(s.segs,[[-4,-3],[0,1],[3,4,5,6]])
s.remove(5)
self.assertEqual(s.segs,[[-4,-3],[0,1],[3,4],[6]])
s.remove(5)
self.assertEqual(s.segs,[[-4,-3],[0,1],[3,4],[6]])
s.add(-1)
self.assertEqual(s.segs,[[-4,-3],[-1,0,1],[3,4],[6]])
def assertEqualUnordered(self, a, b):
"""Compare two unordered lists; i.e. each list must have the
same elements, but possibly in a different order"""
self.assertEqual(len(a), len(b))
for i in a + b:
self.assertIn(i, a)
self.assertIn(i, b)
def test_get_is_canonical(self):
"""Test get PMI2 structures are canonical"""
mdl = IMP.Model()
s = IMP.pmi.topology.System(mdl)
seqs = IMP.pmi.topology.Sequences(self.get_input_file_name('seqs.fasta'))
st1 = s.create_state()
m1 = st1.create_molecule("Prot1",sequence=seqs["Protein_1"])
a1 = m1.add_structure(self.get_input_file_name('prot.pdb'),
chain_id='A',res_range=(55,63),offset=-54)
m1.add_representation(a1,resolutions=[0,1])
hier = s.build()
sel0 = IMP.atom.Selection(hier,molecule="Prot1",resolution=0).get_selected_particles()
sel1 = IMP.atom.Selection(hier,molecule="Prot1",resolution=1).get_selected_particles()
for p in sel0+sel1:
self.assertTrue(IMP.pmi.get_is_canonical(p))
def test_set_coordinates_from_rmf(self):
mdl = IMP.Model()
s = IMP.pmi.topology.System(mdl)
seqs = IMP.pmi.topology.Sequences(self.get_input_file_name('seqs.fasta'))
st1 = s.create_state()
m1 = st1.create_molecule("Prot1",sequence=seqs["Protein_1"])
a1 = m1.add_structure(self.get_input_file_name('prot.pdb'),
chain_id='A',res_range=(55,63),offset=-54)
m1.add_representation(a1,resolutions=[0,1])
m1.add_representation(m1.get_residues()-a1,resolutions=1)
hier = s.build()
sel = IMP.atom.Selection(hier,resolution=IMP.atom.ALL_RESOLUTIONS).get_selected_particles()
orig_coords = [IMP.core.XYZ(p).get_coordinates() for p in sel]
fname = self.get_tmp_file_name('test_set_coords.rmf3')
rh = RMF.create_rmf_file(fname)
IMP.rmf.add_hierarchy(rh, hier)
IMP.rmf.save_frame(rh)
del rh
for p in sel:
IMP.core.transform(IMP.core.XYZ(p),IMP.algebra.Transformation3D([10,10,10]))
coords1 = [IMP.core.XYZ(p).get_coordinates() for p in sel]
for c0,c1 in zip(orig_coords,coords1):
self.assertNotEqual(IMP.algebra.get_distance(c0,c1),0.0)
IMP.pmi.tools.set_coordinates_from_rmf(hier,fname,0)
coords2 = [IMP.core.XYZ(p).get_coordinates() for p in sel]
for c0,c2 in zip(orig_coords,coords2):
self.assertAlmostEqual(IMP.algebra.get_distance(c0,c2),0.0)
def test_threetoone(self):
import string
import random
def id_generator(size=3, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
threetoone = {'ALA': 'A', 'ARG': 'R', 'ASN': 'N', 'ASP': 'D',
'CYS': 'C', 'GLU': 'E', 'GLN': 'Q', 'GLY': 'G',
'HIS': 'H', 'ILE': 'I', 'LEU': 'L', 'LYS': 'K',
'MET': 'M', 'PHE': 'F', 'PRO': 'P', 'SER': 'S',
'THR': 'T', 'TRP': 'W', 'TYR': 'Y', 'VAL': 'V', 'UNK': 'X'}
tto=IMP.pmi.tools.ThreeToOneConverter(is_nucleic=False)
for key in threetoone:
self.assertEqual(threetoone[key],tto[key])
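        # Random three-letter codes that are not valid residue names should fall back to 'X'.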
for s in range(10):
id=id_generator()
if id in threetoone:
self.assertEqual(threetoone[id], tto[id])
else:
self.assertEqual("X",tto[id])
threetoone = {'ADE': 'A', 'URA': 'U', 'CYT': 'C', 'GUA': 'G',
'THY': 'T', 'UNK': 'X'}
tto = IMP.pmi.tools.ThreeToOneConverter(is_nucleic=True)
for key in threetoone:
self.assertEqual(threetoone[key], tto[key])
for s in range(10):
id = id_generator()
if id in threetoone:
self.assertEqual(threetoone[id], tto[id])
else:
self.assertEqual("X", tto[id])
if __name__ == '__main__':
IMP.test.main()
|
gpl-3.0
|
russel1237/scikit-learn
|
examples/applications/topics_extraction_with_nmf_lda.py
|
18
|
3768
|
"""
=======================================================================================
Topic extraction with Non-negative Matrix Factorization and Latent Dirichlet Allocation
=======================================================================================
This is an example of applying Non-negative Matrix Factorization
and Latent Dirichlet Allocation to a corpus of documents in order to
extract additive models of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial in NMF. In LDA, the time complexity is
proportional to (n_samples * iterations).
"""
# Author: Olivier Grisel <[email protected]>
# Lars Buitinck <[email protected]>
# Chyi-Kwei Yau <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
print("Loading dataset...")
t0 = time()
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
data_samples = dataset.data
print("done in %0.3fs." % (time() - t0))
# Use tf-idf features for NMF.
print("Extracting tf-idf features for NMF...")
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features,
                                   stop_words='english')
t0 = time()
tfidf = tfidf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Use tf (raw term count) features for LDA.
print("Extracting tf features for LDA...")
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
t0 = time()
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with tf-idf features,"
"n_samples=%d and n_features=%d..."
% (n_samples, n_features))
t0 = time()
nmf = NMF(n_components=n_topics, random_state=1, alpha=.1, l1_ratio=.5).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model:")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
print("Fitting LDA models with tf features, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
learning_method='online', learning_offset=50.,
random_state=0)
t0 = time()
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
|
bsd-3-clause
|