repo_name (stringlengths 7-79) | path (stringlengths 4-179) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 959-798k) | license (stringclasses 15 values) |
---|---|---|---|---|---|
xi-studio/anime | src/convert_midi.py | 1 | 2257 |
import pretty_midi
import numpy as np
import cPickle
import gzip
import glob
import matplotlib.pyplot as plt
from scipy.misc import imsave
fs = 10.0
note = (21,109)
def midi2pianoroll(dataset):
files = glob.glob(dataset)
num = 0
l = []
for f in files:
filename = f.replace('.mid','.png')
pm = pretty_midi.PrettyMIDI(f)
res = pm.get_piano_roll(fs=fs)
res[np.where(res>0)] = 1
data = res[21:109,:]
data = data.T
for x in range(data.shape[0]-100):
l.append(data[x:x+100].reshape(-1))
print f
idx = np.arange(len(l))
l = np.array(l)
res = l[:10000]
print l.shape
print l.dtype
return res
def piano_roll_to_pretty_midi(pr, program=1):
piano_roll = np.zeros((128,pr.shape[1]))
piano_roll[21:109,:] = pr
print np.max(pr)
notes, frames = piano_roll.shape
pm = pretty_midi.PrettyMIDI()
instrument = pretty_midi.Instrument(program=program)
    # pad 1 column of zeros so we can acknowledge initial and ending events
piano_roll = np.pad(piano_roll, [(0, 0), (1, 1)], 'constant')
# use changes in velocities to find note on / note off events
velocity_changes = np.nonzero(np.diff(piano_roll).T)
    # keep track of velocities and note-on times
prev_velocities = np.zeros(notes, dtype=int)
note_on_time = np.zeros(notes)
for time, note in zip(*velocity_changes):
velocity = piano_roll[note, time + 1]
time = time / fs
if velocity > 0:
if prev_velocities[note] == 0:
note_on_time[note] = time
prev_velocities[note] = velocity
else:
pm_note = pretty_midi.Note(
velocity=prev_velocities[note],
pitch=note,
start=note_on_time[note],
end=time)
instrument.notes.append(pm_note)
prev_velocities[note] = 0
pm.instruments.append(instrument)
return pm
if __name__=="__main__":
data = midi2pianoroll('../data/midi_set/*.mid')
data = data.astype(np.int8)
res = (data,np.ones(data.shape[0]))
with gzip.open("../data/midi.pkl.gz",'wb') as f:
cPickle.dump(res,f)
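# Illustrative sketch (editor addition, not part of the original script):
# reads back the (data, labels) tuple that the __main__ block above writes
# to ../data/midi.pkl.gz. The path mirrors the dump call; nothing is assumed
# beyond the gzip/cPickle imports already present.
def load_pianoroll_dataset(path="../data/midi.pkl.gz"):
    with gzip.open(path, 'rb') as f:
        data, labels = cPickle.load(f)
    return data, labels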
| mit |
mlperf/training_results_v0.6 | Google/benchmarks/transformer/implementations/tpu-v3-512-transformer/dataset_preproc/data_generators/text_encoder.py | 6 | 36278 |
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encoders for text data.
* TextEncoder: base class
* ByteTextEncoder: for ascii text
* TokenTextEncoder: with user-supplied vocabulary file
* SubwordTextEncoder: invertible
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from itertools import chain
import math
import re
import tempfile
import time
import numpy as np
import six
from six.moves import range # pylint: disable=redefined-builtin
from tensor2tensor.data_generators import tokenizer
import tensorflow as tf
# Reserved tokens for things like padding and EOS symbols.
PAD = "<pad>"
EOS = "<EOS>"
RESERVED_TOKENS = [PAD, EOS]
NUM_RESERVED_TOKENS = len(RESERVED_TOKENS)
PAD_ID = RESERVED_TOKENS.index(PAD) # Normally 0
EOS_ID = RESERVED_TOKENS.index(EOS) # Normally 1
if six.PY2:
RESERVED_TOKENS_BYTES = RESERVED_TOKENS
else:
RESERVED_TOKENS_BYTES = [bytes(PAD, "ascii"), bytes(EOS, "ascii")]
# Regular expression for unescaping token strings.
# '\u' is converted to '_'
# '\\' is converted to '\'
# '\213;' is converted to unichr(213)
_UNESCAPE_REGEX = re.compile(r"\\u|\\\\|\\([0-9]+);")
_ESCAPE_CHARS = set(u"\\_u;0123456789")
# Unicode utility functions that work with Python 2 and 3
def native_to_unicode(s):
if is_unicode(s):
return s
try:
return to_unicode(s)
except UnicodeDecodeError:
res = to_unicode(s, ignore_errors=True)
tf.logging.info("Ignoring Unicode error, outputting: %s" % res)
return res
def unicode_to_native(s):
if six.PY2:
return s.encode("utf-8") if is_unicode(s) else s
else:
return s
def is_unicode(s):
if six.PY2:
if isinstance(s, unicode):
return True
else:
if isinstance(s, str):
return True
return False
def to_unicode(s, ignore_errors=False):
if is_unicode(s):
return s
error_mode = "ignore" if ignore_errors else "strict"
return s.decode("utf-8", errors=error_mode)
def to_unicode_ignore_errors(s):
return to_unicode(s, ignore_errors=True)
def strip_ids(ids, ids_to_strip):
"""Strip ids_to_strip from the end ids."""
ids = list(ids)
while ids and ids[-1] in ids_to_strip:
ids.pop()
return ids
class TextEncoder(object):
"""Base class for converting from ints to/from human readable strings."""
def __init__(self, num_reserved_ids=NUM_RESERVED_TOKENS):
self._num_reserved_ids = num_reserved_ids
@property
def num_reserved_ids(self):
return self._num_reserved_ids
def encode(self, s):
"""Transform a human-readable string into a sequence of int ids.
The ids should be in the range [num_reserved_ids, vocab_size). Ids [0,
num_reserved_ids) are reserved.
EOS is not appended.
Args:
s: human-readable string to be converted.
Returns:
ids: list of integers
"""
return [int(w) + self._num_reserved_ids for w in s.split()]
def decode(self, ids, strip_extraneous=False):
"""Transform a sequence of int ids into a human-readable string.
EOS is not expected in ids.
Args:
ids: list of integers to be converted.
strip_extraneous: bool, whether to strip off extraneous tokens
(EOS and PAD).
Returns:
s: human-readable string.
"""
if strip_extraneous:
ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))
return " ".join(self.decode_list(ids))
def decode_list(self, ids):
"""Transform a sequence of int ids into a their string versions.
This method supports transforming individual input/output ids to their
string versions so that sequence to/from text conversions can be visualized
in a human readable format.
Args:
ids: list of integers to be converted.
Returns:
      strs: list of human-readable strings.
"""
decoded_ids = []
for id_ in ids:
if 0 <= id_ < self._num_reserved_ids:
decoded_ids.append(RESERVED_TOKENS[int(id_)])
else:
decoded_ids.append(id_ - self._num_reserved_ids)
return [str(d) for d in decoded_ids]
@property
def vocab_size(self):
raise NotImplementedError()
class ByteTextEncoder(TextEncoder):
"""Encodes each byte to an id. For 8-bit strings only."""
def encode(self, s):
numres = self._num_reserved_ids
if six.PY2:
if isinstance(s, unicode):
s = s.encode("utf-8")
return [ord(c) + numres for c in s]
# Python3: explicitly convert to UTF-8
return [c + numres for c in s.encode("utf-8")]
def decode(self, ids, strip_extraneous=False):
if strip_extraneous:
ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))
numres = self._num_reserved_ids
decoded_ids = []
int2byte = six.int2byte
for id_ in ids:
if 0 <= id_ < numres:
decoded_ids.append(RESERVED_TOKENS_BYTES[int(id_)])
else:
decoded_ids.append(int2byte(id_ - numres))
if six.PY2:
return "".join(decoded_ids)
# Python3: join byte arrays and then decode string
return b"".join(decoded_ids).decode("utf-8", "replace")
def decode_list(self, ids):
numres = self._num_reserved_ids
decoded_ids = []
int2byte = six.int2byte
for id_ in ids:
if 0 <= id_ < numres:
decoded_ids.append(RESERVED_TOKENS_BYTES[int(id_)])
else:
decoded_ids.append(int2byte(id_ - numres))
# Python3: join byte arrays and then decode string
return decoded_ids
@property
def vocab_size(self):
return 2**8 + self._num_reserved_ids
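# Illustrative sketch (editor addition, not part of the original module):
# ByteTextEncoder shifts raw byte values past the reserved ids, so ids 0/1
# stay free for <pad>/<EOS>. The helper below is only a usage example.
def _byte_text_encoder_example():
  enc = ByteTextEncoder()
  ids = enc.encode("hi")  # [ord('h') + 2, ord('i') + 2] == [106, 107]
  assert enc.decode(ids) == "hi"
  return ids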
class ClassLabelEncoder(TextEncoder):
"""Encoder for class labels."""
def __init__(self, class_labels=None, class_labels_fname=None):
super(ClassLabelEncoder, self).__init__(num_reserved_ids=0)
if class_labels_fname:
with tf.gfile.Open(class_labels_fname) as f:
class_labels = [label.strip() for label in f.readlines()]
assert class_labels
self._class_labels = class_labels
def encode(self, s):
label_str = s
return self._class_labels.index(label_str)
def decode(self, ids, strip_extraneous=False):
del strip_extraneous
label_id = ids
if isinstance(label_id, list):
assert len(label_id) == 1
label_id, = label_id
if isinstance(label_id, np.ndarray):
label_id = np.squeeze(label_id)
return self._class_labels[label_id]
def decode_list(self, ids):
return [self._class_labels[i] for i in ids]
@property
def vocab_size(self):
return len(self._class_labels)
class OneHotClassLabelEncoder(ClassLabelEncoder):
"""One-hot encoder for class labels."""
def encode(self, label_str, on_value=1, off_value=0): # pylint: disable=arguments-differ
e = np.full(self.vocab_size, off_value, dtype=np.int32)
e[self._class_labels.index(label_str)] = on_value
return e.tolist()
def decode(self, ids, strip_extraneous=False):
del strip_extraneous
label_id = ids
if isinstance(label_id, np.ndarray):
label_id = np.squeeze(label_id).astype(np.int8).tolist()
assert isinstance(label_id, list)
assert len(label_id) == self.vocab_size
return self._class_labels[label_id.index(1)]
@property
def vocab_size(self):
return len(self._class_labels)
class TokenTextEncoder(TextEncoder):
"""Encoder based on a user-supplied vocabulary (file or list)."""
def __init__(self,
vocab_filename,
reverse=False,
vocab_list=None,
replace_oov=None,
num_reserved_ids=NUM_RESERVED_TOKENS):
"""Initialize from a file or list, one token per line.
Handling of reserved tokens works as follows:
- When initializing from a list, we add reserved tokens to the vocab.
- When initializing from a file, we do not add reserved tokens to the vocab.
- When saving vocab files, we save reserved tokens to the file.
Args:
vocab_filename: If not None, the full filename to read vocab from. If this
is not None, then vocab_list should be None.
reverse: Boolean indicating if tokens should be reversed during encoding
and decoding.
vocab_list: If not None, a list of elements of the vocabulary. If this is
not None, then vocab_filename should be None.
replace_oov: If not None, every out-of-vocabulary token seen when
encoding will be replaced by this string (which must be in vocab).
num_reserved_ids: Number of IDs to save for reserved tokens like <EOS>.
"""
super(TokenTextEncoder, self).__init__(num_reserved_ids=num_reserved_ids)
self._reverse = reverse
self._replace_oov = replace_oov
if vocab_filename:
self._init_vocab_from_file(vocab_filename)
else:
assert vocab_list is not None
self._init_vocab_from_list(vocab_list)
def encode(self, s):
"""Converts a space-separated string of tokens to a list of ids."""
sentence = s
tokens = sentence.strip().split()
if self._replace_oov is not None:
tokens = [t if t in self._token_to_id else self._replace_oov
for t in tokens]
ret = [self._token_to_id[tok] for tok in tokens]
return ret[::-1] if self._reverse else ret
def decode(self, ids, strip_extraneous=False):
return " ".join(self.decode_list(ids))
def decode_list(self, ids):
seq = reversed(ids) if self._reverse else ids
return [self._safe_id_to_token(i) for i in seq]
@property
def vocab_size(self):
return len(self._id_to_token)
def _safe_id_to_token(self, idx):
return self._id_to_token.get(idx, "ID_%d" % idx)
def _init_vocab_from_file(self, filename):
"""Load vocab from a file.
Args:
filename: The file to load vocabulary from.
"""
with tf.gfile.Open(filename) as f:
tokens = [token.strip() for token in f.readlines()]
def token_gen():
for token in tokens:
yield token
self._init_vocab(token_gen(), add_reserved_tokens=False)
def _init_vocab_from_list(self, vocab_list):
"""Initialize tokens from a list of tokens.
It is ok if reserved tokens appear in the vocab list. They will be
removed. The set of tokens in vocab_list should be unique.
Args:
vocab_list: A list of tokens.
"""
def token_gen():
for token in vocab_list:
if token not in RESERVED_TOKENS:
yield token
self._init_vocab(token_gen())
def _init_vocab(self, token_generator, add_reserved_tokens=True):
"""Initialize vocabulary with tokens from token_generator."""
self._id_to_token = {}
non_reserved_start_index = 0
if add_reserved_tokens:
self._id_to_token.update(enumerate(RESERVED_TOKENS))
non_reserved_start_index = len(RESERVED_TOKENS)
self._id_to_token.update(
enumerate(token_generator, start=non_reserved_start_index))
# _token_to_id is the reverse of _id_to_token
self._token_to_id = dict((v, k)
for k, v in six.iteritems(self._id_to_token))
def store_to_file(self, filename):
"""Write vocab file to disk.
Vocab files have one token per line. The file ends in a newline. Reserved
tokens are written to the vocab file as well.
Args:
filename: Full path of the file to store the vocab to.
"""
with tf.gfile.Open(filename, "w") as f:
for i in range(len(self._id_to_token)):
f.write(self._id_to_token[i] + "\n")
def _escape_token(token, alphabet):
"""Escape away underscores and OOV characters and append '_'.
This allows the token to be expressed as the concatenation of a list
of subtokens from the vocabulary. The underscore acts as a sentinel
which allows us to invertibly concatenate multiple such lists.
Args:
token: A unicode string to be escaped.
alphabet: A set of all characters in the vocabulary's alphabet.
Returns:
escaped_token: An escaped unicode string.
Raises:
ValueError: If the provided token is not unicode.
"""
if not isinstance(token, six.text_type):
raise ValueError("Expected string type for token, got %s" % type(token))
token = token.replace(u"\\", u"\\\\").replace(u"_", u"\\u")
ret = [c if c in alphabet and c != u"\n" else r"\%d;" % ord(c) for c in token]
return u"".join(ret) + "_"
def _unescape_token(escaped_token):
"""Inverse of _escape_token().
Args:
escaped_token: a unicode string
Returns:
token: a unicode string
"""
def match(m):
if m.group(1) is None:
return u"_" if m.group(0) == u"\\u" else u"\\"
try:
return six.unichr(int(m.group(1)))
except (ValueError, OverflowError) as _:
return u"\u3013" # Unicode for undefined character.
trimmed = escaped_token[:-1] if escaped_token.endswith("_") else escaped_token
return _UNESCAPE_REGEX.sub(match, trimmed)
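# Illustrative sketch (editor addition, not part of the original module):
# round-trips a token through the escaping scheme described above. The token
# and alphabet are arbitrary; only characters in the alphabet stay unescaped.
def _escape_round_trip_example():
  alphabet = set(u"abcdefghijklmnopqrstuvwxyz") | _ESCAPE_CHARS
  escaped = _escape_token(u"foo_bar", alphabet)
  assert escaped == u"foo\\ubar_"  # '_' becomes '\u', then '_' is appended
  assert _unescape_token(escaped) == u"foo_bar"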
class SubwordTextEncoder(TextEncoder):
"""Class for invertibly encoding text using a limited vocabulary.
Invertibly encodes a native string as a sequence of subtokens from a limited
vocabulary.
A SubwordTextEncoder is built from a corpus (so it is tailored to the text in
the corpus), and stored to a file. See text_encoder_build_subword.py.
It can then be loaded and used to encode/decode any text.
Encoding has four phases:
1. Tokenize into a list of tokens. Each token is a unicode string of either
all alphanumeric characters or all non-alphanumeric characters. We drop
tokens consisting of a single space that are between two alphanumeric
tokens.
2. Escape each token. This escapes away special and out-of-vocabulary
characters, and makes sure that each token ends with an underscore, and
has no other underscores.
  3. Represent each escaped token as the concatenation of a list of subtokens
from the limited vocabulary. Subtoken selection is done greedily from
beginning to end. That is, we construct the list in order, always picking
the longest subtoken in our vocabulary that matches a prefix of the
remaining portion of the encoded token.
4. Concatenate these lists. This concatenation is invertible due to the
fact that the trailing underscores indicate when one list is finished.
"""
def __init__(self, filename=None):
"""Initialize and read from a file, if provided.
Args:
filename: filename from which to read vocab. If None, do not load a
vocab
"""
self._alphabet = set()
self.filename = filename
if filename is not None:
self._load_from_file(filename)
super(SubwordTextEncoder, self).__init__()
def encode(self, s):
"""Converts a native string to a list of subtoken ids.
Args:
s: a native string.
Returns:
a list of integers in the range [0, vocab_size)
"""
return self._tokens_to_subtoken_ids(
tokenizer.encode(native_to_unicode(s)))
def encode_without_tokenizing(self, token_text):
"""Converts string to list of subtoken ids without calling tokenizer.
This treats `token_text` as a single token and directly converts it
to subtoken ids. This may be useful when the default tokenizer doesn't
do what we want (e.g., when encoding text with tokens composed of lots of
nonalphanumeric characters). It is then up to the caller to make sure that
raw text is consistently converted into tokens. Only use this if you are
sure that `encode` doesn't suit your needs.
Args:
token_text: A native string representation of a single token.
Returns:
A list of subword token ids; i.e., integers in the range [0, vocab_size).
"""
return self._tokens_to_subtoken_ids([native_to_unicode(token_text)])
def decode(self, ids, strip_extraneous=False):
"""Converts a sequence of subtoken ids to a native string.
Args:
ids: a list of integers in the range [0, vocab_size)
strip_extraneous: bool, whether to strip off extraneous tokens
(EOS and PAD).
Returns:
a native string
"""
if strip_extraneous:
ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))
return unicode_to_native(
tokenizer.decode(self._subtoken_ids_to_tokens(ids)))
def decode_list(self, ids):
return [self._subtoken_id_to_subtoken_string(s) for s in ids]
@property
def vocab_size(self):
"""The subtoken vocabulary size."""
return len(self._all_subtoken_strings)
def _tokens_to_subtoken_ids(self, tokens):
"""Converts a list of tokens to a list of subtoken ids.
Args:
tokens: a list of strings.
Returns:
a list of integers in the range [0, vocab_size)
"""
ret = []
for token in tokens:
ret.extend(self._token_to_subtoken_ids(token))
return ret
def _token_to_subtoken_ids(self, token):
"""Converts token to a list of subtoken ids.
Args:
token: a string.
Returns:
a list of integers in the range [0, vocab_size)
"""
cache_location = hash(token) % self._cache_size
cache_key, cache_value = self._cache[cache_location]
if cache_key == token:
return cache_value
ret = self._escaped_token_to_subtoken_ids(
_escape_token(token, self._alphabet))
self._cache[cache_location] = (token, ret)
return ret
def _subtoken_ids_to_tokens(self, subtokens):
"""Converts a list of subtoken ids to a list of tokens.
Args:
subtokens: a list of integers in the range [0, vocab_size)
Returns:
a list of strings.
"""
concatenated = "".join(
[self._subtoken_id_to_subtoken_string(s) for s in subtokens])
split = concatenated.split("_")
ret = []
for t in split:
if t:
unescaped = _unescape_token(t + "_")
if unescaped:
ret.append(unescaped)
return ret
def _subtoken_id_to_subtoken_string(self, subtoken):
"""Converts a subtoken integer ID to a subtoken string."""
if 0 <= subtoken < self.vocab_size:
return self._all_subtoken_strings[subtoken]
return u""
def _escaped_token_to_subtoken_strings(self, escaped_token):
"""Converts an escaped token string to a list of subtoken strings.
Args:
escaped_token: An escaped token as a unicode string.
Returns:
A list of subtokens as unicode strings.
"""
# NOTE: This algorithm is greedy; it won't necessarily produce the "best"
# list of subtokens.
ret = []
start = 0
token_len = len(escaped_token)
while start < token_len:
for end in range(
min(token_len, start + self._max_subtoken_len), start, -1):
subtoken = escaped_token[start:end]
if subtoken in self._subtoken_string_to_id:
ret.append(subtoken)
start = end
break
else: # Did not break
# If there is no possible encoding of the escaped token then one of the
# characters in the token is not in the alphabet. This should be
# impossible and would be indicative of a bug.
assert False, "Token substring not found in subtoken vocabulary."
return ret
def _escaped_token_to_subtoken_ids(self, escaped_token):
"""Converts an escaped token string to a list of subtoken IDs.
Args:
escaped_token: An escaped token as a unicode string.
Returns:
A list of subtoken IDs as integers.
"""
return [
self._subtoken_string_to_id[subtoken]
for subtoken in self._escaped_token_to_subtoken_strings(escaped_token)
]
@classmethod
def build_from_generator(cls,
generator,
target_size,
max_subtoken_length=None,
reserved_tokens=None):
"""Builds a SubwordTextEncoder from the generated text.
Args:
generator: yields text.
target_size: int, approximate vocabulary size to create.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
Returns:
SubwordTextEncoder with `vocab_size` approximately `target_size`.
"""
token_counts = collections.defaultdict(int)
for item in generator:
for tok in tokenizer.encode(native_to_unicode(item)):
token_counts[tok] += 1
encoder = cls.build_to_target_size(
target_size, token_counts, 1, 1e3,
max_subtoken_length=max_subtoken_length,
reserved_tokens=reserved_tokens)
return encoder
@classmethod
def build_to_target_size(cls,
target_size,
token_counts,
min_val,
max_val,
max_subtoken_length=None,
reserved_tokens=None,
num_iterations=4):
"""Builds a SubwordTextEncoder that has `vocab_size` near `target_size`.
Uses simple recursive binary search to find a minimum token count that most
closely matches the `target_size`.
Args:
target_size: Desired vocab_size to approximate.
token_counts: A dictionary of token counts, mapping string to int.
min_val: An integer; lower bound for the minimum token count.
max_val: An integer; upper bound for the minimum token count.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
num_iterations: An integer; how many iterations of refinement.
Returns:
A SubwordTextEncoder instance.
Raises:
ValueError: If `min_val` is greater than `max_val`.
"""
if min_val > max_val:
raise ValueError("Lower bound for the minimum token count "
"is greater than the upper bound.")
if target_size < 1:
raise ValueError("Target size must be positive.")
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
def bisect(min_val, max_val):
"""Bisection to find the right size."""
present_count = (max_val + min_val) // 2
tf.logging.info("Trying min_count %d" % present_count)
subtokenizer = cls()
subtokenizer.build_from_token_counts(
token_counts, present_count, num_iterations,
max_subtoken_length=max_subtoken_length,
reserved_tokens=reserved_tokens)
# Being within 1% of the target size is ok.
is_ok = abs(subtokenizer.vocab_size - target_size) * 100 < target_size
# If min_val == max_val, we can't do any better than this.
if is_ok or min_val >= max_val or present_count < 2:
return subtokenizer
if subtokenizer.vocab_size > target_size:
other_subtokenizer = bisect(present_count + 1, max_val)
else:
other_subtokenizer = bisect(min_val, present_count - 1)
if other_subtokenizer is None:
return subtokenizer
if (abs(other_subtokenizer.vocab_size - target_size) <
abs(subtokenizer.vocab_size - target_size)):
return other_subtokenizer
return subtokenizer
return bisect(min_val, max_val)
def build_from_token_counts(self,
token_counts,
min_count,
num_iterations=4,
reserved_tokens=None,
max_subtoken_length=None):
"""Train a SubwordTextEncoder based on a dictionary of word counts.
Args:
token_counts: a dictionary of Unicode strings to int.
min_count: an integer - discard subtokens with lower counts.
num_iterations: an integer. how many iterations of refinement.
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
Raises:
ValueError: if reserved is not 0 or len(RESERVED_TOKENS). In this case, it
is not clear what the space is being reserved for, or when it will be
filled in.
"""
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
else:
# There is not complete freedom in replacing RESERVED_TOKENS.
for default, proposed in zip(RESERVED_TOKENS, reserved_tokens):
if default != proposed:
raise ValueError("RESERVED_TOKENS must be a prefix of "
"reserved_tokens.")
# Initialize the alphabet. Note, this must include reserved tokens or it can
# result in encoding failures.
alphabet_tokens = chain(six.iterkeys(token_counts),
[native_to_unicode(t) for t in reserved_tokens])
self._init_alphabet_from_tokens(alphabet_tokens)
# Bootstrap the initial list of subtokens with the characters from the
# alphabet plus the escaping characters.
self._init_subtokens_from_list(list(self._alphabet),
reserved_tokens=reserved_tokens)
# We build iteratively. On each iteration, we segment all the words,
# then count the resulting potential subtokens, keeping the ones
# with high enough counts for our new vocabulary.
if min_count < 1:
min_count = 1
for i in range(num_iterations):
tf.logging.info("Iteration {0}".format(i))
# Collect all substrings of the encoded token that break along current
# subtoken boundaries.
subtoken_counts = collections.defaultdict(int)
for token, count in six.iteritems(token_counts):
iter_start_time = time.time()
escaped_token = _escape_token(token, self._alphabet)
subtokens = self._escaped_token_to_subtoken_strings(escaped_token)
start = 0
for subtoken in subtokens:
last_position = len(escaped_token) + 1
if max_subtoken_length is not None:
last_position = min(last_position, start + max_subtoken_length)
for end in range(start + 1, last_position):
new_subtoken = escaped_token[start:end]
subtoken_counts[new_subtoken] += count
start += len(subtoken)
iter_time_secs = time.time() - iter_start_time
if iter_time_secs > 0.1:
tf.logging.info(u"Processing token [{0}] took {1} seconds, consider "
"setting Text2TextProblem.max_subtoken_length to a "
"smaller value.".format(token, iter_time_secs))
# Array of sets of candidate subtoken strings, by length.
len_to_subtoken_strings = []
for subtoken_string, count in six.iteritems(subtoken_counts):
lsub = len(subtoken_string)
if count >= min_count:
while len(len_to_subtoken_strings) <= lsub:
len_to_subtoken_strings.append(set())
len_to_subtoken_strings[lsub].add(subtoken_string)
# Consider the candidates longest to shortest, so that if we accept
# a longer subtoken string, we can decrement the counts of its prefixes.
new_subtoken_strings = []
for lsub in range(len(len_to_subtoken_strings) - 1, 0, -1):
subtoken_strings = len_to_subtoken_strings[lsub]
for subtoken_string in subtoken_strings:
count = subtoken_counts[subtoken_string]
if count >= min_count:
# Exclude alphabet tokens here, as they must be included later,
# explicitly, regardless of count.
if subtoken_string not in self._alphabet:
new_subtoken_strings.append((count, subtoken_string))
for l in range(1, lsub):
subtoken_counts[subtoken_string[:l]] -= count
# Include the alphabet explicitly to guarantee all strings are encodable.
new_subtoken_strings.extend((subtoken_counts.get(a, 0), a)
for a in self._alphabet)
new_subtoken_strings.sort(reverse=True)
# Reinitialize to the candidate vocabulary.
new_subtoken_strings = [subtoken for _, subtoken in new_subtoken_strings]
if reserved_tokens:
escaped_reserved_tokens = [
_escape_token(native_to_unicode(t), self._alphabet)
for t in reserved_tokens
]
new_subtoken_strings = escaped_reserved_tokens + new_subtoken_strings
self._init_subtokens_from_list(new_subtoken_strings)
tf.logging.info("vocab_size = %d" % self.vocab_size)
@property
def all_subtoken_strings(self):
return tuple(self._all_subtoken_strings)
def dump(self):
"""Debugging dump of the current subtoken vocabulary."""
subtoken_strings = [(i, s)
for s, i in six.iteritems(self._subtoken_string_to_id)]
print(u", ".join(u"{0} : '{1}'".format(i, s)
for i, s in sorted(subtoken_strings)))
def _init_subtokens_from_list(self, subtoken_strings, reserved_tokens=None):
"""Initialize token information from a list of subtoken strings.
Args:
subtoken_strings: a list of subtokens
reserved_tokens: List of reserved tokens. We must have `reserved_tokens`
as None or the empty list, or else the global variable `RESERVED_TOKENS`
must be a prefix of `reserved_tokens`.
Raises:
ValueError: if reserved is not 0 or len(RESERVED_TOKENS). In this case, it
is not clear what the space is being reserved for, or when it will be
filled in.
"""
if reserved_tokens is None:
reserved_tokens = []
if reserved_tokens:
self._all_subtoken_strings = reserved_tokens + subtoken_strings
else:
self._all_subtoken_strings = subtoken_strings
# we remember the maximum length of any subtoken to avoid having to
# check arbitrarily long strings.
self._max_subtoken_len = max([len(s) for s in subtoken_strings])
self._subtoken_string_to_id = {
s: i + len(reserved_tokens)
for i, s in enumerate(subtoken_strings) if s
}
# Initialize the cache to empty.
self._cache_size = 2 ** 20
self._cache = [(None, None)] * self._cache_size
def _init_alphabet_from_tokens(self, tokens):
"""Initialize alphabet from an iterable of token or subtoken strings."""
# Include all characters from all tokens in the alphabet to guarantee that
# any token can be encoded. Additionally, include all escaping characters.
self._alphabet = {c for token in tokens for c in token}
self._alphabet |= _ESCAPE_CHARS
def _load_from_file_object(self, f):
"""Load from a file object.
Args:
f: File object to load vocabulary from
"""
subtoken_strings = []
for line in f:
s = line.strip()
# Some vocab files wrap words in single quotes, but others don't
if ((s.startswith("'") and s.endswith("'")) or
(s.startswith("\"") and s.endswith("\""))):
s = s[1:-1]
subtoken_strings.append(native_to_unicode(s))
self._init_subtokens_from_list(subtoken_strings)
self._init_alphabet_from_tokens(subtoken_strings)
def _load_from_file(self, filename):
"""Load from a vocab file."""
if not tf.gfile.Exists(filename):
raise ValueError("File %s not found" % filename)
with tf.gfile.Open(filename) as f:
self._load_from_file_object(f)
def store_to_file(self, filename, add_single_quotes=True):
with tf.gfile.Open(filename, "w") as f:
for subtoken_string in self._all_subtoken_strings:
if add_single_quotes:
f.write("'" + unicode_to_native(subtoken_string) + "'\n")
else:
f.write(unicode_to_native(subtoken_string) + "\n")
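# Illustrative usage sketch (editor addition, not part of the original
# module): builds a small subword vocabulary from a toy corpus and encodes a
# sentence. The corpus and target_size are arbitrary.
def _subword_text_encoder_example():
  corpus = ["the quick brown fox", "the lazy dog", "quick brown dogs"]
  enc = SubwordTextEncoder.build_from_generator(corpus, target_size=100)
  ids = enc.encode("the quick dog")
  # Encoding is invertible (see the class docstring), so this round-trips.
  return enc.decode(ids)  # -> "the quick dog"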
class ImageEncoder(object):
"""Encoder class for saving and loading images."""
def __init__(self, num_reserved_ids=0, height=None, width=None, channels=3):
assert num_reserved_ids == 0
self._height = height
self._width = width
self._channels = channels
@property
def num_reserved_ids(self):
return 0
def encode(self, s):
"""Transform a string with a filename into a list of RGB integers.
Args:
s: path to the file with an image.
Returns:
ids: list of integers
"""
try:
import matplotlib.image as im # pylint: disable=g-import-not-at-top
except ImportError as e:
tf.logging.warning(
"Reading an image requires matplotlib to be installed: %s", e)
raise NotImplementedError("Image reading not implemented.")
return im.imread(s)
def decode(self, ids, strip_extraneous=False):
"""Transform a sequence of int ids into an image file.
Args:
ids: list of integers to be converted.
strip_extraneous: unused
Returns:
Path to the temporary file where the image was saved.
Raises:
ValueError: if the ids are not of the appropriate size.
"""
del strip_extraneous
_, tmp_file_path = tempfile.mkstemp("_decode.png")
if self._height is None or self._width is None:
size = int(math.sqrt(len(ids) / self._channels))
length = size * size * self._channels
else:
size = None
length = self._height * self._width * self._channels
if len(ids) != length:
raise ValueError("Length of ids (%d) must be height (%d) x width (%d) x "
"channels (%d); %d != %d.\n Ids: %s"
% (len(ids), self._height, self._width, self._channels,
len(ids), length, " ".join([str(i) for i in ids])))
with tf.Graph().as_default():
raw = tf.constant(ids, dtype=tf.uint8)
if size is None:
img = tf.reshape(raw, [self._height, self._width, self._channels])
else:
img = tf.reshape(raw, [size, size, self._channels])
png = tf.image.encode_png(img)
op = tf.write_file(tmp_file_path, png)
with tf.Session() as sess:
sess.run(op)
return tmp_file_path
def decode_list(self, ids):
"""Transform a sequence of int ids into an image file.
Args:
ids: list of integers to be converted.
Returns:
Singleton list: path to the temporary file where the image was saved.
"""
return [self.decode(ids)]
@property
def vocab_size(self):
return 256
class RealEncoder(object):
"""Encoder class for saving and loading float values."""
def encode(self, s):
"""Transform a string (space separated float values) into a float array.
Args:
s: space separated float values.
Returns:
Array of float values.
"""
return [float(w) for w in s.split()]
def decode(self, ids, strip_extraneous=False):
"""Transform sequence of float values into string (float values).
Args:
ids: array of floats to be converted.
strip_extraneous: unused
Returns:
String having space separated float values.
Raises:
ValueError: if the ids are not of the appropriate size.
"""
del strip_extraneous
return " ".join([str(i) for i in ids])
| apache-2.0 |
fabioticconi/scikit-learn | sklearn/feature_selection/__init__.py | 140 | 1302 |
"""
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
from .from_model import SelectFromModel
from .mutual_info_ import mutual_info_regression, mutual_info_classif
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectFromModel',
'SelectPercentile',
'VarianceThreshold',
'chi2',
'f_classif',
'f_oneway',
'f_regression',
'mutual_info_classif',
'mutual_info_regression']
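# Illustrative usage sketch (editor addition, not part of the scikit-learn
# package): univariate selection keeps the k features with the highest test
# scores. The toy data below is arbitrary.
def _select_k_best_example():
    import numpy as np
    X = np.array([[0., 2., 0.], [1., 1., 3.], [2., 0., 1.], [3., 1., 4.]])
    y = np.array([0, 0, 1, 1])
    X_new = SelectKBest(f_classif, k=2).fit_transform(X, y)
    return X_new.shape  # (4, 2)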
| bsd-3-clause |
shaunstanislaus/pandashells | pandashells/test/p_facet_grid_test.py | 10 | 1623 |
#! /usr/bin/env python
from mock import patch
from unittest import TestCase
import pandas as pd
from pandashells.bin.p_facet_grid import main
class MainTests(TestCase):
@patch(
'pandashells.bin.p_facet_grid.sys.argv',
'p.facet_grid --row c --map pl.plot --args a b'.split())
@patch('pandashells.bin.p_facet_grid.io_lib.df_from_input')
@patch('pandashells.bin.p_facet_grid.plot_lib.show')
def test_no_kwargs(self, show_mock, input_mock):
import pylab as pl
df_in = pd.DataFrame([
{'a': 1, 'b': 10, 'c': 'alpha'},
{'a': 2, 'b': 20, 'c': 'alpha'},
{'a': 3, 'b': 30, 'c': 'beta'},
{'a': 4, 'b': 40, 'c': 'beta'},
])
input_mock.return_value = df_in
main()
self.assertEqual(len(pl.gcf().axes), 2)
self.assertTrue(show_mock.called)
@patch(
'pandashells.bin.p_facet_grid.sys.argv',
(
'p.facet_grid --row c --map pl.scatter '
'--args a b --kwargs s=100'.split()
)
)
@patch('pandashells.bin.p_facet_grid.io_lib.df_from_input')
@patch('pandashells.bin.p_facet_grid.plot_lib.show')
def test_with_kwargs(self, show_mock, input_mock):
import pylab as pl
df_in = pd.DataFrame([
{'a': 1, 'b': 10, 'c': 'alpha'},
{'a': 2, 'b': 20, 'c': 'alpha'},
{'a': 3, 'b': 30, 'c': 'beta'},
{'a': 4, 'b': 40, 'c': 'beta'},
])
input_mock.return_value = df_in
main()
self.assertEqual(len(pl.gcf().axes), 2)
self.assertTrue(show_mock.called)
| bsd-2-clause |
IshankGulati/scikit-learn | sklearn/linear_model/logistic.py | 13 | 67587 |
"""
Logistic Regression
"""
# Author: Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Manoj Kumar <[email protected]>
# Lars Buitinck
# Simon Wu <[email protected]>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from .sag import sag_solver
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
softmax, squared_norm)
from ..utils.extmath import row_norms
from ..utils.optimize import newton_cg
from ..utils.validation import check_X_y
from ..exceptions import NotFittedError
from ..utils.fixes import expit
from ..utils.multiclass import check_classification_targets
from ..externals.joblib import Parallel, delayed
from ..model_selection import check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
Returns
-------
w : ndarray, shape (n_features,)
Coefficient vector without the intercept weight (w[-1]) if the
intercept should be fit. Unchanged otherwise.
    c : float
        The intercept.
    yz : ndarray, shape (n_samples,)
        y * np.dot(X, w).
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
yz = y * z
return w, c, yz
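# Illustrative numeric check (editor addition, not part of scikit-learn):
# with an appended intercept weight, the last entry of w is treated as the
# intercept c and stripped before the dot product. Toy values are arbitrary.
def _intercept_dot_example():
    X = np.array([[1., 2.], [3., 4.]])
    w = np.array([.5, -.5, 1.])  # last entry is the intercept
    y = np.array([1., -1.])
    w_out, c, yz = _intercept_dot(w, X, y)
    # z = X.dot([.5, -.5]) + 1. = [0.5, 0.5]; yz = y * z = [0.5, -0.5]
    return w_out, c, yz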
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(n_samples)
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
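# Illustrative numeric check (editor addition, not part of scikit-learn):
# the analytic gradient returned above can be verified against finite
# differences with scipy.optimize.check_grad. The toy data is arbitrary.
def _logistic_grad_check_example():
    from scipy.optimize import check_grad
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = np.sign(rng.randn(20))
    w0 = rng.randn(3)
    loss = lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0]
    grad = lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[1]
    return check_grad(loss, grad, w0)  # should be close to zero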
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
    References
    ----------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
    References
    ----------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
def _check_solver_option(solver, multi_class, penalty, dual):
if solver not in ['liblinear', 'newton-cg', 'lbfgs', 'sag']:
raise ValueError("Logistic Regression supports only liblinear,"
" newton-cg, lbfgs and sag solvers, got %s" % solver)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class should be either multinomial or "
"ovr, got %s" % multi_class)
if multi_class == 'multinomial' and solver == 'liblinear':
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
if solver != 'liblinear':
if penalty != 'l2':
raise ValueError("Solver %s supports only l2 penalties, "
"got %s penalty." % (solver, penalty))
if dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Note that there will be no speedup with liblinear solver, since it does
not handle warm-starting.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array, shape (n_cs,)
Actual number of iteration for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
.. versionchanged:: 0.19
The "copy" parameter was removed.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
_check_solver_option(solver, multi_class, penalty, dual)
# Preprocessing.
if check_input:
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
if sample_weight is not None:
sample_weight = np.array(sample_weight, dtype=np.float64, order='C')
check_consistent_length(y, sample_weight)
else:
sample_weight = np.ones(X.shape[0])
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "balanced", then
    # the class_weights are assigned after masking the labels with an OvR.
le = LabelEncoder()
if isinstance(class_weight, dict) or multi_class == 'multinomial':
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight *= class_weight_[le.fit_transform(y)]
    # For doing an OvR fit, we need to mask the labels first. For the
    # multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept))
mask_classes = np.array([-1, 1])
mask = (y == pos_class)
y_bin = np.ones(y.shape, dtype=np.float64)
y_bin[~mask] = -1.
# for compute_class_weight
if class_weight == "balanced":
class_weight_ = compute_class_weight(class_weight, mask_classes,
y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
else:
if solver != 'sag':
lbin = LabelBinarizer()
Y_multi = lbin.fit_transform(y)
if Y_multi.shape[1] == 1:
Y_multi = np.hstack([1 - Y_multi, Y_multi])
else:
# SAG multinomial solver needs LabelEncoder, not LabelBinarizer
le = LabelEncoder()
Y_multi = le.fit_transform(y)
w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
order='F')
if coef is not None:
        # it must work both when the bias term is given and when it is not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_classes = classes.size
if n_classes == 2:
n_classes = 1
if (coef.shape[0] != n_classes or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
        # fmin_l_bfgs_b and newton-cg accept only ravelled parameters.
if solver in ['lbfgs', 'newton-cg']:
w0 = w0.ravel()
target = Y_multi
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
warm_start_sag = {'coef': w0.T}
else:
target = y_bin
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}
coefs = list()
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
            try:
                n_iter_i = info['nit'] - 1
            except KeyError:
                # older scipy versions do not report 'nit' in the info dict
                n_iter_i = info['funcalls'] - 1
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0, n_iter_i = newton_cg(hess, func, grad, w0, args=args,
maxiter=max_iter, tol=tol)
elif solver == 'liblinear':
coef_, intercept_, n_iter_i, = _fit_liblinear(
X, target, C, fit_intercept, intercept_scaling, None,
penalty, dual, verbose, max_iter, tol, random_state,
sample_weight=sample_weight)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver == 'sag':
if multi_class == 'multinomial':
target = target.astype(np.float64)
loss = 'multinomial'
else:
loss = 'log'
w0, n_iter_i, warm_start_sag = sag_solver(
X, target, sample_weight, loss, 1. / C, max_iter, tol,
verbose, random_state, False, max_squared_sum, warm_start_sag)
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg', 'sag'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0)
else:
coefs.append(w0.copy())
n_iter[i] = n_iter_i
return coefs, np.array(Cs), n_iter
# helper function for LogisticRegressionCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, intercept_scaling=1.,
multi_class='ovr', random_state=None,
max_squared_sum=None, sample_weight=None):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
        regularization strength. If Cs is an int, then a grid of Cs
        values is chosen in a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs are used.
scoring : callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`. The
default scoring option used is accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solver.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
n_iter : array, shape(n_cs,)
Actual number of iteration for each Cs.
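    Examples
    --------
    A minimal illustrative sketch of a single train/test split (toy data;
    in practice this helper is driven by ``LogisticRegressionCV.fit`` once
    per cross-validation fold):
    >>> import numpy as np
    >>> X = np.array([[0.], [1.], [2.], [3.]])
    >>> y = np.array([0, 0, 1, 1])
    >>> train, test = np.array([0, 1, 3]), np.array([2])
    >>> coefs, Cs, scores, n_iter = _log_reg_scoring_path(
    ...     X, y, train, test, Cs=[1.0], solver='lbfgs')
    >>> len(coefs)
    1
    >>> scores.shape
    (1,)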
"""
_check_solver_option(solver, multi_class, penalty, dual)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
check_consistent_length(y, sample_weight)
sample_weight = sample_weight[train]
coefs, Cs, n_iter = logistic_regression_path(
X_train, y_train, Cs=Cs, fit_intercept=fit_intercept,
solver=solver, max_iter=max_iter, class_weight=class_weight,
pos_class=pos_class, multi_class=multi_class,
tol=tol, verbose=verbose, dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling, random_state=random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
log_reg = LogisticRegression(fit_intercept=fit_intercept)
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test = np.ones(y_test.shape, dtype=np.float64)
y_test[~mask] = -1.
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores), n_iter
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr', and uses the cross-
entropy loss if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs',
'sag' and 'newton-cg' solvers.)
This class implements regularized logistic regression using the
'liblinear' library, 'newton-cg', 'sag' and 'lbfgs' solvers. It can handle
both dense and sparse input. Use C-ordered arrays or CSR matrices
containing 64-bit floats for optimal performance; any other input format
will be converted (and copied).
The 'newton-cg', 'sag', and 'lbfgs' solvers support only L2 regularization
with primal formulation. The 'liblinear' solver supports both L1 and L2
regularization, with a dual formulation only for the L2 penalty.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1' or 'l2', default: 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
dual : bool, default: False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, default: 1.0
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', default: None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
*class_weight='balanced'*
max_iter : int, default: 100
Useful only for the newton-cg, sag and lbfgs solvers.
Maximum number of iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, default: None
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}, default: 'liblinear'
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag' and 'lbfgs' handle
multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
Note that 'sag' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float, default: 1e-4
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}, default: 'ovr'
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'newton-cg',
'sag' and 'lbfgs' solver.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
verbose : int, default: 0
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
warm_start : bool, default: False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Useless for liblinear solver.
.. versionadded:: 0.17
*warm_start* to support *lbfgs*, *newton-cg*, *sag* solvers.
n_jobs : int, default: 1
Number of CPU cores used when parallelizing over classes
if multi_class='ovr'".
If given a value of -1, all cores are used.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
`intercept_` is of shape(1,) when the problem is binary.
n_iter_ : array, shape (n_classes,) or (1, )
Actual number of iterations for all classes. If binary or multinomial,
it returns only 1 element. For liblinear solver, only the maximum
number of iteration across all classes is given.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
SAG -- Mark Schmidt, Nicolas Le Roux, and Francis Bach
Minimizing Finite Sums with the Stochastic Average Gradient
https://hal.inria.fr/hal-00860051/document
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
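    Examples
    --------
    A minimal illustrative fit on toy data (the data and settings below are
    chosen only for demonstration):
    >>> import numpy as np
    >>> from sklearn.linear_model import LogisticRegression
    >>> X = np.array([[0., 0.], [1., 1.], [2., 2.], [3., 3.]])
    >>> y = np.array([0, 0, 1, 1])
    >>> clf = LogisticRegression(C=1.0, solver='liblinear').fit(X, y)
    >>> clf.predict([[0., 0.], [3., 3.]])
    array([0, 1])
    >>> clf.predict_proba([[1.5, 1.5]]).shape
    (1, 2)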
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0, warm_start=False, n_jobs=1):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
.. versionadded:: 0.17
*sample_weight* support to LogisticRegression.
Returns
-------
self : object
Returns self.
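        Examples
        --------
        A minimal illustrative sketch of per-sample weighting (toy data and
        weights chosen only for demonstration):
        >>> import numpy as np
        >>> from sklearn.linear_model import LogisticRegression
        >>> X = np.array([[0., 0.], [1., 1.], [2., 2.], [3., 3.]])
        >>> y = np.array([0, 0, 1, 1])
        >>> sw = np.array([1., 1., 2., 2.])
        >>> clf = LogisticRegression().fit(X, y, sample_weight=sw)
        >>> clf.coef_.shape
        (1, 2)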
"""
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
check_classification_targets(y)
self.classes_ = np.unique(y)
n_samples, n_features = X.shape
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if self.solver == 'liblinear':
self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state,
sample_weight=sample_weight)
self.n_iter_ = np.array([n_iter_])
return self
if self.solver == 'sag':
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
if self.warm_start:
warm_start_coef = getattr(self, 'coef_', None)
else:
warm_start_coef = None
if warm_start_coef is not None and self.fit_intercept:
warm_start_coef = np.append(warm_start_coef,
self.intercept_[:, np.newaxis],
axis=1)
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
warm_start_coef = [warm_start_coef]
if warm_start_coef is None:
warm_start_coef = [None] * n_classes
path_func = delayed(logistic_regression_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight, check_input=False,
random_state=self.random_state, coef=warm_start_coef_,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
for (class_, warm_start_coef_) in zip(classes_, warm_start_coef))
fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]
if self.multi_class == 'multinomial':
self.coef_ = fold_coefs_[0][0]
else:
self.coef_ = np.asarray(fold_coefs_)
self.coef_ = self.coef_.reshape(n_classes, n_features +
int(self.fit_intercept))
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
        For a multi_class problem, if multi_class is set to "multinomial"
        the softmax function is used to find the predicted probability of
        each class.
        Else use a one-vs-rest approach, i.e. calculate the probability
        of each class assuming it to be positive using the logistic function,
        and normalize these values across all the classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
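        Examples
        --------
        An illustrative sketch on toy data; in either regime each row of
        the returned array sums to one:
        >>> import numpy as np
        >>> from sklearn.linear_model import LogisticRegression
        >>> X = np.array([[0., 0.], [1., 1.], [2., 2.], [3., 3.]])
        >>> y = np.array([0, 0, 1, 1])
        >>> proba = LogisticRegression().fit(X, y).predict_proba(X)
        >>> proba.shape
        (4, 2)
        >>> bool(np.allclose(proba.sum(axis=1), 1.0))
        True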
"""
if not hasattr(self, "coef_"):
raise NotFittedError("Call fit before prediction")
calculate_ovr = self.coef_.shape[0] == 1 or self.multi_class == "ovr"
if calculate_ovr:
return super(LogisticRegression, self)._predict_proba_lr(X)
else:
return softmax(self.decision_function(X), copy=False)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
This class implements logistic regression using liblinear, newton-cg, sag
    or lbfgs optimizers. The newton-cg, sag and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
    using the cv parameter. In the case of the newton-cg and lbfgs solvers,
    we warm start along the path, i.e. the initial coefficients of the
    present fit are taken to be the coefficients obtained after convergence
    in the previous fit, so it is expected to be faster for high-dimensional
    dense data.
    For a multiclass problem, the hyperparameters for each class are computed
    using the best scores obtained by doing a one-vs-rest in parallel across
    all folds and classes. Hence this is not the true multinomial loss.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
        strength. If Cs is an int, then a grid of Cs values is chosen
in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
class_weight == 'balanced'
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.model_selection` module for the
list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
scoring : string, callable, or None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`. The
default scoring option used is 'accuracy'.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag' and 'lbfgs' handle
multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
- 'liblinear' might be slower in LogisticRegressionCV because it does
not handle warm-starting.
Note that 'sag' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any
positive number for verbosity.
refit : bool
If set to True, the scores are averaged across all folds, and the
coefs and the C that corresponds to the best score is taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'newton-cg',
'sag' and 'lbfgs' solver.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
`intercept_` is of shape(1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \
``(n_folds, len(Cs_), n_features + 1)``
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
all classes, since this is the multinomial class.
Each dict value has shape (n_folds, len(Cs))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
n_iter_ : array, shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs)
Actual number of iterations for all classes, folds and Cs.
In the binary or multinomial cases, the first dimension is equal to 1.
See also
--------
LogisticRegression
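    Examples
    --------
    A minimal illustrative sketch on toy data (three folds over a small
    grid of Cs; data and settings chosen only for demonstration):
    >>> import numpy as np
    >>> from sklearn.linear_model import LogisticRegressionCV
    >>> X = np.array([[0., 0.], [1., 1.], [2., 2.],
    ...               [3., 3.], [4., 4.], [5., 5.]])
    >>> y = np.array([0, 0, 0, 1, 1, 1])
    >>> clf = LogisticRegressionCV(Cs=5, cv=3).fit(X, y)
    >>> clf.Cs_.shape
    (5,)
    >>> clf.scores_[1].shape
    (3, 5)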
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr',
random_state=None):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
Returns self.
"""
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
check_classification_targets(y)
class_weight = self.class_weight
# Encode for string labels
label_encoder = LabelEncoder().fit(y)
y = label_encoder.transform(y)
if isinstance(class_weight, dict):
class_weight = dict((label_encoder.transform([cls])[0], v)
for cls, v in class_weight.items())
# The original class labels
classes = self.classes_ = label_encoder.classes_
encoded_labels = label_encoder.transform(label_encoder.classes_)
if self.solver == 'sag':
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
# init cross-validation generator
cv = check_cv(self.cv, y, classifier=True)
folds = list(cv.split(X, y))
# Use the label encoded classes
n_classes = len(encoded_labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
encoded_labels = encoded_labels[1:]
classes = classes[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
if self.multi_class == 'multinomial':
iter_encoded_labels = iter_classes = [None]
else:
iter_encoded_labels = encoded_labels
iter_classes = classes
# compute the class weights for the entire dataset y
if class_weight == "balanced":
class_weight = compute_class_weight(class_weight,
np.arange(len(self.classes_)),
y)
class_weight = dict(enumerate(class_weight))
path_func = delayed(_log_reg_scoring_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling,
random_state=self.random_state,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight
)
for label in iter_encoded_labels
for train, test in folds)
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores, n_iter_ = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
            # coefs_paths is now n_folds X len(Cs) X n_classes X n_features
            # and we need it to be n_classes X n_folds X len(Cs) X n_features
            # to match the per-class layout used for "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
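            # e.g. with 3 folds, 10 Cs, 4 classes and 5 features the array
            # goes from shape (3, 10, 4, 5) to (4, 3, 10, 5) (illustrative
            # shapes only)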
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
self.n_iter_ = np.reshape(n_iter_, (1, len(folds),
len(self.Cs_)))
else:
coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.n_iter_ = np.reshape(n_iter_, (n_classes, len(folds),
len(self.Cs_)))
self.coefs_paths_ = dict(zip(classes, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(classes, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, (cls, encoded_label) in enumerate(
zip(iter_classes, iter_encoded_labels)):
if self.multi_class == 'ovr':
# The scores_ / coefs_paths_ dict have unencoded class
# labels as their keys
scores = self.scores_[cls]
coefs_paths = self.coefs_paths_[cls]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
# Note that y is label encoded and hence pos_class must be
# the encoded label / None (for 'multinomial')
w, _, _ = logistic_regression_path(
X, y, pos_class=encoded_label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty,
class_weight=class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1),
random_state=self.random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([coefs_paths[i][best_indices[i]]
for i in range(len(folds))], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
|
bsd-3-clause
|
jreback/pandas
|
pandas/tests/dtypes/test_dtypes.py
|
1
|
36491
|
import re
import numpy as np
import pytest
import pytz
from pandas.core.dtypes.base import registry
from pandas.core.dtypes.common import (
is_bool_dtype,
is_categorical,
is_categorical_dtype,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
is_interval_dtype,
is_period_dtype,
is_string_dtype,
)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DatetimeIndex,
IntervalIndex,
Series,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays.sparse import SparseArray, SparseDtype
class Base:
def test_hash(self, dtype):
hash(dtype)
def test_equality_invalid(self, dtype):
assert not dtype == "foo"
assert not is_dtype_equal(dtype, np.int64)
def test_numpy_informed(self, dtype):
# npdev 2020-02-02 changed from "data type not understood" to
# "Cannot interpret 'foo' as a data type"
msg = "|".join(
["data type not understood", "Cannot interpret '.*' as a data type"]
)
with pytest.raises(TypeError, match=msg):
np.dtype(dtype)
assert not dtype == np.str_
assert not np.str_ == dtype
def test_pickle(self, dtype):
# make sure our cache is NOT pickled
# clear the cache
type(dtype).reset_cache()
assert not len(dtype._cache)
# force back to the cache
result = tm.round_trip_pickle(dtype)
if not isinstance(dtype, PeriodDtype):
# Because PeriodDtype has a cython class as a base class,
# it has different pickle semantics, and its cache is re-populated
# on un-pickling.
assert not len(dtype._cache)
assert result == dtype
class TestCategoricalDtype(Base):
@pytest.fixture
def dtype(self):
"""
Class level fixture of dtype for TestCategoricalDtype
"""
return CategoricalDtype()
def test_hash_vs_equality(self, dtype):
dtype2 = CategoricalDtype()
assert dtype == dtype2
assert dtype2 == dtype
assert hash(dtype) == hash(dtype2)
def test_equality(self, dtype):
assert dtype == "category"
assert is_dtype_equal(dtype, "category")
assert "category" == dtype
assert is_dtype_equal("category", dtype)
assert dtype == CategoricalDtype()
assert is_dtype_equal(dtype, CategoricalDtype())
assert CategoricalDtype() == dtype
assert is_dtype_equal(CategoricalDtype(), dtype)
assert dtype != "foo"
assert not is_dtype_equal(dtype, "foo")
assert "foo" != dtype
assert not is_dtype_equal("foo", dtype)
def test_construction_from_string(self, dtype):
result = CategoricalDtype.construct_from_string("category")
assert is_dtype_equal(dtype, result)
msg = "Cannot construct a 'CategoricalDtype' from 'foo'"
with pytest.raises(TypeError, match=msg):
CategoricalDtype.construct_from_string("foo")
def test_constructor_invalid(self):
msg = "Parameter 'categories' must be list-like"
with pytest.raises(TypeError, match=msg):
CategoricalDtype("category")
dtype1 = CategoricalDtype(["a", "b"], ordered=True)
dtype2 = CategoricalDtype(["x", "y"], ordered=False)
c = Categorical([0, 1], dtype=dtype1, fastpath=True)
@pytest.mark.parametrize(
"values, categories, ordered, dtype, expected",
[
[None, None, None, None, CategoricalDtype()],
[None, ["a", "b"], True, None, dtype1],
[c, None, None, dtype2, dtype2],
[c, ["x", "y"], False, None, dtype2],
],
)
def test_from_values_or_dtype(self, values, categories, ordered, dtype, expected):
result = CategoricalDtype._from_values_or_dtype(
values, categories, ordered, dtype
)
assert result == expected
@pytest.mark.parametrize(
"values, categories, ordered, dtype",
[
[None, ["a", "b"], True, dtype2],
[None, ["a", "b"], None, dtype2],
[None, None, True, dtype2],
],
)
def test_from_values_or_dtype_raises(self, values, categories, ordered, dtype):
msg = "Cannot specify `categories` or `ordered` together with `dtype`."
with pytest.raises(ValueError, match=msg):
CategoricalDtype._from_values_or_dtype(values, categories, ordered, dtype)
def test_from_values_or_dtype_invalid_dtype(self):
msg = "Cannot not construct CategoricalDtype from <class 'object'>"
with pytest.raises(ValueError, match=msg):
CategoricalDtype._from_values_or_dtype(None, None, None, object)
def test_is_dtype(self, dtype):
assert CategoricalDtype.is_dtype(dtype)
assert CategoricalDtype.is_dtype("category")
assert CategoricalDtype.is_dtype(CategoricalDtype())
assert not CategoricalDtype.is_dtype("foo")
assert not CategoricalDtype.is_dtype(np.float64)
def test_basic(self, dtype):
assert is_categorical_dtype(dtype)
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])
s = Series(factor, name="A")
# dtypes
assert is_categorical_dtype(s.dtype)
assert is_categorical_dtype(s)
assert not is_categorical_dtype(np.dtype("float64"))
with tm.assert_produces_warning(FutureWarning):
# GH#33385 deprecated
assert is_categorical(s.dtype)
assert is_categorical(s)
assert not is_categorical(np.dtype("float64"))
assert not is_categorical(1.0)
def test_tuple_categories(self):
categories = [(1, "a"), (2, "b"), (3, "c")]
result = CategoricalDtype(categories)
assert all(result.categories == categories)
@pytest.mark.parametrize(
"categories, expected",
[
([True, False], True),
([True, False, None], True),
([True, False, "a", "b'"], False),
([0, 1], False),
],
)
def test_is_boolean(self, categories, expected):
cat = Categorical(categories)
assert cat.dtype._is_boolean is expected
assert is_bool_dtype(cat) is expected
assert is_bool_dtype(cat.dtype) is expected
def test_dtype_specific_categorical_dtype(self):
expected = "datetime64[ns]"
result = str(Categorical(DatetimeIndex([])).categories.dtype)
assert result == expected
def test_not_string(self):
# though CategoricalDtype has object kind, it cannot be string
assert not is_string_dtype(CategoricalDtype())
def test_repr_range_categories(self):
rng = pd.Index(range(3))
dtype = CategoricalDtype(categories=rng, ordered=False)
result = repr(dtype)
expected = "CategoricalDtype(categories=range(0, 3), ordered=False)"
assert result == expected
def test_update_dtype(self):
# GH 27338
result = CategoricalDtype(["a"]).update_dtype(Categorical(["b"], ordered=True))
expected = CategoricalDtype(["b"], ordered=True)
assert result == expected
class TestDatetimeTZDtype(Base):
@pytest.fixture
def dtype(self):
"""
Class level fixture of dtype for TestDatetimeTZDtype
"""
return DatetimeTZDtype("ns", "US/Eastern")
def test_alias_to_unit_raises(self):
# 23990
with pytest.raises(ValueError, match="Passing a dtype alias"):
DatetimeTZDtype("datetime64[ns, US/Central]")
def test_alias_to_unit_bad_alias_raises(self):
# 23990
with pytest.raises(TypeError, match=""):
DatetimeTZDtype("this is a bad string")
with pytest.raises(TypeError, match=""):
DatetimeTZDtype("datetime64[ns, US/NotATZ]")
def test_hash_vs_equality(self, dtype):
# make sure that we satisfy is semantics
dtype2 = DatetimeTZDtype("ns", "US/Eastern")
dtype3 = DatetimeTZDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
dtype4 = DatetimeTZDtype("ns", "US/Central")
assert dtype2 != dtype4
assert hash(dtype2) != hash(dtype4)
def test_construction(self):
msg = "DatetimeTZDtype only supports ns units"
with pytest.raises(ValueError, match=msg):
DatetimeTZDtype("ms", "US/Eastern")
def test_subclass(self):
a = DatetimeTZDtype.construct_from_string("datetime64[ns, US/Eastern]")
b = DatetimeTZDtype.construct_from_string("datetime64[ns, CET]")
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_compat(self, dtype):
assert is_datetime64tz_dtype(dtype)
assert is_datetime64tz_dtype("datetime64[ns, US/Eastern]")
assert is_datetime64_any_dtype(dtype)
assert is_datetime64_any_dtype("datetime64[ns, US/Eastern]")
assert is_datetime64_ns_dtype(dtype)
assert is_datetime64_ns_dtype("datetime64[ns, US/Eastern]")
assert not is_datetime64_dtype(dtype)
assert not is_datetime64_dtype("datetime64[ns, US/Eastern]")
def test_construction_from_string(self, dtype):
result = DatetimeTZDtype.construct_from_string("datetime64[ns, US/Eastern]")
assert is_dtype_equal(dtype, result)
@pytest.mark.parametrize(
"string",
[
"foo",
"datetime64[ns, notatz]",
# non-nano unit
"datetime64[ps, UTC]",
# dateutil str that returns None from gettz
"datetime64[ns, dateutil/invalid]",
],
)
def test_construct_from_string_invalid_raises(self, string):
msg = f"Cannot construct a 'DatetimeTZDtype' from '{string}'"
with pytest.raises(TypeError, match=re.escape(msg)):
DatetimeTZDtype.construct_from_string(string)
def test_construct_from_string_wrong_type_raises(self):
msg = "'construct_from_string' expects a string, got <class 'list'>"
with pytest.raises(TypeError, match=msg):
DatetimeTZDtype.construct_from_string(["datetime64[ns, notatz]"])
def test_is_dtype(self, dtype):
assert not DatetimeTZDtype.is_dtype(None)
assert DatetimeTZDtype.is_dtype(dtype)
assert DatetimeTZDtype.is_dtype("datetime64[ns, US/Eastern]")
assert DatetimeTZDtype.is_dtype("M8[ns, US/Eastern]")
assert not DatetimeTZDtype.is_dtype("foo")
assert DatetimeTZDtype.is_dtype(DatetimeTZDtype("ns", "US/Pacific"))
assert not DatetimeTZDtype.is_dtype(np.float64)
def test_equality(self, dtype):
assert is_dtype_equal(dtype, "datetime64[ns, US/Eastern]")
assert is_dtype_equal(dtype, "M8[ns, US/Eastern]")
assert is_dtype_equal(dtype, DatetimeTZDtype("ns", "US/Eastern"))
assert not is_dtype_equal(dtype, "foo")
assert not is_dtype_equal(dtype, DatetimeTZDtype("ns", "CET"))
assert not is_dtype_equal(
DatetimeTZDtype("ns", "US/Eastern"), DatetimeTZDtype("ns", "US/Pacific")
)
# numpy compat
assert is_dtype_equal(np.dtype("M8[ns]"), "datetime64[ns]")
assert dtype == "M8[ns, US/Eastern]"
def test_basic(self, dtype):
assert is_datetime64tz_dtype(dtype)
dr = date_range("20130101", periods=3, tz="US/Eastern")
s = Series(dr, name="A")
# dtypes
assert is_datetime64tz_dtype(s.dtype)
assert is_datetime64tz_dtype(s)
assert not is_datetime64tz_dtype(np.dtype("float64"))
assert not is_datetime64tz_dtype(1.0)
def test_dst(self):
dr1 = date_range("2013-01-01", periods=3, tz="US/Eastern")
s1 = Series(dr1, name="A")
assert is_datetime64tz_dtype(s1)
dr2 = date_range("2013-08-01", periods=3, tz="US/Eastern")
s2 = Series(dr2, name="A")
assert is_datetime64tz_dtype(s2)
assert s1.dtype == s2.dtype
@pytest.mark.parametrize("tz", ["UTC", "US/Eastern"])
@pytest.mark.parametrize("constructor", ["M8", "datetime64"])
def test_parser(self, tz, constructor):
# pr #11245
dtz_str = f"{constructor}[ns, {tz}]"
result = DatetimeTZDtype.construct_from_string(dtz_str)
expected = DatetimeTZDtype("ns", tz)
assert result == expected
def test_empty(self):
with pytest.raises(TypeError, match="A 'tz' is required."):
DatetimeTZDtype()
def test_tz_standardize(self):
# GH 24713
tz = pytz.timezone("US/Eastern")
dr = date_range("2013-01-01", periods=3, tz="US/Eastern")
dtype = DatetimeTZDtype("ns", dr.tz)
assert dtype.tz == tz
dtype = DatetimeTZDtype("ns", dr[0].tz)
assert dtype.tz == tz
class TestPeriodDtype(Base):
@pytest.fixture
def dtype(self):
"""
Class level fixture of dtype for TestPeriodDtype
"""
return PeriodDtype("D")
def test_hash_vs_equality(self, dtype):
# make sure that we satisfy is semantics
dtype2 = PeriodDtype("D")
dtype3 = PeriodDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert dtype is dtype2
assert dtype2 is dtype
assert dtype3 is dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
def test_construction(self):
with pytest.raises(ValueError, match="Invalid frequency: xx"):
PeriodDtype("xx")
for s in ["period[D]", "Period[D]", "D"]:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Day()
assert is_period_dtype(dt)
for s in ["period[3D]", "Period[3D]", "3D"]:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Day(3)
assert is_period_dtype(dt)
for s in [
"period[26H]",
"Period[26H]",
"26H",
"period[1D2H]",
"Period[1D2H]",
"1D2H",
]:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Hour(26)
assert is_period_dtype(dt)
def test_subclass(self):
a = PeriodDtype("period[D]")
b = PeriodDtype("period[3D]")
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_identity(self):
assert PeriodDtype("period[D]") == PeriodDtype("period[D]")
assert PeriodDtype("period[D]") is PeriodDtype("period[D]")
assert PeriodDtype("period[3D]") == PeriodDtype("period[3D]")
assert PeriodDtype("period[3D]") is PeriodDtype("period[3D]")
assert PeriodDtype("period[1S1U]") == PeriodDtype("period[1000001U]")
assert PeriodDtype("period[1S1U]") is PeriodDtype("period[1000001U]")
def test_compat(self, dtype):
assert not is_datetime64_ns_dtype(dtype)
assert not is_datetime64_ns_dtype("period[D]")
assert not is_datetime64_dtype(dtype)
assert not is_datetime64_dtype("period[D]")
def test_construction_from_string(self, dtype):
result = PeriodDtype("period[D]")
assert is_dtype_equal(dtype, result)
result = PeriodDtype.construct_from_string("period[D]")
assert is_dtype_equal(dtype, result)
with pytest.raises(TypeError, match="list"):
PeriodDtype.construct_from_string([1, 2, 3])
@pytest.mark.parametrize(
"string",
[
"foo",
"period[foo]",
"foo[D]",
"datetime64[ns]",
"datetime64[ns, US/Eastern]",
],
)
def test_construct_dtype_from_string_invalid_raises(self, string):
msg = f"Cannot construct a 'PeriodDtype' from '{string}'"
with pytest.raises(TypeError, match=re.escape(msg)):
PeriodDtype.construct_from_string(string)
def test_is_dtype(self, dtype):
assert PeriodDtype.is_dtype(dtype)
assert PeriodDtype.is_dtype("period[D]")
assert PeriodDtype.is_dtype("period[3D]")
assert PeriodDtype.is_dtype(PeriodDtype("3D"))
assert PeriodDtype.is_dtype("period[U]")
assert PeriodDtype.is_dtype("period[S]")
assert PeriodDtype.is_dtype(PeriodDtype("U"))
assert PeriodDtype.is_dtype(PeriodDtype("S"))
assert not PeriodDtype.is_dtype("D")
assert not PeriodDtype.is_dtype("3D")
assert not PeriodDtype.is_dtype("U")
assert not PeriodDtype.is_dtype("S")
assert not PeriodDtype.is_dtype("foo")
assert not PeriodDtype.is_dtype(np.object_)
assert not PeriodDtype.is_dtype(np.int64)
assert not PeriodDtype.is_dtype(np.float64)
def test_equality(self, dtype):
assert is_dtype_equal(dtype, "period[D]")
assert is_dtype_equal(dtype, PeriodDtype("D"))
assert is_dtype_equal(dtype, PeriodDtype("D"))
assert is_dtype_equal(PeriodDtype("D"), PeriodDtype("D"))
assert not is_dtype_equal(dtype, "D")
assert not is_dtype_equal(PeriodDtype("D"), PeriodDtype("2D"))
def test_basic(self, dtype):
assert is_period_dtype(dtype)
pidx = pd.period_range("2013-01-01 09:00", periods=5, freq="H")
assert is_period_dtype(pidx.dtype)
assert is_period_dtype(pidx)
s = Series(pidx, name="A")
assert is_period_dtype(s.dtype)
assert is_period_dtype(s)
assert not is_period_dtype(np.dtype("float64"))
assert not is_period_dtype(1.0)
def test_empty(self):
dt = PeriodDtype()
msg = "object has no attribute 'freqstr'"
with pytest.raises(AttributeError, match=msg):
str(dt)
def test_not_string(self):
# though PeriodDtype has object kind, it cannot be string
assert not is_string_dtype(PeriodDtype("D"))
class TestIntervalDtype(Base):
@pytest.fixture
def dtype(self):
"""
Class level fixture of dtype for TestIntervalDtype
"""
return IntervalDtype("int64")
def test_hash_vs_equality(self, dtype):
# make sure that we satisfy is semantics
dtype2 = IntervalDtype("int64")
dtype3 = IntervalDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert dtype is dtype2
assert dtype2 is dtype3
assert dtype3 is dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
dtype1 = IntervalDtype("interval")
dtype2 = IntervalDtype(dtype1)
dtype3 = IntervalDtype("interval")
assert dtype2 == dtype1
assert dtype2 == dtype2
assert dtype2 == dtype3
assert dtype2 is dtype1
assert dtype2 is dtype2
assert dtype2 is dtype3
assert hash(dtype2) == hash(dtype1)
assert hash(dtype2) == hash(dtype2)
assert hash(dtype2) == hash(dtype3)
@pytest.mark.parametrize(
"subtype", ["interval[int64]", "Interval[int64]", "int64", np.dtype("int64")]
)
def test_construction(self, subtype):
i = IntervalDtype(subtype)
assert i.subtype == np.dtype("int64")
assert is_interval_dtype(i)
@pytest.mark.parametrize("subtype", [None, "interval", "Interval"])
def test_construction_generic(self, subtype):
# generic
i = IntervalDtype(subtype)
assert i.subtype is None
assert is_interval_dtype(i)
@pytest.mark.parametrize(
"subtype",
[
CategoricalDtype(list("abc"), False),
CategoricalDtype(list("wxyz"), True),
object,
str,
"<U10",
"interval[category]",
"interval[object]",
],
)
def test_construction_not_supported(self, subtype):
# GH 19016
msg = (
"category, object, and string subtypes are not supported "
"for IntervalDtype"
)
with pytest.raises(TypeError, match=msg):
IntervalDtype(subtype)
@pytest.mark.parametrize("subtype", ["xx", "IntervalA", "Interval[foo]"])
def test_construction_errors(self, subtype):
msg = "could not construct IntervalDtype"
with pytest.raises(TypeError, match=msg):
IntervalDtype(subtype)
def test_construction_from_string(self, dtype):
result = IntervalDtype("interval[int64]")
assert is_dtype_equal(dtype, result)
result = IntervalDtype.construct_from_string("interval[int64]")
assert is_dtype_equal(dtype, result)
@pytest.mark.parametrize("string", [0, 3.14, ("a", "b"), None])
def test_construction_from_string_errors(self, string):
# these are invalid entirely
msg = f"'construct_from_string' expects a string, got {type(string)}"
with pytest.raises(TypeError, match=re.escape(msg)):
IntervalDtype.construct_from_string(string)
@pytest.mark.parametrize("string", ["foo", "foo[int64]", "IntervalA"])
def test_construction_from_string_error_subtype(self, string):
# this is an invalid subtype
msg = (
"Incorrectly formatted string passed to constructor. "
r"Valid formats include Interval or Interval\[dtype\] "
"where dtype is numeric, datetime, or timedelta"
)
with pytest.raises(TypeError, match=msg):
IntervalDtype.construct_from_string(string)
def test_subclass(self):
a = IntervalDtype("interval[int64]")
b = IntervalDtype("interval[int64]")
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_is_dtype(self, dtype):
assert IntervalDtype.is_dtype(dtype)
assert IntervalDtype.is_dtype("interval")
assert IntervalDtype.is_dtype(IntervalDtype("float64"))
assert IntervalDtype.is_dtype(IntervalDtype("int64"))
assert IntervalDtype.is_dtype(IntervalDtype(np.int64))
assert not IntervalDtype.is_dtype("D")
assert not IntervalDtype.is_dtype("3D")
assert not IntervalDtype.is_dtype("U")
assert not IntervalDtype.is_dtype("S")
assert not IntervalDtype.is_dtype("foo")
assert not IntervalDtype.is_dtype("IntervalA")
assert not IntervalDtype.is_dtype(np.object_)
assert not IntervalDtype.is_dtype(np.int64)
assert not IntervalDtype.is_dtype(np.float64)
def test_equality(self, dtype):
assert is_dtype_equal(dtype, "interval[int64]")
assert is_dtype_equal(dtype, IntervalDtype("int64"))
assert is_dtype_equal(IntervalDtype("int64"), IntervalDtype("int64"))
assert not is_dtype_equal(dtype, "int64")
assert not is_dtype_equal(IntervalDtype("int64"), IntervalDtype("float64"))
# invalid subtype comparisons do not raise when directly compared
dtype1 = IntervalDtype("float64")
dtype2 = IntervalDtype("datetime64[ns, US/Eastern]")
assert dtype1 != dtype2
assert dtype2 != dtype1
@pytest.mark.parametrize(
"subtype",
[
None,
"interval",
"Interval",
"int64",
"uint64",
"float64",
"complex128",
"datetime64",
"timedelta64",
PeriodDtype("Q"),
],
)
def test_equality_generic(self, subtype):
# GH 18980
dtype = IntervalDtype(subtype)
assert is_dtype_equal(dtype, "interval")
assert is_dtype_equal(dtype, IntervalDtype())
@pytest.mark.parametrize(
"subtype",
[
"int64",
"uint64",
"float64",
"complex128",
"datetime64",
"timedelta64",
PeriodDtype("Q"),
],
)
def test_name_repr(self, subtype):
# GH 18980
dtype = IntervalDtype(subtype)
expected = f"interval[{subtype}]"
assert str(dtype) == expected
assert dtype.name == "interval"
@pytest.mark.parametrize("subtype", [None, "interval", "Interval"])
def test_name_repr_generic(self, subtype):
# GH 18980
dtype = IntervalDtype(subtype)
assert str(dtype) == "interval"
assert dtype.name == "interval"
def test_basic(self, dtype):
assert is_interval_dtype(dtype)
ii = IntervalIndex.from_breaks(range(3))
assert is_interval_dtype(ii.dtype)
assert is_interval_dtype(ii)
s = Series(ii, name="A")
assert is_interval_dtype(s.dtype)
assert is_interval_dtype(s)
def test_basic_dtype(self):
assert is_interval_dtype("interval[int64]")
assert is_interval_dtype(IntervalIndex.from_tuples([(0, 1)]))
assert is_interval_dtype(IntervalIndex.from_breaks(np.arange(4)))
assert is_interval_dtype(
IntervalIndex.from_breaks(date_range("20130101", periods=3))
)
assert not is_interval_dtype("U")
assert not is_interval_dtype("S")
assert not is_interval_dtype("foo")
assert not is_interval_dtype(np.object_)
assert not is_interval_dtype(np.int64)
assert not is_interval_dtype(np.float64)
def test_caching(self):
IntervalDtype.reset_cache()
dtype = IntervalDtype("int64")
assert len(IntervalDtype._cache) == 1
IntervalDtype("interval")
assert len(IntervalDtype._cache) == 2
IntervalDtype.reset_cache()
tm.round_trip_pickle(dtype)
assert len(IntervalDtype._cache) == 0
def test_not_string(self):
# GH30568: though IntervalDtype has object kind, it cannot be string
assert not is_string_dtype(IntervalDtype())
class TestCategoricalDtypeParametrized:
@pytest.mark.parametrize(
"categories",
[
list("abcd"),
np.arange(1000),
["a", "b", 10, 2, 1.3, True],
[True, False],
pd.date_range("2017", periods=4),
],
)
def test_basic(self, categories, ordered):
c1 = CategoricalDtype(categories, ordered=ordered)
tm.assert_index_equal(c1.categories, pd.Index(categories))
assert c1.ordered is ordered
def test_order_matters(self):
categories = ["a", "b"]
c1 = CategoricalDtype(categories, ordered=True)
c2 = CategoricalDtype(categories, ordered=False)
c3 = CategoricalDtype(categories, ordered=None)
assert c1 is not c2
assert c1 is not c3
@pytest.mark.parametrize("ordered", [False, None])
def test_unordered_same(self, ordered):
c1 = CategoricalDtype(["a", "b"], ordered=ordered)
c2 = CategoricalDtype(["b", "a"], ordered=ordered)
assert hash(c1) == hash(c2)
def test_categories(self):
result = CategoricalDtype(["a", "b", "c"])
tm.assert_index_equal(result.categories, pd.Index(["a", "b", "c"]))
assert result.ordered is False
def test_equal_but_different(self, ordered):
c1 = CategoricalDtype([1, 2, 3])
c2 = CategoricalDtype([1.0, 2.0, 3.0])
assert c1 is not c2
assert c1 != c2
@pytest.mark.parametrize("v1, v2", [([1, 2, 3], [1, 2, 3]), ([1, 2, 3], [3, 2, 1])])
def test_order_hashes_different(self, v1, v2):
c1 = CategoricalDtype(v1, ordered=False)
c2 = CategoricalDtype(v2, ordered=True)
c3 = CategoricalDtype(v1, ordered=None)
assert c1 is not c2
assert c1 is not c3
def test_nan_invalid(self):
msg = "Categorical categories cannot be null"
with pytest.raises(ValueError, match=msg):
CategoricalDtype([1, 2, np.nan])
def test_non_unique_invalid(self):
msg = "Categorical categories must be unique"
with pytest.raises(ValueError, match=msg):
CategoricalDtype([1, 2, 1])
def test_same_categories_different_order(self):
c1 = CategoricalDtype(["a", "b"], ordered=True)
c2 = CategoricalDtype(["b", "a"], ordered=True)
assert c1 is not c2
@pytest.mark.parametrize("ordered1", [True, False, None])
@pytest.mark.parametrize("ordered2", [True, False, None])
def test_categorical_equality(self, ordered1, ordered2):
# same categories, same order
# any combination of None/False are equal
# True/True is the only combination with True that are equal
c1 = CategoricalDtype(list("abc"), ordered1)
c2 = CategoricalDtype(list("abc"), ordered2)
result = c1 == c2
expected = bool(ordered1) is bool(ordered2)
assert result is expected
# same categories, different order
# any combination of None/False are equal (order doesn't matter)
# any combination with True are not equal (different order of cats)
c1 = CategoricalDtype(list("abc"), ordered1)
c2 = CategoricalDtype(list("cab"), ordered2)
result = c1 == c2
expected = (bool(ordered1) is False) and (bool(ordered2) is False)
assert result is expected
# different categories
c2 = CategoricalDtype([1, 2, 3], ordered2)
assert c1 != c2
# none categories
c1 = CategoricalDtype(list("abc"), ordered1)
c2 = CategoricalDtype(None, ordered2)
c3 = CategoricalDtype(None, ordered1)
assert c1 != c2
assert c2 != c1
assert c2 == c3
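# e.g. CategoricalDtype(list("abc"), None) == CategoricalDtype(list("abc"), False)
# is True (None and False behave alike here), whereas making only one side
# ordered=True breaks equality; both sides must be ordered=True to compare equal.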
def test_categorical_dtype_equality_requires_categories(self):
# CategoricalDtype with categories=None is *not* equal to
# any fully-initialized CategoricalDtype
first = CategoricalDtype(["a", "b"])
second = CategoricalDtype()
third = CategoricalDtype(ordered=True)
assert second == second
assert third == third
assert first != second
assert second != first
assert first != third
assert third != first
assert second == third
assert third == second
@pytest.mark.parametrize("categories", [list("abc"), None])
@pytest.mark.parametrize("other", ["category", "not a category"])
def test_categorical_equality_strings(self, categories, ordered, other):
c1 = CategoricalDtype(categories, ordered)
result = c1 == other
expected = other == "category"
assert result is expected
def test_invalid_raises(self):
with pytest.raises(TypeError, match="ordered"):
CategoricalDtype(["a", "b"], ordered="foo")
with pytest.raises(TypeError, match="'categories' must be list-like"):
CategoricalDtype("category")
def test_mixed(self):
a = CategoricalDtype(["a", "b", 1, 2])
b = CategoricalDtype(["a", "b", "1", "2"])
assert hash(a) != hash(b)
def test_from_categorical_dtype_identity(self):
c1 = Categorical([1, 2], categories=[1, 2, 3], ordered=True)
# Identity test for no changes
c2 = CategoricalDtype._from_categorical_dtype(c1)
assert c2 is c1
def test_from_categorical_dtype_categories(self):
c1 = Categorical([1, 2], categories=[1, 2, 3], ordered=True)
# override categories
result = CategoricalDtype._from_categorical_dtype(c1, categories=[2, 3])
assert result == CategoricalDtype([2, 3], ordered=True)
def test_from_categorical_dtype_ordered(self):
c1 = Categorical([1, 2], categories=[1, 2, 3], ordered=True)
# override ordered
result = CategoricalDtype._from_categorical_dtype(c1, ordered=False)
assert result == CategoricalDtype([1, 2, 3], ordered=False)
def test_from_categorical_dtype_both(self):
c1 = Categorical([1, 2], categories=[1, 2, 3], ordered=True)
# override both categories and ordered
result = CategoricalDtype._from_categorical_dtype(
c1, categories=[1, 2], ordered=False
)
assert result == CategoricalDtype([1, 2], ordered=False)
def test_str_vs_repr(self, ordered):
c1 = CategoricalDtype(["a", "b"], ordered=ordered)
assert str(c1) == "category"
# Py2 will have unicode prefixes
pat = r"CategoricalDtype\(categories=\[.*\], ordered={ordered}\)"
assert re.match(pat.format(ordered=ordered), repr(c1))
def test_categorical_categories(self):
# GH17884
c1 = CategoricalDtype(Categorical(["a", "b"]))
tm.assert_index_equal(c1.categories, pd.Index(["a", "b"]))
c1 = CategoricalDtype(CategoricalIndex(["a", "b"]))
tm.assert_index_equal(c1.categories, pd.Index(["a", "b"]))
@pytest.mark.parametrize(
"new_categories", [list("abc"), list("cba"), list("wxyz"), None]
)
@pytest.mark.parametrize("new_ordered", [True, False, None])
def test_update_dtype(self, ordered, new_categories, new_ordered):
original_categories = list("abc")
dtype = CategoricalDtype(original_categories, ordered)
new_dtype = CategoricalDtype(new_categories, new_ordered)
result = dtype.update_dtype(new_dtype)
expected_categories = pd.Index(new_categories or original_categories)
expected_ordered = new_ordered if new_ordered is not None else dtype.ordered
tm.assert_index_equal(result.categories, expected_categories)
assert result.ordered is expected_ordered
def test_update_dtype_string(self, ordered):
dtype = CategoricalDtype(list("abc"), ordered)
expected_categories = dtype.categories
expected_ordered = dtype.ordered
result = dtype.update_dtype("category")
tm.assert_index_equal(result.categories, expected_categories)
assert result.ordered is expected_ordered
@pytest.mark.parametrize("bad_dtype", ["foo", object, np.int64, PeriodDtype("Q")])
def test_update_dtype_errors(self, bad_dtype):
dtype = CategoricalDtype(list("abc"), False)
msg = "a CategoricalDtype must be passed to perform an update, "
with pytest.raises(ValueError, match=msg):
dtype.update_dtype(bad_dtype)
@pytest.mark.parametrize(
"dtype", [CategoricalDtype, IntervalDtype, DatetimeTZDtype, PeriodDtype]
)
def test_registry(dtype):
assert dtype in registry.dtypes
@pytest.mark.parametrize(
"dtype, expected",
[
("int64", None),
("interval", IntervalDtype()),
("interval[int64]", IntervalDtype()),
("interval[datetime64[ns]]", IntervalDtype("datetime64[ns]")),
("period[D]", PeriodDtype("D")),
("category", CategoricalDtype()),
("datetime64[ns, US/Eastern]", DatetimeTZDtype("ns", "US/Eastern")),
],
)
def test_registry_find(dtype, expected):
assert registry.find(dtype) == expected
@pytest.mark.parametrize(
"dtype, expected",
[
(str, False),
(int, False),
(bool, True),
(np.bool_, True),
(np.array(["a", "b"]), False),
(Series([1, 2]), False),
(np.array([True, False]), True),
(Series([True, False]), True),
(SparseArray([True, False]), True),
(SparseDtype(bool), True),
],
)
def test_is_bool_dtype(dtype, expected):
result = is_bool_dtype(dtype)
assert result is expected
def test_is_bool_dtype_sparse():
result = is_bool_dtype(Series(SparseArray([True, False])))
assert result is True
@pytest.mark.parametrize(
"check",
[
is_categorical_dtype,
is_datetime64tz_dtype,
is_period_dtype,
is_datetime64_ns_dtype,
is_datetime64_dtype,
is_interval_dtype,
is_datetime64_any_dtype,
is_string_dtype,
is_bool_dtype,
],
)
def test_is_dtype_no_warning(check):
data = pd.DataFrame({"A": [1, 2]})
with tm.assert_produces_warning(None):
check(data)
with tm.assert_produces_warning(None):
check(data["A"])
def test_period_dtype_compare_to_string():
# https://github.com/pandas-dev/pandas/issues/37265
dtype = PeriodDtype(freq="M")
assert (dtype == "period[M]") is True
assert (dtype != "period[M]") is False
|
bsd-3-clause
|
nelson-liu/scikit-learn
|
benchmarks/bench_lof.py
|
49
|
3548
|
"""
============================
LocalOutlierFactor benchmark
============================
A test of LocalOutlierFactor on classical anomaly detection datasets.
"""
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import LocalOutlierFactor
from sklearn.metrics import roc_curve, auc
from sklearn.datasets import fetch_kddcup99, fetch_covtype, fetch_mldata
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import shuffle as sh
print(__doc__)
np.random.seed(2)
# datasets available: ['http', 'smtp', 'SA', 'SF', 'shuttle', 'forestcover']
datasets = ['shuttle']
novelty_detection = True # if False, training set polluted by outliers
for dataset_name in datasets:
# loading and vectorization
print('loading data')
if dataset_name in ['http', 'smtp', 'SA', 'SF']:
dataset = fetch_kddcup99(subset=dataset_name, shuffle=True,
percent10=False)
X = dataset.data
y = dataset.target
if dataset_name == 'shuttle':
dataset = fetch_mldata('shuttle')
X = dataset.data
y = dataset.target
X, y = sh(X, y)
# we remove data with label 4
# normal data are then those of class 1
s = (y != 4)
X = X[s, :]
y = y[s]
y = (y != 1).astype(int)
if dataset_name == 'forestcover':
dataset = fetch_covtype(shuffle=True)
X = dataset.data
y = dataset.target
# normal data are those with attribute 2
# abnormal those with attribute 4
s = (y == 2) + (y == 4)
X = X[s, :]
y = y[s]
y = (y != 2).astype(int)
print('vectorizing data')
if dataset_name == 'SF':
lb = LabelBinarizer()
lb.fit(X[:, 1])
x1 = lb.transform(X[:, 1])
X = np.c_[X[:, :1], x1, X[:, 2:]]
y = (y != 'normal.').astype(int)
if dataset_name == 'SA':
lb = LabelBinarizer()
lb.fit(X[:, 1])
x1 = lb.transform(X[:, 1])
lb.fit(X[:, 2])
x2 = lb.transform(X[:, 2])
lb.fit(X[:, 3])
x3 = lb.transform(X[:, 3])
X = np.c_[X[:, :1], x1, x2, x3, X[:, 4:]]
y = (y != 'normal.').astype(int)
if dataset_name == 'http' or dataset_name == 'smtp':
y = (y != 'normal.').astype(int)
n_samples, n_features = np.shape(X)
n_samples_train = n_samples // 2
n_samples_test = n_samples - n_samples_train
X = X.astype(float)
X_train = X[:n_samples_train, :]
X_test = X[n_samples_train:, :]
y_train = y[:n_samples_train]
y_test = y[n_samples_train:]
if novelty_detection:
X_train = X_train[y_train == 0]
y_train = y_train[y_train == 0]
print('LocalOutlierFactor processing...')
model = LocalOutlierFactor(n_neighbors=20)
tstart = time()
model.fit(X_train)
fit_time = time() - tstart
tstart = time()
scoring = -model.decision_function(X_test) # the lower, the more normal
predict_time = time() - tstart
fpr, tpr, thresholds = roc_curve(y_test, scoring)
AUC = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1,
label=('ROC for %s (area = %0.3f, train-time: %0.2fs,'
'test-time: %0.2fs)' % (dataset_name, AUC, fit_time,
predict_time)))
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
|
bsd-3-clause
|
florentchandelier/zipline
|
tests/risk/test_risk_cumulative.py
|
3
|
4321
|
#
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import zipline.finance.risk as risk
from zipline.utils import factory
from zipline.testing.fixtures import WithTradingEnvironment, ZiplineTestCase
from zipline.finance.trading import SimulationParameters
RETURNS_BASE = 0.01
RETURNS = [RETURNS_BASE] * 251
BENCHMARK_BASE = 0.005
BENCHMARK = [BENCHMARK_BASE] * 251
DECIMAL_PLACES = 8
class TestRisk(WithTradingEnvironment, ZiplineTestCase):
def init_instance_fixtures(self):
super(TestRisk, self).init_instance_fixtures()
start_session = pd.Timestamp("2006-01-01", tz='UTC')
end_session = pd.Timestamp("2006-12-29", tz='UTC')
self.sim_params = SimulationParameters(
start_session=start_session,
end_session=end_session,
trading_calendar=self.trading_calendar,
)
self.algo_returns = factory.create_returns_from_list(
RETURNS,
self.sim_params
)
self.cumulative_metrics = risk.RiskMetricsCumulative(
self.sim_params,
treasury_curves=self.env.treasury_curves,
trading_calendar=self.trading_calendar,
)
for dt, returns in self.algo_returns.iteritems():
self.cumulative_metrics.update(
dt,
returns,
BENCHMARK_BASE,
0.0
)
def test_algorithm_volatility(self):
np.testing.assert_equal(
len(self.algo_returns),
len(self.cumulative_metrics.algorithm_volatility)
)
np.testing.assert_equal(
all(isinstance(x, float)
for x in self.cumulative_metrics.algorithm_volatility),
True
)
def test_sharpe(self):
np.testing.assert_equal(
len(self.algo_returns),
len(self.cumulative_metrics.sharpe)
)
np.testing.assert_equal(
all(isinstance(x, float)
for x in self.cumulative_metrics.sharpe),
True)
def test_downside_risk(self):
np.testing.assert_equal(
len(self.algo_returns),
len(self.cumulative_metrics.downside_risk)
)
np.testing.assert_equal(
all(isinstance(x, float)
for x in self.cumulative_metrics.downside_risk),
True)
def test_sortino(self):
np.testing.assert_equal(
len(self.algo_returns),
len(self.cumulative_metrics.sortino)
)
np.testing.assert_equal(
all(isinstance(x, float)
for x in self.cumulative_metrics.sortino),
True)
def test_alpha(self):
np.testing.assert_equal(
len(self.algo_returns),
len(self.cumulative_metrics.alpha)
)
np.testing.assert_equal(
all(isinstance(x, float)
for x in self.cumulative_metrics.alpha),
True)
def test_beta(self):
np.testing.assert_equal(
len(self.algo_returns),
len(self.cumulative_metrics.beta)
)
np.testing.assert_equal(
all(isinstance(x, float)
for x in self.cumulative_metrics.beta),
True)
def test_max_drawdown(self):
np.testing.assert_equal(
len(self.algo_returns),
len(self.cumulative_metrics.max_drawdowns)
)
np.testing.assert_equal(
all(isinstance(x, float)
for x in self.cumulative_metrics.max_drawdowns),
True)
def test_representation(self):
assert all(metric in repr(self.cumulative_metrics)
for metric in self.cumulative_metrics.METRIC_NAMES)
|
apache-2.0
|
canast02/microsoft-malware-classification-challenge
|
solution4.py
|
1
|
3651
|
import os
import numpy as np
from csv import reader, writer
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier
import six
# Decide read/write mode based on python version
read_mode, write_mode = ('r', 'w') if six.PY2 else ('rt', 'wt')
# Set path to your consolidated files
path = '/Users/chrysovalantis/Documents/UCY/EPL451/Project'
os.chdir(path)
# File names
ftrain = 'train_consolidation.txt'
ftest = 'test_consolidation.txt'
flabel = 'trainLabels.csv'
fsubmission = 'submission.csv'
print('loading started')
# Let's read the labels first, as records are not sorted in the files
labels = {}
with open(flabel) as f:
next(f) # Ignoring header
for row in reader(f):
labels[row[0]] = int(row[1])
print('labels loaded')
# Dimensions for train set
ntrain = 10868
nfeature = 16 ** 2 + 1 + 1 # For two_byte_codes, no_que_marks, label
train = np.zeros((ntrain, nfeature), dtype=int)
with open(ftrain) as f:
next(f) # Ignoring header
for t, row in enumerate(reader(f)):
# train[t, :-1] = map(int, row[1:]) if six.PY2 else list(map(int, row[1:]))
train[t, :-1] = map(float, row[1:]) if six.PY2 else list(map(float, row[1:]))
train[t, -1] = labels[row[0]]
if (t + 1) % 1000 == 0:
print((t + 1) * 100.0 / ntrain, '% of records loaded')
print('training set loaded')
del labels
# Parameters for Randomforest
random_state = 5342
n_jobs = 8
verbose = 2
clf1 = ExtraTreesClassifier(criterion='entropy', random_state=random_state, n_jobs=n_jobs, verbose=verbose)
clf2 = ExtraTreesClassifier(criterion='entropy', random_state=random_state, n_jobs=n_jobs, verbose=verbose)
clf3 = RandomForestClassifier(criterion='entropy', random_state=random_state, n_jobs=n_jobs, verbose=verbose)
clf4 = RandomForestClassifier(criterion='entropy', random_state=random_state, n_jobs=n_jobs, verbose=verbose)
# Start training
print('training started')
clf1.fit(train[:, :-1], train[:, -1])
X_new1 = clf1.transform(train[:, :-1])
X_new2 = clf3.fit_transform(train[:, :-1], train[:, -1])
# print('importances', clf1.feature_importances_)
clf2.fit(X_new1, train[:, -1])
clf4.fit(X_new2, train[:, -1])
print('training completed')
print('n_components = ', len(X_new1[0]), len(X_new2[0]))
# We don't need training set now
del train
# Dimensions for test set
ntest = 10873
nfeature = 16 ** 2 + 1 # For two_byte_codes, no_que_marks
test = np.zeros((ntest, nfeature), dtype=int)
Ids = [] # Required test set ids
with open(ftest, read_mode) as f:
next(f) # Ignoring header
for t, row in enumerate(reader(f)):
# test[t, :] = map(int, row[1:]) if six.PY2 else list(map(int, row[1:]))
test[t, :] = map(float, row[1:]) if six.PY2 else list(map(float, row[1:]))
Ids.append(row[0])
if (t + 1) % 1000 == 0:
print(t + 1, 'records loaded')
print('test set loaded')
Y_new1 = clf1.transform(test)
Y_new2 = clf3.transform(test)
# Predict for whole test set
y_pred1 = clf2.predict_proba(Y_new1)
y_pred2 = clf4.predict_proba(Y_new2)
y_pred = np.zeros((len(y_pred1), len(y_pred1[0])), dtype=float)
# iterate through rows
for i in range(len(y_pred)):
# iterate through columns
for j in range(len(y_pred[0])):
y_pred[i][j] = (y_pred1[i][j] + y_pred2[i][j]) / 2.0
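# The double loop above is just an elementwise mean of the two probability
# matrices; a vectorized sketch of the same step (assuming both predictors
# return arrays of identical shape) would be:
# y_pred = (np.asarray(y_pred1) + np.asarray(y_pred2)) / 2.0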
# Writing results to file
with open(fsubmission, write_mode) as f:
fw = writer(f)
# Header preparation
header = ['Id'] + ['Prediction' + str(i) for i in range(1, 10)]
fw.writerow(header)
for t, (Id, pred) in enumerate(zip(Ids, y_pred.tolist())):
fw.writerow([Id] + pred)
if (t + 1) % 1000 == 0:
print(t + 1, 'prediction written')
|
apache-2.0
|
ihmeuw/vivarium
|
tests/framework/test_randomness.py
|
1
|
5691
|
import pytest
import pandas as pd
import numpy as np
import vivarium.framework.randomness as random
from vivarium.framework.randomness import RandomnessManager, RandomnessStream, RESIDUAL_CHOICE, RandomnessError
@pytest.fixture(params=[10**4, 10**5])
def index(request):
return pd.Index(range(request.param)) if request.param else None
@pytest.fixture(params=[['a', 'small', 'bird']])
def choices(request):
return request.param
# TODO: Add 2-d weights to the tests.
@pytest.fixture(params=[None, [10, 10, 10], [0.5, 0.1, 0.4]])
def weights(request):
return request.param
@pytest.fixture(params=[(0.2, 0.1, RESIDUAL_CHOICE),
(0.5, 0.6, RESIDUAL_CHOICE),
(.1, RESIDUAL_CHOICE, RESIDUAL_CHOICE)])
def weights_with_residuals(request):
return request.param
def test_normalize_shape(weights_with_residuals, index):
p = random._normalize_shape(weights_with_residuals, index)
assert p.shape == (len(index), len(weights_with_residuals))
def test__set_residual_probability(weights_with_residuals, index):
# Coerce the weights to a 2-d numpy array.
p = random._normalize_shape(weights_with_residuals, index)
residual = np.where(p == RESIDUAL_CHOICE, 1, 0)
non_residual = np.where(p != RESIDUAL_CHOICE, p, 0)
if np.any(non_residual.sum(axis=1) > 1):
with pytest.raises(RandomnessError):
# We received un-normalized probability weights.
random._set_residual_probability(p)
elif np.any(residual.sum(axis=1) > 1):
with pytest.raises(RandomnessError):
# We received multiple instances of `RESIDUAL_CHOICE`
random._set_residual_probability(p)
else: # Things should work
p_total = np.sum(random._set_residual_probability(p))
assert np.isclose(p_total, len(index), atol=0.0001)
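# Worked example of the residual convention checked above: with weights
# (0.2, 0.1, RESIDUAL_CHOICE) the residual slot is filled with the leftover
# probability mass, 1 - (0.2 + 0.1) = 0.7, so every row sums to 1.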
def test_filter_for_probability(index):
dates = [pd.Timestamp(1991, 1, 1), pd.Timestamp(1990, 1, 1)]
randomness = RandomnessStream('test', dates.pop, 1)
sub_index = randomness.filter_for_probability(index, 0.5)
assert round(len(sub_index)/len(index), 1) == 0.5
sub_sub_index = randomness.filter_for_probability(sub_index, 0.5)
assert round(len(sub_sub_index)/len(sub_index), 1) == 0.5
def test_choice(index, choices, weights):
dates = [pd.Timestamp(1990, 1, 1)]
randomness = RandomnessStream('test', dates.pop, 1)
chosen = randomness.choice(index, choices, p=weights)
count = chosen.value_counts()
# If we have weights, normalize them, otherwise generate uniform weights.
weights = [w/sum(weights) for w in weights] if weights else [1/len(choices) for _ in choices]
for k, c in count.items():
assert np.isclose(c/len(index), weights[choices.index(k)], atol=0.01)
def test_choice_with_residuals(index, choices, weights_with_residuals):
print(RESIDUAL_CHOICE in weights_with_residuals)
dates = [pd.Timestamp(1990, 1, 1)]
randomness = RandomnessStream('test', dates.pop, 1)
p = random._normalize_shape(weights_with_residuals, index)
residual = np.where(p == RESIDUAL_CHOICE, 1, 0)
non_residual = np.where(p != RESIDUAL_CHOICE, p, 0)
if np.any(non_residual.sum(axis=1) > 1):
with pytest.raises(RandomnessError):
# We received un-normalized probability weights.
randomness.choice(index, choices, p=weights_with_residuals)
elif np.any(residual.sum(axis=1) > 1):
with pytest.raises(RandomnessError):
# We received multiple instances of `RESIDUAL_CHOICE`
randomness.choice(index, choices, p=weights_with_residuals)
else: # Things should work
chosen = randomness.choice(index, choices, p=weights_with_residuals)
count = chosen.value_counts()
print(weights_with_residuals)
# We're relying on the fact that weights_with_residuals is a 1-d list
residual_p = 1 - sum([w for w in weights_with_residuals if w != RESIDUAL_CHOICE])
weights = [w if w != RESIDUAL_CHOICE else residual_p for w in weights_with_residuals]
for k, c in count.items():
assert np.isclose(c / len(index), weights[choices.index(k)], atol=0.01)
def mock_clock():
return pd.Timestamp('1/1/2005')
def test_RandomnessManager_get_randomness_stream():
seed = 123456
rm = RandomnessManager()
rm._add_constraint = lambda f, **kwargs: f
rm._seed = seed
rm._clock = mock_clock
stream = rm._get_randomness_stream('test')
assert stream.key == 'test'
assert stream.seed == seed
assert stream.clock is mock_clock
assert set(rm._decision_points.keys()) == {'test'}
with pytest.raises(RandomnessError):
rm.get_randomness_stream('test')
def test_RandomnessManager_register_simulants():
seed = 123456
rm = RandomnessManager()
rm._add_constraint = lambda f, **kwargs: f
rm._seed = seed
rm._clock = mock_clock
rm._key_columns = ['age', 'sex']
bad_df = pd.DataFrame({'age': range(10),
'not_sex': [1]*5 + [2]*5})
with pytest.raises(RandomnessError):
rm.register_simulants(bad_df)
good_df = pd.DataFrame({'age': range(10),
'sex': [1]*5 + [2]*5})
rm.register_simulants(good_df)
assert rm._key_mapping._map.index.difference(good_df.set_index(good_df.columns.tolist()).index).empty
def test_get_random_seed():
seed = '123456'
decision_point = 'test'
rm = RandomnessManager()
rm._add_constraint = lambda f, **kwargs: f
rm._seed = seed
rm._clock = mock_clock
assert rm.get_seed(decision_point) == random.get_hash(f'{decision_point}_{rm._clock()}_{seed}')
|
gpl-3.0
|
start-jsk/jsk_apc
|
demos/grasp_fusion/grasp_fusion_lib/image.py
|
2
|
12837
|
import warnings
import cv2
import matplotlib
import numpy as np
import scipy
import six
import skimage.color
import skimage.transform
import skimage.util
def colorize_depth(depth, min_value=None, max_value=None, dtype=np.uint8):
"""Colorize depth image with JET colormap."""
min_value = np.nanmin(depth) if min_value is None else min_value
max_value = np.nanmax(depth) if max_value is None else max_value
if np.isinf(min_value) or np.isinf(max_value):
warnings.warn('Min or max value for depth colorization is inf.')
if max_value == min_value:
eps = np.finfo(depth.dtype).eps
max_value += eps
min_value -= eps
colorized = depth.copy()
nan_mask = np.isnan(colorized)
colorized[nan_mask] = 0
colorized = 1. * (colorized - min_value) / (max_value - min_value)
colorized = matplotlib.cm.jet(colorized)[:, :, :3]
if dtype == np.uint8:
colorized = (colorized * 255).astype(dtype)
else:
assert np.issubdtype(dtype, np.floating)
colorized = colorized.astype(dtype)
colorized[nan_mask] = (0, 0, 0)
return colorized
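# Illustrative usage sketch (a minimal example with a synthetic depth map;
# NaN marks missing pixels):
# >>> depth = np.full((480, 640), np.nan, dtype=np.float32)
# >>> depth[100:300, 200:400] = 1.5
# >>> viz = colorize_depth(depth, min_value=0.5, max_value=2.0)
# >>> viz.shape, viz.dtype
# ((480, 640, 3), dtype('uint8'))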
def colorize_heatmap(heatmap):
"""Colorize heatmap which ranges 0 to 1.
Parameters
----------
heatmap: numpy.ndarray
Heatmap which ranges 0 to 1.
"""
if not (0 <= heatmap.min() <= 1):
raise ValueError('Heatmap min value must range from 0 to 1')
if not (0 <= heatmap.max() <= 1):
raise ValueError('Heatmap max value must range from 0 to 1')
return colorize_depth(heatmap, min_value=0, max_value=1)
def overlay_color_on_mono(img_color, img_mono, alpha=0.5):
"""Overlay color image on mono.
Parameters
----------
img_color: numpy.ndarray, (H, W, 3)
img_mono: numpy.ndarray, (H, W, 3) or (H, W)
alpha: float
Alpha value for color.
Returns
-------
dst: numpy.ndarray
Output image.
"""
# RGB -> Gray
if img_mono.ndim == 3:
img_mono = skimage.color.rgb2gray(img_mono)
img_mono = skimage.color.gray2rgb(img_mono)
img_mono = skimage.util.img_as_float(img_mono)
img_color = skimage.util.img_as_float(img_color)
dst = alpha * img_color + (1 - alpha) * img_mono
dst = (dst * 255).astype(np.uint8)
return dst
def label_colormap(n_label=256):
"""Colormap for specified number of labels.
Parameters
----------
n_label: int
Number of labels and colors.
"""
def bitget(byteval, idx):
return ((byteval & (1 << idx)) != 0)
cmap = np.zeros((n_label, 3))
for i in six.moves.range(0, n_label):
id = i
r, g, b = 0, 0, 0
for j in six.moves.range(0, 8):
r = np.bitwise_or(r, (bitget(id, 0) << 7 - j))
g = np.bitwise_or(g, (bitget(id, 1) << 7 - j))
b = np.bitwise_or(b, (bitget(id, 2) << 7 - j))
id = (id >> 3)
cmap[i, 0] = r
cmap[i, 1] = g
cmap[i, 2] = b
cmap = cmap.astype(np.float32) / 255
return cmap
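# Illustrative sketch of how the colormap is typically consumed (RGB values
# are floats in [0, 1], one row per label id; label 0 maps to black):
# >>> cmap = label_colormap(21)
# >>> cmap.shape
# (21, 3)
# >>> (cmap[0] * 255).astype(np.uint8)
# array([0, 0, 0], dtype=uint8)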
def centerize(src, shape, margin_color=None, return_mask=False):
"""Centerize image for specified image size
Parameters
----------
src: numpy.ndarray
Image to centerize
shape: tuple of int
Image shape (height, width) or (height, width, channel)
margin_color: numpy.ndarray
Color used to fill the blank margin.
return_mask: bool
If True, also return the boolean mask of the centerized region.
"""
if src.shape[:2] == shape[:2]:
if return_mask:
return src, np.ones(shape[:2], dtype=bool)
else:
return src
if len(shape) != src.ndim:
shape = list(shape) + [src.shape[2]]
centerized = np.zeros(shape, dtype=src.dtype)
if margin_color:
centerized[:, :] = margin_color
src_h, src_w = src.shape[:2]
scale_h, scale_w = 1. * shape[0] / src_h, 1. * shape[1] / src_w
scale = min(scale_h, scale_w)
src = cv2.resize(src, None, None, fx=scale, fy=scale)
ph, pw = 0, 0
h, w = src.shape[:2]
dst_h, dst_w = shape[:2]
if h < dst_h:
ph = (dst_h - h) // 2
if w < dst_w:
pw = (dst_w - w) // 2
mask = np.zeros(shape[:2], dtype=bool)
mask[ph:ph + h, pw:pw + w] = True
centerized[ph:ph + h, pw:pw + w] = src
if return_mask:
return centerized, mask
else:
return centerized
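# Illustrative usage sketch (assumes an RGB crop smaller than the target
# canvas; the image is scaled to fit and padded symmetrically):
# >>> src = np.zeros((100, 50, 3), dtype=np.uint8)
# >>> dst, mask = centerize(src, (100, 100, 3), return_mask=True)
# >>> dst.shape, mask.shape
# ((100, 100, 3), (100, 100))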
def _tile(imgs, shape, dst):
"""Tile images which have same size.
Parameters
----------
imgs: numpy.ndarray
Image list which should be tiled.
shape: tuple of int
Tile shape.
dst:
Image to put the tile on.
"""
y_num, x_num = shape
tile_w = imgs[0].shape[1]
tile_h = imgs[0].shape[0]
if dst is None:
if len(imgs[0].shape) == 3:
dst = np.zeros((tile_h * y_num, tile_w * x_num, 3), dtype=np.uint8)
else:
dst = np.zeros((tile_h * y_num, tile_w * x_num), dtype=np.uint8)
for y in range(y_num):
for x in range(x_num):
i = x + y * x_num
if i < len(imgs):
y1 = y * tile_h
y2 = (y + 1) * tile_h
x1 = x * tile_w
x2 = (x + 1) * tile_w
dst[y1:y2, x1:x2] = imgs[i]
return dst
def _get_tile_shape(num):
import math
x_num = int(math.sqrt(num))
y_num = 0
while x_num * y_num < num:
y_num += 1
return x_num, y_num
def tile(
imgs,
shape=None,
dst=None,
margin_color=None,
boundary=False,
boundary_color=(255, 255, 255),
boundary_thickness=3,
):
"""Tile images which have different size.
Parameters
----------
imgs:
Image list which should be tiled.
shape:
The tile shape.
dst:
Image to put the tile on.
margin_color: numpy.ndarray
Color used to fill the blank margin.
"""
imgs = imgs[:]
if shape is None:
shape = _get_tile_shape(len(imgs))
# get the common tile size (smallest height/width over all images) into which each image is centerized
max_h, max_w = np.inf, np.inf
for img in imgs:
max_h = min(max_h, img.shape[0])
max_w = min(max_w, img.shape[1])
# tile images
is_color = False
for i, img in enumerate(imgs):
if img.ndim >= 3:
is_color = True
if is_color and img.ndim == 2:
img = skimage.color.gray2rgb(img)
if is_color and img.shape[2] == 4:
img = img[:, :, :3]
img = skimage.util.img_as_ubyte(img)
img = centerize(img, (max_h, max_w, 3), margin_color)
if boundary:
cv2.rectangle(img, (1, 1), (img.shape[1] - 1, img.shape[0] - 1),
boundary_color, thickness=boundary_thickness)
imgs[i] = img
return _tile(imgs, shape, dst)
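# Illustrative usage sketch (assumes a few differently sized uint8 images;
# the grid shape is picked automatically when `shape` is omitted):
# >>> imgs = [np.zeros((60, 80, 3), dtype=np.uint8),
# ...         np.zeros((120, 90, 3), dtype=np.uint8),
# ...         np.zeros((30, 40), dtype=np.uint8)]
# >>> tile(imgs).dtype
# dtype('uint8')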
def get_text_color(color):
if color[0] * 0.299 + color[1] * 0.587 + color[2] * 0.114 > 170:
return (0, 0, 0)
return (255, 255, 255)
def label2rgb(lbl, img=None, label_names=None, n_labels=None,
alpha=0.5, thresh_suppress=0):
if label_names is None:
if n_labels is None:
n_labels = lbl.max() + 1 # +1 for bg_label 0
else:
if n_labels is None:
n_labels = len(label_names)
else:
assert n_labels == len(label_names)
cmap = label_colormap(n_labels)
cmap = (cmap * 255).astype(np.uint8)
lbl_viz = cmap[lbl]
if img is not None:
img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
img_gray = cv2.cvtColor(img_gray, cv2.COLOR_GRAY2RGB)
lbl_viz = alpha * lbl_viz + (1 - alpha) * img_gray
lbl_viz = lbl_viz.astype(np.uint8)
np.random.seed(1234)
mask_unlabeled = lbl == -1
lbl_viz[mask_unlabeled] = \
np.random.random(size=(mask_unlabeled.sum(), 3)) * 255
if label_names is None:
return lbl_viz
for label in np.unique(lbl):
if label == -1:
continue # unlabeled
mask = lbl == label
if 1. * mask.sum() / mask.size < thresh_suppress:
continue
mask = (mask * 255).astype(np.uint8)
y, x = scipy.ndimage.center_of_mass(mask)
y, x = map(int, [y, x])
if lbl[y, x] != label:
Y, X = np.where(mask)
point_index = np.random.randint(0, len(Y))
y, x = Y[point_index], X[point_index]
text = label_names[label]
font_face = cv2.FONT_HERSHEY_SIMPLEX
font_scale = 0.7
thickness = 2
text_size, baseline = cv2.getTextSize(
text, font_face, font_scale, thickness)
color = get_text_color(lbl_viz[y, x])
cv2.putText(lbl_viz, text,
(x - text_size[0] // 2, y),
font_face, font_scale, color, thickness)
return lbl_viz
def mask_to_bbox(mask):
"""Convert binary mask image to bounding box.
Parameters
----------
mask: numpy.ndarray, (H, W), bool
Boolean mask.
Returns
-------
bbox: tuple of int, (4,)
x1, y1, x2, y2.
"""
warnings.warn(
'mask_to_bbox is deprecated. Use masks_to_bboxes '
'which returns an array of (y1, x1, y2, x2).'
)
assert mask.dtype == bool
where = np.argwhere(mask)
(y1, x1), (y2, x2) = where.min(0), where.max(0) + 1
return x1, y1, x2, y2
def masks_to_bboxes(masks):
"""Convert binary mask image to bounding box.
Parameters
----------
masks: numpy.ndarray, (N, H, W), bool
Boolean masks.
Returns
-------
bboxes: tuple of int, (N, 4)
Each bbox represents (y1, x1, y2, x2).
"""
bboxes = np.zeros((len(masks), 4), dtype=np.int32)
for i, mask in enumerate(masks):
assert mask.dtype == bool
where = np.argwhere(mask)
(y1, x1), (y2, x2) = where.min(0), where.max(0) + 1
bboxes[i] = (y1, x1, y2, x2)
return bboxes
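# Illustrative sketch of the (y1, x1, y2, x2) convention (a single boolean
# mask with one filled rectangle, passed as a stack of one mask):
# >>> mask = np.zeros((10, 10), dtype=bool)
# >>> mask[2:5, 3:8] = True
# >>> masks_to_bboxes(mask[None])
# array([[2, 3, 5, 8]], dtype=int32)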
def mask_to_lbl(mask, label):
"""Convert mask to label image."""
lbl = np.empty(mask.shape, dtype=np.int32)
lbl[mask] = label
lbl[~mask] = -1
return lbl
def resize(
image,
height=None,
width=None,
fy=None,
fx=None,
size=None,
interpolation=cv2.INTER_LINEAR,
):
"""Resize image with cv2 resize function.
Parameters
----------
image: numpy.ndarray
Source image.
height, width: None or int
Target height or width.
fy, fx: None or float
Target height or width scale.
size: None or int or float
Target image size.
interpolation: int
Interpolation flag. (default: cv2.INTER_LINEAR == 1)
"""
hw_ratio = 1. * image.shape[0] / image.shape[1] # h / w
if height is not None or width is not None:
if height is None:
height = int(round(hw_ratio * width))
elif width is None:
width = int(round(1 / hw_ratio * height))
assert fy is None
assert fx is None
assert size is None
return cv2.resize(image, (width, height), interpolation=interpolation)
elif fy is not None or fx is not None:
if fy is None:
fy = fx
elif fx is None:
fx = fy
assert height is None
assert width is None
assert size is None
elif size is not None:
assert height is None
assert width is None
assert fy is None
assert fx is None
fx = fy = np.sqrt(1. * size / (image.shape[0] * image.shape[1]))
else:
raise ValueError
return cv2.resize(
image, None, None, fx=fx, fy=fy, interpolation=interpolation)
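# Illustrative usage sketch (assumes an RGB image; exactly one of the
# height/width, fy/fx or size argument groups may be given at a time):
# >>> img = np.zeros((480, 640, 3), dtype=np.uint8)
# >>> resize(img, height=240).shape   # width follows the aspect ratio
# (240, 320, 3)
# >>> resize(img, fy=0.5).shape       # fx defaults to fy
# (240, 320, 3)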
def resize_mask(mask, *args, **kwargs):
"""Resize mask in float space.
Parameters
----------
mask: numpy.ndarray
Source mask whose size must be (H, W) and has bool dtype.
See grasp_fusion_lib.image.resize for other parameters.
"""
assert mask.dtype == bool
assert mask.ndim == 2
mask = mask.astype(float)
mask = resize(mask, *args, **kwargs)
mask = mask > 0.5
return mask
def resize_lbl(lbl, *args, **kwargs):
"""Resize lbl in channel space.
Parameters
----------
lbl: numpy.ndarray
Source mask whose size must be (H, W) and has int32 dtype.
See grasp_fusion_lib.image.resize for other parameters.
"""
assert lbl.dtype == np.int32
assert lbl.ndim == 2
# [label -> onehot] -> [resize] -> [onehot -> label]
min_value = lbl.min()
lbl -= min_value # shift to make the min_value to be 0
lbl_score = (np.arange(lbl.max() + 1) == lbl[..., None]).astype(np.float32)
lbl_score = resize(lbl_score, *args, **kwargs)
lbl_score = np.atleast_3d(lbl_score)
lbl = np.argmax(lbl_score, axis=2)
lbl = lbl.astype(np.int32)
lbl += min_value # restore the min_value
return lbl
|
bsd-3-clause
|
RDCEP/ggcmi
|
bin/plot.isi1/blmap.isi1.py
|
1
|
9804
|
#!/usr/bin/env python
# import modules
import matplotlib
from os.path import splitext
from shapefile import Reader
from itertools import product
import matplotlib.pyplot as plt
from optparse import OptionParser
from netCDF4 import Dataset as nc
from mpl_toolkits.basemap import Basemap
from matplotlib.collections import LineCollection
from numpy.ma import masked_array, masked_where, median, reshape
from numpy import zeros, ones, resize, meshgrid, arange, array, cos, pi, where
# define colormaps
cdict_beta = {'red': ((0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 0.0, 0.0)),
'green': ((0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 0.5, 0.0)),
'blue': ((0.0, 0.0, 0.5),
(0.5, 1.0, 1.0),
(1.0, 0.0, 0.0))
}
cdict_lambda = {'red': ((0.0, 0.5, 0.5),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.0, 1.0, 1.0))
}
# parse inputs
parser = OptionParser()
parser.add_option("-i", "--infile", dest = "infile", default = "metrics.nc4", type = "string",
help = "Metrics file", metavar = "FILE")
parser.add_option("-c", "--crop", dest = "crop", default = "maize", type = "string",
help = "Crop (or all)")
parser.add_option("-a", "--aggfile", dest = "aggfile", default = "fpu.mask.nc4", type = "string",
help = "Aggregation file", metavar = "FILE")
parser.add_option("-r", "--hareafile", dest = "hareafile", default = "all.fpu.nc4", type = "string",
help = "Harvested area file", metavar = "FILE")
parser.add_option("-s", "--shapefile", dest = "shapefile", default = "fpu", type = "string",
help = "Shape file", metavar = "FILE")
parser.add_option("-w", "--weightfile", dest = "weightfile", default = "maize.nc4", type = "string",
help = "Weight file", metavar = "FILE")
parser.add_option("-p", "--percent", dest = "percent", default = "0.1", type = "float",
help = "Percent threshold")
parser.add_option("-m", "--mapfile", dest = "mapfile", default = "map.png", type = "string",
help = "Output map file", metavar = "FILE")
parser.add_option("-n", "--ncfile", dest = "ncfile", default = "map.nc4", type = "string",
help = "Output netcdf data file", metavar = "FILE")
options, args = parser.parse_args()
infile = options.infile
crop = options.crop
aggfile = options.aggfile
hareafile = options.hareafile
shapefile = options.shapefile
weightfile = options.weightfile
percent = options.percent
mapfile = options.mapfile
ncfile = options.ncfile
cals = {'maize': 3.60, 'wheat': 3.34, 'soy': 3.35, 'rice': 2.80}
with nc(infile) as f:
fpu = f.variables['fpu'][:]
nfpu = len(fpu)
# load aggregation file
with nc(aggfile) as f:
lats, lons = f.variables['lat'][:], f.variables['lon'][:]
fpumap = f.variables['fpu'][:]
nlats, nlons = len(lats), len(lons)
# load weight file
with nc(weightfile) as f:
harea = f.variables['sum'][:]
# load area file
careas = {}
with nc(hareafile) as f:
cfpu = f.variables['fpu'][:]
for c in ['maize', 'wheat', 'soy', 'rice']:
careas[c] = f.variables['area_' + c][:]
# find valid fpus
tarea = 100 * (111.2 / 2) ** 2 * cos(pi * lats / 180)
tarea = resize(tarea, (nlons, nlats)).T
validfpus = []
for i in range(nfpu):
hareafpu = harea[fpumap == fpu[i]].sum()
tareafpu = tarea[fpumap == fpu[i]].sum()
if hareafpu / tareafpu > percent / 100.:
validfpus.append(fpu[i])
# load shape file
r = Reader(shapefile)
shapes = r.shapes()
records = r.records()
models = ['epic', 'gepic', 'lpj-guess', 'lpjml', 'pdssat', 'pegasus'] # exclude image
gcms = ['gfdl-esm2m', 'hadgem2-es', 'ipsl-cm5a-lr', 'miroc-esm-chem', 'noresm1-m']
crops = ['maize', 'wheat', 'soy', 'rice'] if crop == 'all' else [crop]
co2s = ['co2', 'noco2']
hadgemidx = gcms.index('hadgem2-es')
nm, ng, ncr, nco2 = len(models), len(gcms), len(crops), len(co2s)
# variables
sh = (nm, ng, ncr, 3, nfpu, nco2)
dy26arr = masked_array(zeros(sh), mask = ones(sh))
dy85arr = masked_array(zeros(sh), mask = ones(sh))
with nc(infile) as f:
for m, g, c, co in product(range(nm), range(ng), range(ncr), range(nco2)):
var = 'delta_yield_26_fpu_%s_%s_%s_%s' % (models[m], gcms[g], crops[c], co2s[co])
if var in f.variables:
dy26arr[m, g, c, :, :, co] = f.variables[var][:, -3 :].T # last three decades
var = 'delta_yield_85_fpu_%s_%s_%s_%s' % (models[m], gcms[g], crops[c], co2s[co])
if var in f.variables:
dy85arr[m, g, c, :, :, co] = f.variables[var][:, -3 :].T
# weights
weights = masked_array(zeros(sh), mask = ones(sh))
areas = masked_array(zeros(sh), mask = ones(sh))
for i in range(ncr):
weights[:, :, i] = cals[crops[i]]
for f in range(nfpu):
fpuidx = where(cfpu == fpu[f])[0][0]
areas[:, :, i, :, f] = careas[crops[i]][fpuidx]
weights = masked_where(dy26arr.mask, weights) # mask
areas = masked_where(dy26arr.mask, areas)
# average over crops and decades
dy26arr = (dy26arr * weights * areas).sum(axis = 3).sum(axis = 2) / areas.sum(axis = 3).sum(axis = 2)
dy85arr = (dy85arr * weights * areas).sum(axis = 3).sum(axis = 2) / areas.sum(axis = 3).sum(axis = 2)
barr = masked_array(zeros((3, nfpu)), mask = ones((3, nfpu)))
larr = masked_array(zeros((3, nfpu)), mask = ones((3, nfpu)))
# hadgem noco2
dy26m = median(dy26arr[:, hadgemidx, :, 1], axis = 0)
dy85m = median(dy85arr[:, hadgemidx, :, 1], axis = 0)
negy = dy85m < -0.01
barr[0, negy] = 100 * (1 - dy26m[negy] / dy85m[negy])
posy = dy85m > 0.01
larr[0, posy] = 100 * (dy26m[posy] / dy85m[posy] - 1)
# hadgem co2
dy26m = median(dy26arr[:, hadgemidx, :, 0], axis = 0)
dy85m = median(dy85arr[:, hadgemidx, :, 0], axis = 0)
negy = dy85m < -0.01
barr[1, negy] = 100 * (1 - dy26m[negy] / dy85m[negy])
posy = dy85m > 0.01
larr[1, posy] = 100 * (dy26m[posy] / dy85m[posy] - 1)
# all co2
dy26m = median(reshape(dy26arr[:, :, :, 0], (nm * ng, nfpu)), axis = 0)
dy85m = median(reshape(dy85arr[:, :, :, 0], (nm * ng, nfpu)), axis = 0)
negy = dy85m < -0.01
barr[2, negy] = 100 * (1 - dy26m[negy] / dy85m[negy])
posy = dy85m > 0.01
larr[2, posy] = 100 * (dy26m[posy] / dy85m[posy] - 1)
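# Worked example of the metric above: if the median delta yield is -10 under
# RCP 2.6 and -40 under RCP 8.5 (any consistent units), then
# beta = 100 * (1 - (-10) / (-40)) = 75, i.e. the low-emissions pathway avoids
# 75% of the projected loss; lambda is the analogous ratio for FPUs whose
# yields increase under RCP 8.5.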
filename, ext = splitext(mapfile)
mapfiles = [filename + '.noco2' + ext, filename + '.co2.hadgem' + ext, filename + '.co2' + ext]
filename, ext = splitext(ncfile)
ncfiles = [filename + '.noco2' + ext, filename + '.co2.hadgem' + ext, filename + '.co2' + ext]
for i in range(len(barr)):
# rasterize
bmap = masked_array(zeros((nlats, nlons)), mask = ones((nlats, nlons)))
lmap = masked_array(zeros((nlats, nlons)), mask = ones((nlats, nlons)))
for j in range(len(validfpus)):
fpuidx = where(fpu == validfpus[j])[0][0]
bmap[fpumap == validfpus[j]] = barr[i, fpuidx]
lmap[fpumap == validfpus[j]] = larr[i, fpuidx]
# plot map and fpu boundaries
plt.figure()
ax = plt.subplot(111)
m = Basemap(llcrnrlon = -180, llcrnrlat = -60, urcrnrlon = 180, urcrnrlat = 90, \
resolution = 'c', projection = 'cyl')
for record, shape in zip(records, shapes):
slons, slats = zip(*shape.points)
data = array(m(slons, slats)).T
if len(shape.parts) == 1:
segs = [data,]
else:
segs = []
for j in range(1, len(shape.parts)):
index = shape.parts[j - 1]
index2 = shape.parts[j]
segs.append(data[index : index2])
segs.append(data[index2 :])
lines = LineCollection(segs, antialiaseds = (1,))
lines.set_edgecolors('k')
lines.set_linewidth(0.1)
ax.add_collection(lines)
# plot variable map
glon, glat = meshgrid(lons, lats)
x, y = m(glon, glat)
cs1 = m.pcolor(x, y, bmap, vmin = -100, vmax = 100, cmap = matplotlib.colors.LinearSegmentedColormap('BlueGreen', cdict_beta))
cs2 = m.pcolor(x, y, lmap, vmin = -100, vmax = 0, cmap = matplotlib.colors.LinearSegmentedColormap('BlueGreen', cdict_lambda))
m.drawcoastlines()
m.drawmapboundary()
m.drawparallels(arange(90, -90, -30), labels = [1, 0, 0, 0])
m.drawmeridians(arange(-180, 180, 60), labels = [0, 0, 0, 1])
# save
plt.savefig(mapfiles[i])
plt.close()
# write data file
with nc(ncfiles[i], 'w') as f:
f.createDimension('lat', len(lats))
latvar = f.createVariable('lat', 'f8', 'lat')
latvar[:] = lats
latvar.units = 'degrees_north'
latvar.long_name = 'latitude'
f.createDimension('lon', len(lons))
lonvar = f.createVariable('lon', 'f8', 'lon')
lonvar[:] = lons
lonvar.units = 'degrees_east'
lonvar.long_name = 'longitude'
f.createDimension('fpu', nfpu)
fpuvar = f.createVariable('fpu', 'i4', 'fpu')
fpuvar[:] = fpu
fpuvar.units = 'FPU index'
fpuvar.long_name = '309 Food Producing Units'
bmvar = f.createVariable('beta', 'f8', ('lat', 'lon'), zlib = True, shuffle = False, complevel = 9, fill_value = 1e20)
bmvar[:] = bmap
bmvar.long_name = '100 * (1 - delta yield RCP 2.6 / delta yield RCP 8.5)'
lmvar = f.createVariable('lambda', 'f8', ('lat', 'lon'), zlib = True, shuffle = False, complevel = 9, fill_value = 1e20)
lmvar[:] = lmap
lmvar.long_name = '100 * (delta yield RCP 2.6 / delta yield RCP 8.5 - 1)'
|
agpl-3.0
|
ahaberlie/MetPy
|
docs/conf.py
|
1
|
11490
|
#!/usr/bin/env python3
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from datetime import datetime
import os
import sys
import metpy
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath(os.path.join('..', '..')))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '2.1'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx_gallery.gen_gallery',
'matplotlib.sphinxext.plot_directive',
'm2r'
]
sphinx_gallery_conf = {
'doc_module': ('metpy',),
'reference_url': {
'metpy': None,
},
'examples_dirs': [os.path.join('..', 'examples'), os.path.join('..', 'tutorials')],
'gallery_dirs': ['examples', 'tutorials'],
'filename_pattern': r'\.py',
'backreferences_dir': os.path.join('api', 'generated'),
'default_thumb_file': os.path.join('_static', 'metpy_150x150_white_bg.png'),
'abort_on_example_error': True
}
# Turn off code and image links for embedded mpl plots
plot_html_show_source_link = False
plot_html_show_formats = False
# Set up mapping for other projects' docs
intersphinx_mapping = {
# 'pint': ('http://pint.readthedocs.io/en/stable/', None),
'matplotlib': ('https://matplotlib.org/', None),
'python': ('https://docs.python.org/3/', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'xarray': ('https://xarray.pydata.org/en/stable/', None)
}
# Tweak how docs are formatted
napoleon_use_rtype = False
# Control main class documentation
autoclass_content = 'both'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = ['.rst', '.md']
# Controlling automatically generating summary tables in the docs
autosummary_generate = True
autosummary_imported_members = True
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'MetPy'
# noinspection PyShadowingBuiltins
copyright = ('2019, MetPy Developers. Development supported by National Science Foundation grants '
'AGS-1344155, OAC-1740315, and AGS-1901712.')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
verinfo = metpy.__version__
full_version = verinfo.split('+')[0]
version = full_version.rsplit('.', 1)[0]
# The full version, including alpha/beta/rc tags.
release = verinfo
rst_prolog = '''
.. |cite_version| replace:: {0}
.. |cite_year| replace:: {1:%Y}
.. |access_date| replace:: {1:%d %B %Y}
'''.format(full_version, datetime.utcnow())
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'autolink'
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
pass
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'canonical_url': 'https://unidata.github.io/MetPy/latest/'}
if 'sphinx_rtd_theme' in vars() and sphinx_rtd_theme.__version__ == '0.2.5b1.post1':
html_theme_options['versions'] = {'latest': '../latest', 'dev': '../dev'}
# Extra variables that will be available to the templates. Used to create the
# links to the Github repository sources and issues
html_context = {
'doc_path': 'docs',
'galleries': sphinx_gallery_conf['gallery_dirs'],
'gallery_dir': dict(zip(sphinx_gallery_conf['gallery_dirs'],
sphinx_gallery_conf['examples_dirs'])),
'api_dir': 'api/generated',
'github_repo': 'Unidata/MetPy',
'github_version': 'master', # Make changes to the master branch
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = ' '.join((project, version))
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = os.path.join('_static', 'metpy_150x150.png')
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = os.path.join('_static', 'metpy_32x32.ico')
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['theme_override.css']
html_js_files = ['pop_ver.js']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y at %H:%M:%S'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MetPydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'MetPy.tex', 'MetPy Documentation',
'MetPy Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'metpy', 'MetPy Documentation',
['MetPy Developers'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'MetPy', 'MetPy Documentation',
'MetPy Developers', 'MetPy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
linkcheck_ignore = [r'https://www\.youtube\.com/watch\?v=[\d\w\-_]+',
r'https://codecov.io/github/Unidata/MetPy',
r'https://youtu\.be/[\d\w\-_]+']
|
bsd-3-clause
|
DemocracyClub/yournextrepresentative
|
ynr/apps/sopn_parsing/migrations/0001_initial.py
|
1
|
2107
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-02-26 13:09
from __future__ import unicode_literals
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [("official_documents", "0024_add_relevant_pages")]
operations = [
migrations.CreateModel(
name="ParsedSOPN",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created",
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="created",
),
),
(
"modified",
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name="modified",
),
),
("raw_data", models.TextField()),
(
"raw_data_type",
models.CharField(default="pandas", max_length=255),
),
("parsed_data", models.TextField(null=True)),
(
"status",
models.CharField(default="unparsed", max_length=255),
),
(
"sopn",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
to="official_documents.OfficialDocument",
),
),
],
options={"abstract": False},
)
]
|
agpl-3.0
|
bsipocz/astroML
|
examples/datasets/plot_sdss_specgals.py
|
2
|
1802
|
"""
SDSS Spectroscopic Galaxy Sample
--------------------------------
This figure shows photometric colors of the SDSS spectroscopic galaxy
sample.
"""
# Author: Jake VanderPlas <[email protected]>
# License: BSD
# The figure is an example from astroML: see http://astroML.github.com
import numpy as np
from matplotlib import pyplot as plt
from astropy.visualization import hist
from astroML.datasets import fetch_sdss_specgals
data = fetch_sdss_specgals()
#------------------------------------------------------------
# plot the RA/DEC in an area-preserving projection
RA = data['ra']
DEC = data['dec']
# shift RA into [-180, 180] and convert coordinates to radians
RA -= 180
RA *= np.pi / 180
DEC *= np.pi / 180
ax = plt.axes(projection='mollweide')
ax.grid()
plt.scatter(RA, DEC, s=1, lw=0, c=data['z'], cmap=plt.cm.copper,
vmin=0, vmax=0.4)
plt.title('SDSS DR8 Spectroscopic Galaxies')
cb = plt.colorbar(cax=plt.axes([0.05, 0.1, 0.9, 0.05]),
orientation='horizontal',
ticks=np.linspace(0, 0.4, 9))
cb.set_label('redshift')
#------------------------------------------------------------
# plot the r vs u-r color-magnitude diagram
u = data['modelMag_u']
r = data['modelMag_r']
rPetro = data['petroMag_r']
plt.figure()
ax = plt.axes()
plt.scatter(u - r, rPetro, s=1, lw=0, c=data['z'], cmap=plt.cm.copper,
vmin=0, vmax=0.4)
plt.colorbar(ticks=np.linspace(0, 0.4, 9)).set_label('redshift')
plt.xlim(0.5, 5.5)
plt.ylim(18, 12.5)
plt.xlabel('u-r')
plt.ylabel('rPetrosian')
#------------------------------------------------------------
# plot a histogram of the redshift
plt.figure()
hist(data['z'], bins='knuth',
histtype='stepfilled', ec='k', fc='#F5CCB0')
plt.xlim(0, 0.4)
plt.xlabel('z (redshift)')
plt.ylabel('dN/dz(z)')
plt.show()
|
bsd-2-clause
|
chrsrds/scikit-learn
|
sklearn/utils/multiclass.py
|
1
|
15256
|
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from collections.abc import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from .validation import check_array, _assert_all_finite
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
        - mix of label indicator matrix and anything else
          (because there are no explicit labels)
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == {"binary", "multiclass"}:
ys_types = {"multiclass"}
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, str) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
        Return ``True``, if ``y`` is in a multilabel format, else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.unique(y.data).size == 1 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def check_classification_targets(y):
"""Ensure that target y is of a non-regression type.
Only the following target types (as defined in type_of_target) are allowed:
'binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences'
Parameters
----------
y : array-like
"""
y_type = type_of_target(y)
if y_type not in ['binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences']:
raise ValueError("Unknown label type: %r" % y_type)
def type_of_target(y):
"""Determine the type of data indicated by the target.
Note that this type is the most specific type that can be inferred.
For example:
* ``binary`` is more specific but compatible with ``multiclass``.
* ``multiclass`` of integers is more specific but compatible with
``continuous``.
* ``multilabel-indicator`` is more specific but compatible with
``multiclass-multioutput``.
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, str))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
sparse_pandas = (y.__class__.__name__ in ['SparseSeries', 'SparseArray'])
if sparse_pandas:
raise ValueError("y cannot be class 'SparseSeries' or 'SparseArray'")
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], str)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead - the MultiLabelBinarizer'
' transformer can convert to this format.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], str)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
_assert_all_finite(y)
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not np.array_equal(clf.classes_, unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
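# Usage sketch (illustration only, not part of the original module): an
# estimator implementing ``partial_fit`` would typically call the helper like
#
#     if _check_partial_fit_first_call(self, classes):
#         # first call: self.classes_ has just been set, so allocate any
#         # per-class state here (e.g. running counts per class)
#         ...
#
# Later calls with the same ``classes`` (or with ``classes=None``) return False
# and leave ``self.classes_`` untouched.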
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = np.bincount(y_k, weights=nz_samp_weight)
# An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = np.bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
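# Worked example (illustration only, not part of the original module): for a
# dense two-output target
#
#     y = np.array([[1, 0],
#                   [2, 0],
#                   [1, 1]])
#     classes, n_classes, class_prior = class_distribution(y)
#
# the helper returns classes == [array([1, 2]), array([0, 1])],
# n_classes == [2, 2], and a class prior of roughly [2/3, 1/3] for each output.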
def _ovr_decision_function(predictions, confidences, n_classes):
"""Compute a continuous, tie-breaking OvR decision function from OvO.
It is important to include a continuous value, not only votes,
to make computing AUC or calibration meaningful.
Parameters
----------
predictions : array-like, shape (n_samples, n_classifiers)
Predicted classes for each binary classifier.
confidences : array-like, shape (n_samples, n_classifiers)
Decision functions or predicted probabilities for positive class
for each binary classifier.
n_classes : int
Number of classes. n_classifiers must be
        ``n_classes * (n_classes - 1) / 2``
"""
n_samples = predictions.shape[0]
votes = np.zeros((n_samples, n_classes))
sum_of_confidences = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
sum_of_confidences[:, i] -= confidences[:, k]
sum_of_confidences[:, j] += confidences[:, k]
votes[predictions[:, k] == 0, i] += 1
votes[predictions[:, k] == 1, j] += 1
k += 1
# Monotonically transform the sum_of_confidences to (-1/3, 1/3)
# and add it with votes. The monotonic transformation is
# f: x -> x / (3 * (|x| + 1)), it uses 1/3 instead of 1/2
# to ensure that we won't reach the limits and change vote order.
# The motivation is to use confidence levels as a way to break ties in
# the votes without switching any decision made based on a difference
# of 1 vote.
transformed_confidences = (sum_of_confidences /
(3 * (np.abs(sum_of_confidences) + 1)))
return votes + transformed_confidences
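# Illustration (not part of the original module): for n_classes == 3 the OvO
# classifiers are ordered (0 vs 1), (0 vs 2), (1 vs 2); ``predictions`` holds a
# 0/1 column per pair and ``confidences`` the corresponding decision values.
# Because the transform maps every confidence sum into (-1/3, 1/3), it can only
# reorder classes whose integer vote counts are tied.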
|
bsd-3-clause
|
mugizico/scikit-learn
|
sklearn/tree/tree.py
|
113
|
34767
|
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is necessary to preserve data contiguity, in contrast to
            # y[:, np.newaxis] which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
        if not 0 <= self.min_weight_fraction_leaf <= 0.5:
            raise ValueError("min_weight_fraction_leaf must be in [0, 0.5]")
        if max_depth <= 0:
            raise ValueError("max_depth must be greater than zero.")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
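    # Illustrative note (not part of the original source): with
    # n_features_ == 100, the string options handled above resolve to
    # max_features == 10 for "auto" on classifiers (sqrt), 100 for "auto" on
    # regressors, 10 for "sqrt", 6 for "log2", and 100 for None.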
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
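    # Usage sketch (illustration only): for a fitted single-output classifier
    # ``clf`` and a test matrix ``X_test`` of shape (n_samples, n_features),
    # ``clf.predict_proba(X_test)`` returns an array of shape
    # (n_samples, n_classes) whose rows are normalized to sum to 1; with
    # several outputs, a list of such arrays is returned, one per output.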
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
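# Usage note (illustration only, not part of the original module): as the
# warnings above state, the two Extra* classes are meant to be used as base
# estimators inside ensemble methods, e.g.
#
#     from sklearn.ensemble import ExtraTreesClassifier
#     ExtraTreesClassifier(n_estimators=100).fit(X, y)
#
# which draws random splits per candidate feature as described in the class
# docstrings.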
|
bsd-3-clause
|
claesenm/HPOlib
|
HPOlib/Plotting/plotBranin.py
|
5
|
5471
|
#!/usr/bin/env python
##
# wrapping: A program making it easy to use hyperparameter
# optimization software.
# Copyright (C) 2013 Katharina Eggensperger and Matthias Feurer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from argparse import ArgumentParser
import cPickle
import itertools
import sys
import matplotlib.cm
import matplotlib.gridspec as gridSpec
import matplotlib.pyplot
import numpy as np
import HPOlib.benchmark_functions
import HPOlib.Plotting.plot_util as plotUtil
__authors__ = ["Katharina Eggensperger", "Matthias Feurer"]
__contact__ = "automl.org"
def plot_contour(trial_list, name_list, save="", title=""):
# constraints:
# -5 <= x <= 10, 0 <= y <= 15
# three global optima: (-pi, 12.275), (pi, 2.275), (9.42478, 2.475), where
# branin = 0.397887
markers = itertools.cycle(['o', 's', '^', 'x'])
colors = itertools.cycle(['b', 'g', 'r', 'k'])
size = 5
# Get handles
ratio = 5
gs = gridSpec.GridSpec(ratio, 1)
fig = matplotlib.pyplot.figure(1, dpi=100)
fig.suptitle(title)
ax = matplotlib.pyplot.subplot(gs[0:ratio, :])
ax.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
xopt = [-np.pi, np.pi, 9.42478]
yopt = [12.275, 2.275, 2.475]
# Plot Branin
step = 0.1
xi = np.arange(-5, 10 + step, step)
yi = np.arange(-0, 15 + step, step)
z = np.zeros([len(xi), len(yi)])
for i in range(len(xi)):
for j in range(len(yi)):
#z[j, i] = np.power(np.e, branin.branin({"x":xi[i], "y":yi[j]}))
z[j, i] = HPOlib.benchmark_functions.branin(x=xi[i], y=yi[j])
xi, yi = np.meshgrid(xi, yi)
cax = ax.contourf(xi, yi, z, 50, cmap=matplotlib.cm.gray)
fig.colorbar(cax)
# Plot Optimums after all work is done
matplotlib.pyplot.scatter(xopt, yopt, marker="o", facecolor='w', edgecolor='w', s=20*size, label="Optimum")
# Get values
for opt in range(len(name_list)):
print name_list[opt], "has", len(trial_list[opt]['trials']), "samples"
m = markers.next()
c = colors.next()
x = np.zeros(len(trial_list[opt]["trials"]))
y = np.zeros(len(trial_list[opt]["trials"]))
for i in range(len(x)):
if '-x' in trial_list[opt]["trials"][i]["params"]:
x[i] = float(trial_list[opt]["trials"][i]["params"]["-x"].strip("'"))
y[i] = float(trial_list[opt]["trials"][i]["params"]["-y"].strip("'"))
else:
x[i] = float(trial_list[opt]["trials"][i]["params"]["x"].strip("'"))
y[i] = float(trial_list[opt]["trials"][i]["params"]["y"].strip("'"))
matplotlib.pyplot.scatter(x[0:10], y[0:10], marker=m,
s=size, facecolors=c, linewidth=0.1)
matplotlib.pyplot.scatter(x[10:-10], y[10:-10], marker=m,
linewidth=0.1, s=4*size, facecolors=c)
matplotlib.pyplot.scatter(x[-10:-1], y[-10:-1], marker=m,
linewidth=0.1, s=6*size, facecolors=c, label=name_list[opt][0])
matplotlib.pyplot.xlim([-5, 10])
matplotlib.pyplot.ylim([-0, 15])
matplotlib.pyplot.xlabel("X")
matplotlib.pyplot.ylabel("Y")
# Describe the plot
matplotlib.pyplot.title(title)
leg = matplotlib.pyplot.legend(loc="best", fancybox=True)
leg.get_frame().set_alpha(0.5)
if save != "":
matplotlib.pyplot.subplots_adjust(top=0.85)
matplotlib.pyplot.savefig(save, dpi=600, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches="tight", pad_inches=0.1)
else:
matplotlib.pyplot.show()
def main():
prog = "python plotBranin.py whatIsThis <onepkl> [whatIsThis] <onepkl>]"
description = "Plot a Trace with std for multiple experiments"
parser = ArgumentParser(description=description, prog=prog)
parser.add_argument("-s", "--save", dest="save", default="",
help="Where to save plot instead of showing it?")
parser.add_argument("-t", "--title", dest="title", default="",
help="Optional supertitle for plot")
args, unknown = parser.parse_known_args()
if len(unknown) % 2 != 0:
print "Wrong number of arguments", len(args)
print prog
sys.exit(1)
pkl_list, name_list = plotUtil.get_pkl_and_name_list(unknown)
trial_list = list()
for i in range(len(name_list)):
result_file = pkl_list[i][0]
fh = open(result_file, "r")
trials = cPickle.load(fh)
fh.close()
trial_list.append(trials)
plot_contour(trial_list=trial_list, name_list=name_list, save=args.save, title=args.title)
if __name__ == "__main__":
main()
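# Example invocation (illustration only; the optimizer names and pickle files
# are hypothetical):
#
#     python plotBranin.py SMAC smac_run.pkl TPE tpe_run.pkl -s branin.png
#
# i.e. pairs of <label> <pickle> as expected by get_pkl_and_name_list, with -s
# to save the figure instead of showing it interactively.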
|
gpl-3.0
|
Mazecreator/tensorflow
|
tensorflow/contrib/learn/python/learn/learn_io/pandas_io_test.py
|
111
|
7865
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pandas_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import pandas_io
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class PandasIoTest(test.TestCase):
def makeTestDataFrame(self):
index = np.arange(100, 104)
a = np.arange(4)
b = np.arange(32, 36)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -28), index=index)
return x, y
def callInputFnOnce(self, input_fn, session):
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
result_values = session.run(results)
coord.request_stop()
coord.join(threads)
return result_values
def testPandasInputFn_IndexMismatch(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaises(ValueError):
pandas_io.pandas_input_fn(
x, y_noindex, batch_size=2, shuffle=False, num_epochs=1)
def testPandasInputFn_ProducesExpectedOutputs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
def testPandasInputFn_ProducesOutputsForLargeBatchAndMultipleEpochs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 102)
a = np.arange(2)
b = np.arange(32, 34)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -30), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=128, shuffle=False, num_epochs=2)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1, 0, 1])
self.assertAllEqual(features['b'], [32, 33, 32, 33])
self.assertAllEqual(target, [-32, -31, -32, -31])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_ProducesOutputsWhenDataSizeNotDividedByBatchSize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 105)
a = np.arange(5)
b = np.arange(32, 37)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -27), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
features, target = session.run(results)
self.assertAllEqual(features['a'], [2, 3])
self.assertAllEqual(features['b'], [34, 35])
self.assertAllEqual(target, [-30, -29])
features, target = session.run(results)
self.assertAllEqual(features['a'], [4])
self.assertAllEqual(features['b'], [36])
self.assertAllEqual(target, [-28])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_OnlyX(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, _ = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y=None, batch_size=2, shuffle=False, num_epochs=1)
features = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
def testPandasInputFn_ExcludesIndex(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, _ = self.callInputFnOnce(input_fn, session)
self.assertFalse('index' in features)
def assertInputsCallableNTimes(self, input_fn, session, n):
inputs = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
for _ in range(n):
session.run(inputs)
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_RespectsEpoch_NoShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=False, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=True, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2)
self.assertInputsCallableNTimes(input_fn, session, 4)
def testPandasInputFn_RespectsEpochUnevenBatches(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
with self.test_session() as session:
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=3, shuffle=False, num_epochs=1)
# Before the last batch, only one element of the epoch should remain.
self.assertInputsCallableNTimes(input_fn, session, 2)
def testPandasInputFn_Idempotent(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, num_epochs=1)()
if __name__ == '__main__':
test.main()
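# Run note (illustration only): these tests target the TF 1.x contrib input
# pipeline and return early (skip silently) when pandas is not installed; they
# can be executed directly with `python pandas_io_test.py`.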
|
apache-2.0
|
lthurlow/Network-Grapher
|
proj/external/matplotlib-1.2.1/doc/mpl_examples/user_interfaces/interactive2.py
|
9
|
10400
|
#!/usr/bin/env python
from __future__ import print_function
# GTK Interactive Console
# (C) 2003, Jon Anderson
# See www.python.org/2.2/license.html for
# license details.
#
import gtk
import gtk.gdk
import code
import os, sys
import pango
import __builtin__
import __main__
banner = """GTK Interactive Python Console
Thanks to Jon Anderson
%s
""" % sys.version
banner += """
Welcome to matplotlib.
help(matplotlib) -- some general information about matplotlib
help(plotting) -- shows a list of plot specific commands
"""
class Completer:
"""
Taken from rlcompleter, with readline references stripped, and a local dictionary to use.
"""
def __init__(self, locals):
self.locals = locals
def complete(self, text, state):
"""Return the next possible completion for 'text'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'.
"""
if state == 0:
if "." in text:
self.matches = self.attr_matches(text)
else:
self.matches = self.global_matches(text)
try:
return self.matches[state]
except IndexError:
return None
def global_matches(self, text):
"""Compute matches when text is a simple name.
Return a list of all keywords, built-in functions and names
        currently defined in __main__ that match.
"""
import keyword
matches = []
n = len(text)
for list in [keyword.kwlist,__builtin__.__dict__.keys(),__main__.__dict__.keys(), self.locals.keys()]:
for word in list:
if word[:n] == text and word != "__builtins__":
matches.append(word)
return matches
def attr_matches(self, text):
"""Compute matches when text contains a dot.
Assuming the text is of the form NAME.NAME....[NAME], and is
evaluatable in the globals of __main__, it will be evaluated
and its attributes (as revealed by dir()) are used as possible
        completions. (For class instances, class members are also
considered.)
WARNING: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated.
"""
import re
m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
if not m:
return
expr, attr = m.group(1, 3)
object = eval(expr, __main__.__dict__, self.locals)
words = dir(object)
if hasattr(object,'__class__'):
words.append('__class__')
words = words + get_class_members(object.__class__)
matches = []
n = len(attr)
for word in words:
if word[:n] == attr and word != "__builtins__":
matches.append("%s.%s" % (expr, word))
return matches
def get_class_members(klass):
ret = dir(klass)
if hasattr(klass,'__bases__'):
for base in klass.__bases__:
ret = ret + get_class_members(base)
return ret
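# Usage sketch (illustration only, not part of the original example): the
# completer protocol is to call complete() with increasing ``state`` until it
# returns None, e.g.
#
#     comp = Completer({})
#     matches = []
#     while True:
#         m = comp.complete("pri", len(matches))
#         if m is None:
#             break
#         matches.append(m)
#     # matches now contains names such as "print", drawn from keywords,
#     # builtins, __main__ and the supplied local dictionary.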
class OutputStream:
"""
A Multiplexing output stream.
    It can replace another stream, and tee output to the original stream and to
    a GTK textview.
"""
def __init__(self,view,old_out,style):
self.view = view
self.buffer = view.get_buffer()
self.mark = self.buffer.create_mark("End",self.buffer.get_end_iter(), False )
self.out = old_out
self.style = style
self.tee = 1
def write(self,text):
if self.tee:
self.out.write(text)
end = self.buffer.get_end_iter()
if not self.view == None:
self.view.scroll_to_mark(self.mark, 0, True, 1, 1)
self.buffer.insert_with_tags(end,text,self.style)
class GTKInterpreterConsole(gtk.ScrolledWindow):
"""
An InteractiveConsole for GTK. It's an actual widget,
so it can be dropped in just about anywhere.
"""
def __init__(self):
gtk.ScrolledWindow.__init__(self)
self.set_policy (gtk.POLICY_AUTOMATIC,gtk.POLICY_AUTOMATIC)
self.text = gtk.TextView()
self.text.set_wrap_mode(True)
self.interpreter = code.InteractiveInterpreter()
self.completer = Completer(self.interpreter.locals)
self.buffer = []
self.history = []
self.banner = banner
self.ps1 = ">>> "
self.ps2 = "... "
self.text.add_events( gtk.gdk.KEY_PRESS_MASK )
self.text.connect( "key_press_event", self.key_pressed )
self.current_history = -1
self.mark = self.text.get_buffer().create_mark("End",self.text.get_buffer().get_end_iter(), False )
#setup colors
self.style_banner = gtk.TextTag("banner")
self.style_banner.set_property( "foreground", "saddle brown" )
self.style_ps1 = gtk.TextTag("ps1")
self.style_ps1.set_property( "foreground", "DarkOrchid4" )
self.style_ps1.set_property( "editable", False )
self.style_ps1.set_property("font", "courier" )
self.style_ps2 = gtk.TextTag("ps2")
self.style_ps2.set_property( "foreground", "DarkOliveGreen" )
self.style_ps2.set_property( "editable", False )
self.style_ps2.set_property("font", "courier" )
self.style_out = gtk.TextTag("stdout")
self.style_out.set_property( "foreground", "midnight blue" )
self.style_err = gtk.TextTag("stderr")
self.style_err.set_property( "style", pango.STYLE_ITALIC )
self.style_err.set_property( "foreground", "red" )
self.text.get_buffer().get_tag_table().add(self.style_banner)
self.text.get_buffer().get_tag_table().add(self.style_ps1)
self.text.get_buffer().get_tag_table().add(self.style_ps2)
self.text.get_buffer().get_tag_table().add(self.style_out)
self.text.get_buffer().get_tag_table().add(self.style_err)
self.stdout = OutputStream(self.text,sys.stdout,self.style_out)
self.stderr = OutputStream(self.text,sys.stderr,self.style_err)
sys.stderr = self.stderr
sys.stdout = self.stdout
self.current_prompt = None
self.write_line(self.banner, self.style_banner)
self.prompt_ps1()
self.add(self.text)
self.text.show()
def reset_history(self):
self.history = []
def reset_buffer(self):
self.buffer = []
def prompt_ps1(self):
self.current_prompt = self.prompt_ps1
self.write_line(self.ps1,self.style_ps1)
def prompt_ps2(self):
self.current_prompt = self.prompt_ps2
self.write_line(self.ps2,self.style_ps2)
def write_line(self,text,style=None):
start,end = self.text.get_buffer().get_bounds()
if style==None:
self.text.get_buffer().insert(end,text)
else:
self.text.get_buffer().insert_with_tags(end,text,style)
self.text.scroll_to_mark(self.mark, 0, True, 1, 1)
def push(self, line):
self.buffer.append(line)
if len(line) > 0:
self.history.append(line)
source = "\n".join(self.buffer)
more = self.interpreter.runsource(source, "<<console>>")
if not more:
self.reset_buffer()
return more
def key_pressed(self,widget,event):
if event.keyval == gtk.gdk.keyval_from_name('Return'):
return self.execute_line()
if event.keyval == gtk.gdk.keyval_from_name('Up'):
self.current_history = self.current_history - 1
if self.current_history < - len(self.history):
self.current_history = - len(self.history)
return self.show_history()
elif event.keyval == gtk.gdk.keyval_from_name('Down'):
self.current_history = self.current_history + 1
if self.current_history > 0:
self.current_history = 0
return self.show_history()
elif event.keyval == gtk.gdk.keyval_from_name( 'Home'):
l = self.text.get_buffer().get_line_count() - 1
start = self.text.get_buffer().get_iter_at_line_offset(l,4)
self.text.get_buffer().place_cursor(start)
return True
elif event.keyval == gtk.gdk.keyval_from_name( 'space') and event.state & gtk.gdk.CONTROL_MASK:
return self.complete_line()
return False
def show_history(self):
if self.current_history == 0:
return True
else:
self.replace_line( self.history[self.current_history] )
return True
def current_line(self):
start,end = self.current_line_bounds()
return self.text.get_buffer().get_text(start,end, True)
def current_line_bounds(self):
txt_buffer = self.text.get_buffer()
l = txt_buffer.get_line_count() - 1
start = txt_buffer.get_iter_at_line(l)
if start.get_chars_in_line() >= 4:
start.forward_chars(4)
end = txt_buffer.get_end_iter()
return start,end
def replace_line(self,txt):
start,end = self.current_line_bounds()
self.text.get_buffer().delete(start,end)
self.write_line(txt)
def execute_line(self, line=None):
if line is None:
line = self.current_line()
self.write_line("\n")
else:
self.write_line(line + "\n")
more = self.push(line)
self.text.get_buffer().place_cursor(self.text.get_buffer().get_end_iter())
if more:
self.prompt_ps2()
else:
self.prompt_ps1()
self.current_history = 0
self.window.raise_()
return True
def complete_line(self):
line = self.current_line()
tokens = line.split()
token = tokens[-1]
completions = []
p = self.completer.complete(token,len(completions))
while p != None:
completions.append(p)
p = self.completer.complete(token, len(completions))
if len(completions) != 1:
self.write_line("\n")
self.write_line("\n".join(completions), self.style_ps1)
self.write_line("\n")
self.current_prompt()
self.write_line(line)
else:
i = line.rfind(token)
line = line[0:i] + completions[0]
self.replace_line(line)
return True
def main():
w = gtk.Window()
console = GTKInterpreterConsole()
console.set_size_request(640,480)
w.add(console)
def destroy(arg=None):
gtk.main_quit()
def key_event(widget,event):
if gtk.gdk.keyval_name( event.keyval) == 'd' and \
event.state & gtk.gdk.CONTROL_MASK:
destroy()
return False
w.connect("destroy", destroy)
w.add_events( gtk.gdk.KEY_PRESS_MASK )
w.connect( 'key_press_event', key_event)
w.show_all()
console.execute_line('import matplotlib')
console.execute_line("matplotlib.use('GTKAgg')")
console.execute_line('matplotlib.interactive(1)')
console.execute_line('from pylab import *')
if len(sys.argv)>1:
fname = sys.argv[1]
if not os.path.exists(fname):
print('%s does not exist' % fname)
for line in file(fname):
line = line.strip()
console.execute_line(line)
gtk.main()
if __name__ == '__main__':
main()
|
mit
|
MastenSpace/pysur
|
SensitivityAnalysis/design_optimizer.py
|
1
|
22314
|
"""
Copyright 2017 Masten Space Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Jack Nelson <[email protected]>
"""
import sys
import os
import math
import time
import itertools
import pdb
import random
from sklearn import preprocessing
import numpy as np
import pickle
import scipy.optimize as optimize
import mvpa2.suite as pa
###########################
thispath = os.path.dirname(os.path.abspath(__file__))
config_relative_path="/../"
sys.path.append(thispath + config_relative_path)
import config
import GenSAData as SA
###########################
class design_optimizer():
"""
Class that constructs an optimization problem based on surrogate models, input parameters,
and weights to control the optimization of output parameters.
"""
def __init__(self, **kwargs):
"""
Constructor for an optimizer object with a cost function based on a set of surrogate
models and output parameter weights.
kwargs:
models:
list of sklearn SVR objects. Optional.
List of trained support-vector-regression (SVR) objects representing surrogate models.
model_files:
list of pickle files. Optional.
A list of pickle files containing surrogate models. The surrogate models are loaded from these files
into a list used to make predictions on input sample data.
Overrides 'models' parameter if set.
scalers:
list of sklearn MinMaxScaler objects. Optional.
Data scaler objects used to scale the surrogate model outputs to their proper ranges. These
scalers should be generated simultaneously with the surrogate models.
datascaler_files:
list of pickle files. Optional.
A list of pickle files containing scikit-learn MinMaxScaler objects used to inverse_transform
the surrogate model predictions back to their normal ranges.
Overrides 'scalers' parameter if set.
output_weights:
list of signed floats. Must be length N.
List of penalty/reward weights that drive optimization.
target_goals:
list of floats. Must be length N.
The goal values for each model.
samples_file:
pickle file
An array with samples arranged row-wise and features arranged column-wise
optimize_title:
string
The name to use for this optimizer object. Used to title plots, results, output files...
target_names:
list of strings.
A list of the target (surrogate model output) names. Each string in the list corresponds
to the outputs of the surrogate model with the same index in the list of surrogate
models loaded from file.
costf_params:
list of strings
A list of the names of features (input parameters) or targets (output parameters) that
will be used in cost function evaluation.
target_weights:
list of floats
A list of weight values to use for cost function evaluation. Each weight in the list corresponds
to the weight value for the parameter with the same index in costf_parameters (see above)
som_iter: integer. Optional. Default 200
The number of training iterations to use for training Self-Organizing Maps (SOMs).
N_sample_skip:
integer. Optional. Default 1
Downsample the input samples further by setting this to > 1. Sets how many samples to skip
over when generating the SOMs or optimizing. If N_sample_skip = 100, the number of samples used
will be the number of input samples loaded from samples_file divided by 100.
output_dir: string.
Optional. Default './optimize_outputs'
Directory in which all output data products and plots will be written.
param_bounds_file:
file. Optional. Default config.param_file
som_dimensions:
integer. Optional.
Default is the sqrt of the number of samples being mapped.
The square dimension size of the som map.
debug:
Boolean. Default False.
"""
# set class attributes with the kwargs
self.samples_file = kwargs.get('samples_file', None)
self.optimize_title = kwargs.get('optimize_title', config.optimize_title)
self.model_files = kwargs.get('model_files', config.model_files)
self.inscaler_file = kwargs.get('inscaler_file', config.inscaler_file)
self.outscaler_files = kwargs.get('datascaler_files', config.datascaler_files)
self.features = kwargs.get('feature_names', config.features)
self.targets = kwargs.get('target_names', config.target_names)
self.cost_function = kwargs.get('cost_function', config.cost_function)
self.som_iter = kwargs.get('som_iter', config.som_iter)
self.N_sample_skip = kwargs.get('N_sample_skip', config.N_sample_skip)
self.outputdir = kwargs.get('output_dir', './optimize_outputs')
self.param_bounds_file = kwargs.get('param_bounds_file', config.param_file)
self.debug = kwargs.get('debug', config.DEBUG)
self.som_dimensions = kwargs.get('som_dimensions', config.som_dimensions)
self.som_learning_rate = kwargs.get('som_learning_rate', config.som_learning_rate)
# setup attributes loaded from file
self.models = self._load_models()
self.inscaler = self._load_inscaler()
self.outscalers = self._load_outscalers()
if self.samples_file is not None:
self.samples = self.load_samples(self.samples_file)
else:
self.samples = None
# initialize attributes to be set later
self.som_plot = None
# stuff for versioning results (not yet implemented)
self.version_list = ['alpha', 'beta', 'gamma', 'delta', 'epsilon', 'zeta', 'eta', 'theta', 'iota', 'kappa', 'lambda', 'mu', 'nu', 'xi', 'omicron', 'pi', 'rho', 'sigma', 'tau', 'upsilon', 'phi', 'chi', 'psi', 'omega']
self.N_version = 0
if self.debug == True:
print "Number of models: ", np.shape(self.models)
print "Number of weights: ", len(self.cost_function.values())
print "Number of features: ", len(self.features)
print "Number of targets: ", len(self.targets)
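# Hedged construction sketch (file names and parameter names below are
# hypothetical, not from this repository); kwargs that are not given fall back
# to the values defined in the root config module:
#   opt = design_optimizer(samples_file='lhs_samples.pkl',
#                          optimize_title='engine_trade_study',
#                          cost_function={'dry_mass': 1.0, 'thrust': -2.0},
#                          N_sample_skip=100)
#   opt.evaluate_design_space()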
def _load_models(self):
"""
Loads a list of saved surrogate models from file.
"""
surrogate_models = []
for fname in self.model_files:
with open("../SurrogateModeling/SM_outputs/%s" %fname, 'r') as pf:
temp_models = pickle.load(pf)
for model in temp_models:
surrogate_models.append(model)
print "surrogate models loaded: ", len(surrogate_models)
return surrogate_models
def _load_inscaler(self):
with open("../SurrogateModeling/SM_outputs/%s" %self.inscaler_file, 'r') as pf:
scaler = pickle.load(pf)
print "Loading inscaler"
return scaler
def _load_outscalers(self):
datascalers = []
for fname in self.outscaler_files:
with open("../SurrogateModeling/SM_outputs/%s" %fname, 'r') as pf:
temp_scalers = pickle.load(pf)
for scaler in temp_scalers:
datascalers.append(scaler)
print "outscalers loaded: ", len(datascalers)
return datascalers
def load_samples(self, sample_file):
# load the sampling data from file
with open(sample_file, 'r') as pf:
samples = pickle.load(pf)
print "samples shape: ", np.shape(samples)
return samples
def costf(self, input_sample):
"""
Evaluates the optimizer's cost function for a set of given inputs. Assumes the input_sample
is scaled between 0.0 and 1.0.
Returns:
cost: The cost value of the models for the given sample
model_outputs: A list of the predicted target values of each model for this sample
"""
model_outputs = []
cost = 0
for param in self.cost_function:
weight = self.cost_function[param]
if param in self.targets:
model_index = self.targets.index(param)
model = self.models[model_index]
model_output = model.predict(input_sample)
cost += weight * model_output
model_outputs = np.append(model_outputs, model_output)
elif param in self.features:
feature_index = self.features.index(param)
cost += weight * input_sample[feature_index]
return cost, model_outputs
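# Hedged illustration of how costf combines the weights (names are hypothetical):
# with cost_function = {'dry_mass': 1.0, 'tank_radius': -0.5}, where 'dry_mass'
# is a target and 'tank_radius' is a feature, the cost of a scaled sample x is
#   cost = 1.0 * model_dry_mass.predict(x) + (-0.5) * x[features.index('tank_radius')]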
def optimization_costf(self, input_sample):
"""
Evaluates the optimizer's cost function for a set of given inputs. Assumes the input_sample
is scaled between 0.0 and 1.0. Evaluates identically to costf, but returns just the cost
function evaluation and not the individual model outputs.
Returns:
cost: The cost value of the models for the given sample
"""
cost = 0
for param in self.cost_function:
weight = self.cost_function[param]
if param in self.targets:
model_index = self.targets.index(param)
model = self.models[model_index]
model_output = model.predict(input_sample)
cost += weight * model_output
elif param in self.features:
feature_index = self.features.index(param)
cost += weight * input_sample[feature_index]
return cost
def make_SOM(self):
"""
Generates a self-organizing map (SOM) from
"""
pass
def evaluate_design_space(self, samples = None, scale_outputs = True):
"""
Evaluates the optimizer's cost function across an array of sample inputs
that define a design space.
First, the optimizer's cost function is evaluated across the entire design space as
defined by the input samples for all of its surrogate models. The cost function evaluations
are then mapped using a Self-Organizing Map (SOM) to visualize the design space.
Args:
samples: 2D array
Samples arranged row-wise. Input parameters arranged by column.
scale_outputs: Boolean. Default True.
Scales the predictions from the models back to their normal ranges if True.
Returns:
"""
if samples is None and self.samples is None:
print "Error. No samples to evaluate. Need to load samples from file using load_samples, or pass an array of samples in with the kwarg 'samples'."
return
elif samples is None and self.samples is not None:
samples = self.samples
# downsample the samples for faster debugging.
if self.N_sample_skip > 1:
new_samples = []
for i in range(0,len(samples), self.N_sample_skip):
new_samples.append(samples[i])
samples = new_samples
# scale the inputs to normalized ranges to be used in the surrogate models
scaled_samples = self.inscaler.transform(samples)
# Evaluate the cost function across the entire sample space
print "Number of samples to evaluate: %d" %(len(scaled_samples))
print "Evaluating the design space..."
scores = []
model_outputs = []
start = time.time()
for i, sample in enumerate(scaled_samples):
sample_score, model_evals = self.costf(sample)
scores.append(sample_score)
model_outputs.append(model_evals)
scores = np.array(scores)
model_outputs = np.array(model_outputs)
# the outputs from the surrogate models we get from costf() are normalized. We need to scale them to their
# real ranges using the datascalers attribute, which is a list of MinMaxScalers for each target parameter
for i, column in enumerate(model_outputs.T):
model_outputs[:,i] = self.outscalers[i].inverse_transform(column)
print "Evaluating the design space... Complete. Elapsed time: %f" %(time.time() - start)
print "samples shape: ", np.shape(scaled_samples)
print "model_outputs shape: ", np.shape(model_outputs)
print "scores shape: ", np.shape(scores)
# combine the inputs, model outputs, and fitness scores for each sample into a single array
training_data = np.hstack((samples, model_outputs, np.reshape(scores, (len(scores), 1))))
# create a SimpleSOMMapper object to generate SOMs
if self.som_dimensions is None:
self.som_dimensions = math.floor(math.sqrt(len(samples)))
print "\nSOM dimensions: %dx%d" %(self.som_dimensions, self.som_dimensions)
print "SOM training iterations : %d" %self.som_iter
som = pa.SimpleSOMMapper((self.som_dimensions, self.som_dimensions), self.som_iter, learning_rate = self.som_learning_rate)
print "Training the SOM..."
train_start = time.time()
som.train(training_data)
train_stop = time.time()
train_elapsed = train_stop - train_start
print "Training the SOM... Complete. Time elapsed: %f" %(train_elapsed)
print "K shape: ", np.shape(som.K)
# check if the output directory for this SOM generation run exists. If not, create it.
som_output_dir = "%s/%s" %(self.outputdir, self.optimize_title)
if not os.path.exists(som_output_dir):
os.makedirs(som_output_dir)
import matplotlib.pyplot as plt
title_list = list(self.features) # copy, so appending targets below does not modify self.features in place
for target in self.targets:
title_list.append(target)
title_list.append('Cost_Function')
#print "SOM plot titles:\n", title_list
for i, som_plot_title in enumerate(title_list):
print "Mapping %s" %som_plot_title
img = plt.imshow(som.K[:,:,i], origin='lower')
ax=plt.gca()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
mapped = som(training_data)
#print "Map shape: ", np.shape(mapped)
#for i, m in enumerate(mapped):
# plt.text(m[1], m[0], self.features[i], ha='center', va='center', bbox=dict(facecolor='white', alpha=0.5, lw=0))
plt.title('%s'%(som_plot_title))
plt.colorbar(orientation='vertical')
plt.draw()
plt.savefig("%s/%s_%s.png" %(som_output_dir, self.optimize_title ,som_plot_title))
plt.close()
# save the configuration parameters that were used for this generation to a text file
with open("%s/%s_parameters.cfg" %(som_output_dir, self.optimize_title), 'w') as f:
f.write("Generation Title: %s\n" %self.optimize_title)
f.write("model files: ")
for model in self.model_files:
f.write('\t' + model)
f.write("\nscaler files: ")
for scaler in self.outscaler_files:
f.write('\t' + scaler)
f.write("\ntarget names:")
for name in self.targets:
f.write('\n\t' + name)
f.write("\ntarget weights: ")
for weight in self.cost_function.values():
f.write("%1.1f" %weight)
f.write('\n')
f.write("\nSOM Training Parameters:\n")
f.write("\tdimensions: %dx%d\n" %(self.som_dimensions, self.som_dimensions))
f.write("\ttraining iterations: %d\n" %self.som_iter)
f.write("\tn_samples: %d\n" %len(scaled_samples))
f.write("\ttraining duration: %f\n" %train_elapsed)
def load_param_bounds(self):
"""
Load the lower and upper bounds (constraints) for the input parameters from the parameter
bounds file set by param_file in the root config.
"""
param_bounds = []
with open(self.param_bounds_file, 'r') as param_file:
for line in param_file:
param_bounds.append(line.rstrip('\n').split('\t'))
return param_bounds
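# Hedged sketch of the expected bounds file layout (inferred from the tab split
# above and from the param[1]/param[2] indexing in optimize_design; the column
# meanings are assumptions):
#   <param_name>\t<lower_bound>\t<upper_bound>
# e.g. a line "tank_radius\t1\t10" yields the entry ['tank_radius', '1', '10'].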
def optimize_design(self, costf = None, samples = None, maxiter = None, method = 'gradient'):
"""
Optimizes the design defined by the surrogate models across the design space defined
by the samples using nonlinear optimization method.
Any arbitrary cost function to optimize can be passed in with costf, so long as it's callable
and accepts a list of floats.
A constrained optimization problem is created using the constraints in the file param_file
set in the root config. These are the constraints that were used to generate the samples
which form the design space which is to be optimized using the cost function defined in costf
above.
Use the 'method' argument to select which optimization algorithm is used. 'gradient' selects
a constrained gradient-based optimization, while 'diffevo' selects a constrained
differential evolution algorithm.
"""
# TODO - need to scale parameter bound ranges and inverse transform the optimized result
if costf is None:
costf = self.optimization_costf
if samples is None:
samples = self.samples
# load the input parameter bounds from file into a list of tuples and normalize them
if method == 'gradient':
bounds = self.load_param_bounds()
print "Normalizing parameter constraints"
bounds = [[int(param[1]), int(param[2])] for param in bounds]
print "Before scaling: ", bounds
bounds = self.inscaler.transform(bounds)
print "After scaling: ", bounds
# set the initial guess to be a random sample
x0 = np.array(samples[random.randint(0, len(samples) - 1)])
x0_scaled = self.inscaler.transform(x0)
#opt_result = optimize.fmin(self.costf, x0 = x0, disp = True, full_output = True)
opt_method = 'CG'
opt_result = optimize.fmin(costf, x0 = x0_scaled, maxfun = 10000, maxiter = 10000, full_output = True, disp = True, retall = True)
#opt_result = optimize.minimize(self.optimization_costf, x0, method = opt_method, options = {'disp' : True})
print "\nConvergence success"
#print "Optimization method: ", opt_method
#print "Optimization status: ", opt_result.status
opt_param = self.inscaler.inverse_transform(opt_result[0])
print "{:<20}{:<20}{:<20}".format("Param Name", "Initial Guess", "Optimized Param")
for param_name, param_guess, param_opt, in itertools.izip(self.features, x0, opt_param):
print "{:<20}{:<20.2f}{:<20.2f}".format(param_name, param_guess, param_opt)
#print "%s\t\t%1.2f\t\t\t%1.2f" %(param_name, param_guess, param_opt)
print "Costf value at minimum: ", opt_result[1]
#print "Termination message: ", opt_result.message
print "Iterations: ", opt_result[2]
#print "allvecs:"
#print opt_result[5]
elif method == 'diffevo':
bounds = self.load_param_bounds()
xmin=[]
xmax=[]
for b in range(len(bounds)):
xmin.append(int(bounds[b][1]))
xmax.append(int(bounds[b][2]))
param_ranges=zip(self.inscaler.transform(np.array(xmin)),self.inscaler.transform(np.array(xmax)))
# Run the optimization search
opt_result=optimize.differential_evolution(self.optimization_costf,param_ranges,popsize=100)
opt_param = self.inscaler.inverse_transform(opt_result['x'])
print "{:<20}{:<20}".format("Param Name", "Optimized Param")
for param_name, param_opt, in itertools.izip(self.features, opt_param):
print "{:<20}{:<20.4f}".format(param_name, param_opt)
#print "%s\t\t%1.2f\t\t\t%1.2f" %(param_name, param_guess, param_opt)
print "Costf value at minimum: ", opt_result['fun']
print str(list(opt_param))
if __name__ == "__main__":
"""
#Example pseudo-code main loop implementation:
# create a design_optimizer object
samplesf = $(samples_file)
# define a cost function to use
cost_function = $(Cost function of the model target variables)
dopt = design_optimizer(samples_file = samplesf,
optimize_title = $(data title),
cost_function = cost_function,
N_sample_skip = 500,
som_iter = 100,
som_dimensions = 200)
#dopt.evaluate_design_space()
# Have the optimizer evaluate the design space defined by the samples and build an SOM
#dopt.evaluate_design_space(samples)
dopt.optimize_design()
"""
|
apache-2.0
|
Roboticmechart22/sms-tools
|
lectures/06-Harmonic-model/plots-code/harmonicModel-analysis-synthesis.py
|
24
|
1387
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import harmonicModel as HM
import sineModel as SM
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/vignesh.wav')
w = np.blackman(1201)
N = 2048
t = -90
nH = 100
minf0 = 130
maxf0 = 300
f0et = 7
Ns = 512
H = Ns/4
minSineDur = .1
harmDevSlope = 0.01
hfreq, hmag, hphase = HM.harmonicModelAnal(x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope, minSineDur)
y = SM.sineModelSynth(hfreq, hmag, hphase, Ns, H, fs)
numFrames = int(hfreq[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.figure(1, figsize=(9, 7))
plt.subplot(3,1,1)
plt.plot(np.arange(x.size)/float(fs), x, 'b')
plt.axis([0,x.size/float(fs),min(x),max(x)])
plt.title('x (vignesh.wav)')
plt.subplot(3,1,2)
yhfreq = hfreq
yhfreq[hfreq==0] = np.nan
plt.plot(frmTime, hfreq, lw=1.2)
plt.axis([0,y.size/float(fs),0,8000])
plt.title('f_h, harmonic frequencies')
plt.subplot(3,1,3)
plt.plot(np.arange(y.size)/float(fs), y, 'b')
plt.axis([0,y.size/float(fs),min(y),max(y)])
plt.title('yh')
plt.tight_layout()
UF.wavwrite(y, fs, 'vignesh-harmonic-synthesis.wav')
plt.savefig('harmonicModel-analysis-synthesis.png')
plt.show()
|
agpl-3.0
|
ephes/scikit-learn
|
examples/calibration/plot_calibration.py
|
225
|
4795
|
"""
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some measure of confidence in the prediction. However, not all
classifiers provide well-calibrated probabilities, some being over-confident
while others are under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing step. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using Brier's score
(see http://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probabilities using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <[email protected]>
# Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.cross_validation import train_test_split
n_samples = 50000
n_bins = 3 # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
centers=centers, shuffle=False, random_state=42)
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
this_X = X_train[y_train == this_y]
this_sw = sw_train[y_train == this_y]
plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
y_test[order].reshape(25, -1).mean(1),
'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
"(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
|
bsd-3-clause
|
OshynSong/scikit-learn
|
examples/ensemble/plot_forest_importances.py
|
241
|
1761
|
"""
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-tree variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(10):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(10), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(10), indices)
plt.xlim([-1, 10])
plt.show()
|
bsd-3-clause
|
kevin-coder/tensorflow-fork
|
tensorflow/contrib/learn/python/learn/preprocessing/tests/categorical_test.py
|
137
|
2219
|
# encoding: utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Categorical tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.preprocessing import categorical
from tensorflow.python.platform import test
class CategoricalTest(test.TestCase):
"""Categorical tests."""
def testSingleCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=1)
x = cat_processor.fit_transform([["0"], [1], [float("nan")], ["C"], ["C"],
[1], ["0"], [np.nan], [3]])
self.assertAllEqual(list(x), [[2], [1], [0], [3], [3], [1], [2], [0], [0]])
def testSingleCategoricalProcessorPandasSingleDF(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
cat_processor = categorical.CategoricalProcessor()
data = pd.DataFrame({"Gender": ["Male", "Female", "Male"]})
x = list(cat_processor.fit_transform(data))
self.assertAllEqual(list(x), [[1], [2], [1]])
def testMultiCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(
min_frequency=0, share=False)
x = cat_processor.fit_transform([["0", "Male"], [1, "Female"],
["3", "Male"]])
self.assertAllEqual(list(x), [[1, 1], [2, 2], [3, 1]])
if __name__ == "__main__":
test.main()
|
apache-2.0
|
seckcoder/lang-learn
|
python/sklearn/sklearn/decomposition/tests/test_sparse_pca.py
|
7
|
6089
|
# Author: Vlad Niculae
# License: BSD
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
spca = SparsePCA(n_components=3, n_jobs=2, random_state=0,
alpha=alpha).fit(Y)
U2 = spca.transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
def test_transform_nan():
"""
Test that SparsePCA does not return NaN when a feature is zero across all
samples.
"""
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
|
unlicense
|
rsnemmen/nmmn
|
nmmn/plots.py
|
1
|
37615
|
"""
Fancy plots
==============
"""
import numpy
from matplotlib import pylab
from nmmn import sed
def plot(spec):
"""
Plots a grmonty spectrum on the screen.
:param spec: grmonty spectrum file
"""
s = sed.SED()
s.grmonty(spec)
pylab.plot(s.lognu, s.ll)
pylab.show()
def onehist(x,xlabel='',fontsize=12):
"""
Script that plots the histogram of x with the corresponding xlabel.
"""
pylab.clf()
pylab.rcParams.update({'font.size': fontsize})
pylab.hist(x,histtype='stepfilled')
pylab.legend()
#### Change the X-axis appropriately ####
pylab.xlabel(xlabel)
pylab.ylabel('Number')
pylab.draw()
pylab.show()
def twohists(x1,x2,xmin,xmax,range=None,x1leg='$x_1$',x2leg='$x_2$',xlabel='',fig=1,sharey=False,fontsize=12,bins1=10,bins2=10):
"""
Script that plots two histograms of quantities x1 and x2
sharing the same X-axis.
:param x1,x2: arrays with data to be plotted
:param xmin,xmax: lower and upper range of plotted values, will be used to set a consistent x-range
for both histograms.
:param x1leg, x2leg: legends for each histogram
:param xlabel: self-explanatory.
:param bins1,bins2: number of bins in each histogram
:param fig: which plot window should I use?
:param range: in the form (xmin,xmax), same as range argument for hist and applied to both
histograms.
Inspired by `Scipy <http://www.scipy.org/Cookbook/Matplotlib/Multiple_Subplots_with_One_Axis_Label>`_.
"""
pylab.rcParams.update({'font.size': fontsize})
fig=pylab.figure(fig)
pylab.clf()
a=fig.add_subplot(2,1,1)
if sharey==True:
b=fig.add_subplot(2,1,2, sharex=a, sharey=a)
else:
b=fig.add_subplot(2,1,2, sharex=a)
a.hist(x1,bins1,label=x1leg,color='b',histtype='stepfilled',range=range)
a.legend(loc='best',frameon=False)
a.set_xlim(xmin,xmax)
b.hist(x2,bins2,label=x2leg,color='r',histtype='stepfilled',range=range)
b.legend(loc='best',frameon=False)
pylab.setp(a.get_xticklabels(), visible=False)
b.set_xlabel(xlabel)
b.set_ylabel('Number',verticalalignment='bottom')
pylab.minorticks_on()
pylab.subplots_adjust(hspace=0.15)
pylab.draw()
pylab.show()
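# Hedged usage sketch (the arrays and labels are hypothetical):
# >>> twohists(runA, runB, 0., 10., x1leg='run A', x2leg='run B',
# ... xlabel='flux', bins1=20, bins2=20, sharey=True)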
def threehists(x1,x2,x3,xmin,xmax,x1leg='$x_1$',x2leg='$x_2$',x3leg='$x_3$',xlabel='',fig=1,sharey=False,fontsize=12):
"""
Script that plots three histograms of quantities x1, x2 and x3
sharing the same X-axis.
Arguments:
- x1,x2,x3: arrays with data to be plotted
- xmin,xmax: lower and upper range of plotted values, will be used to set a consistent x-range for both histograms.
- x1leg, x2leg, x3leg: legends for each histogram
- xlabel: self-explanatory.
- sharey: sharing the Y-axis among the histograms?
- fig: which plot window should I use?
Example:
x1=Lbol(AD), x2=Lbol(JD), x3=Lbol(EHF10)
>>> threehists(x1,x2,x3,38,44,'AD','JD','EHF10','$\log L_{\\rm bol}$ (erg s$^{-1}$)',sharey=True)
Inspired by `Scipy <http://www.scipy.org/Cookbook/Matplotlib/Multiple_Subplots_with_One_Axis_Label>`_.
"""
pylab.rcParams.update({'font.size': fontsize})
fig=pylab.figure(fig)
pylab.clf()
a=fig.add_subplot(3,1,1)
if sharey==True:
b=fig.add_subplot(3,1,2, sharex=a, sharey=a)
c=fig.add_subplot(3,1,3, sharex=a, sharey=a)
else:
b=fig.add_subplot(3,1,2, sharex=a)
c=fig.add_subplot(3,1,3, sharex=a)
a.hist(x1,label=x1leg,color='b',histtype='stepfilled')
a.legend(loc='best',frameon=False)
a.set_xlim(xmin,xmax)
b.hist(x2,label=x2leg,color='r',histtype='stepfilled')
b.legend(loc='best',frameon=False)
c.hist(x3,label=x3leg,color='y',histtype='stepfilled')
c.legend(loc='best',frameon=False)
pylab.setp(a.get_xticklabels(), visible=False)
pylab.setp(b.get_xticklabels(), visible=False)
c.set_xlabel(xlabel)
b.set_ylabel('Number')
pylab.minorticks_on()
pylab.subplots_adjust(hspace=0.15)
pylab.draw()
pylab.show()
def fourhists(x1,x2,x3,x4,xmin,xmax,x1leg='$x_1$',x2leg='$x_2$',x3leg='$x_3$',x4leg='$x_3$',xlabel='',fig=1,sharey=False,fontsize=12,bins1=10,bins2=10,bins3=10,bins4=10,line1=None,line2=None,line3=None,line4=None,line1b=None,line2b=None,line3b=None,line4b=None,loc='best'):
"""
Script that plots four histograms of quantities x1, x2, x3 and x4
sharing the same X-axis.
Arguments:
- x1,x2,x3,x4: arrays with data to be plotted
- xmin,xmax: lower and upper range of plotted values, will be used to set a consistent x-range
for all histograms.
- x1leg, x2leg, x3leg, x4leg: legends for each histogram
- xlabel: self-explanatory.
- sharey: sharing the Y-axis among the histograms?
- bins1,bins2,...: number of bins in each histogram
- fig: which plot window should I use?
- line?: draws vertical solid lines at the positions indicated in each panel
- line?b: draws vertical dashed lines at the positions indicated in each panel
.. figure:: ../figures/fourhists.png
:scale: 100 %
:alt: Four histograms in the same figure
Four histograms in the same figure.
Inspired by `Scipy <http://www.scipy.org/Cookbook/Matplotlib/Multiple_Subplots_with_One_Axis_Label>`_.
"""
pylab.rcParams.update({'font.size': fontsize})
fig=pylab.figure(fig)
pylab.clf()
a=fig.add_subplot(4,1,1)
if sharey==True:
b=fig.add_subplot(4,1,2, sharex=a, sharey=a)
c=fig.add_subplot(4,1,3, sharex=a, sharey=a)
d=fig.add_subplot(4,1,4, sharex=a, sharey=a)
else:
b=fig.add_subplot(4,1,2, sharex=a)
c=fig.add_subplot(4,1,3, sharex=a)
d=fig.add_subplot(4,1,4, sharex=a)
def vline(hist,value,linestyle='k'):
"""Draw vertical line"""
yax=hist.set_ylim()
hist.plot([value,value],[yax[0],yax[1]],linestyle,linewidth=2)
a.hist(x1,bins1,label=x1leg,color='b',histtype='stepfilled')
a.legend(loc=loc,frameon=False)
a.set_xlim(xmin,xmax)
if line1!=None: vline(a,line1)
if line1b!=None: vline(a,line1b,'k--')
b.hist(x2,bins2,label=x2leg,color='r',histtype='stepfilled')
b.legend(loc=loc,frameon=False)
if line2!=None: vline(b,line2)
if line2b!=None: vline(b,line2b,'k--')
c.hist(x3,bins3,label=x3leg,color='y',histtype='stepfilled')
c.legend(loc=loc,frameon=False)
if line3!=None: vline(c,line3)
if line3b!=None: vline(c,line3b,'k--')
d.hist(x4,bins4,label=x4leg,color='g',histtype='stepfilled')
d.legend(loc=loc,frameon=False)
if line4!=None: vline(d,line4)
if line4b!=None: vline(d,line4b,'k--')
pylab.setp(a.get_xticklabels(), visible=False)
pylab.setp(b.get_xticklabels(), visible=False)
pylab.setp(c.get_xticklabels(), visible=False)
d.set_xlabel(xlabel)
c.set_ylabel('Number')
pylab.minorticks_on()
pylab.subplots_adjust(hspace=0.15)
pylab.draw()
pylab.show()
def fourcumplot(x1,x2,x3,x4,xmin,xmax,x1leg='$x_1$',x2leg='$x_2$',x3leg='$x_3$',x4leg='$x_3$',xlabel='',ylabel='$N(x>x\')$',fig=1,sharey=False,fontsize=12,bins1=50,bins2=50,bins3=50,bins4=50):
"""
Script that plots the cumulative histograms of four variables x1, x2, x3 and x4
sharing the same X-axis. For each bin, Y is the fraction of the sample
with values above X.
Arguments:
- x1,x2,x3,x4: arrays with data to be plotted
- xmin,xmax: lower and upper range of plotted values, will be used to set a consistent x-range
for all histograms.
- x1leg, x2leg, x3leg, x4leg: legends for each histogram
- xlabel: self-explanatory.
- sharey: sharing the Y-axis among the histograms?
- bins1,bins2,...: number of bins in each histogram
- fig: which plot window should I use?
Inspired by `Scipy <http://www.scipy.org/Cookbook/Matplotlib/Multiple_Subplots_with_One_Axis_Label>`_.
v1 Jun. 2012: inherited from fourhists.
"""
pylab.rcParams.update({'font.size': fontsize})
fig=pylab.figure(fig)
pylab.clf()
a=fig.add_subplot(4,1,1)
if sharey==True:
b=fig.add_subplot(4,1,2, sharex=a, sharey=a)
c=fig.add_subplot(4,1,3, sharex=a, sharey=a)
d=fig.add_subplot(4,1,4, sharex=a, sharey=a)
else:
b=fig.add_subplot(4,1,2, sharex=a)
c=fig.add_subplot(4,1,3, sharex=a)
d=fig.add_subplot(4,1,4, sharex=a)
a.hist(x1,bins1,label=x1leg,color='b',cumulative=-True,normed=True,histtype='stepfilled')
a.legend(loc='best',frameon=False)
a.set_xlim(xmin,xmax)
b.hist(x2,bins2,label=x2leg,color='r',cumulative=-True,normed=True,histtype='stepfilled')
b.legend(loc='best',frameon=False)
c.hist(x3,bins3,label=x3leg,color='y',cumulative=-True,normed=True,histtype='stepfilled')
c.legend(loc='best',frameon=False)
d.hist(x4,bins4,label=x4leg,color='g',cumulative=-True,normed=True,histtype='stepfilled')
d.legend(loc='best',frameon=False)
pylab.setp(a.get_xticklabels(), visible=False)
pylab.setp(b.get_xticklabels(), visible=False)
pylab.setp(c.get_xticklabels(), visible=False)
d.set_xlabel(xlabel)
c.set_ylabel(ylabel)
pylab.minorticks_on()
pylab.subplots_adjust(hspace=0.15)
pylab.draw()
pylab.show()
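# Hedged usage sketch (the arrays and labels are hypothetical):
# >>> fourcumplot(s1,s2,s3,s4,0.,1.,'sample A','sample B','sample C','sample D',
# ... xlabel='Eddington ratio',bins1=30,bins2=30,bins3=30,bins4=30)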
def threehistsx(x1,x2,x3,x1leg='$x_1$',x2leg='$x_2$',x3leg='$x_3$',fig=1,fontsize=12,bins1=10,bins2=10,bins3=10):
"""
Script that pretty-plots three histograms of quantities x1, x2 and x3.
Arguments:
:param x1,x2,x3: arrays with data to be plotted
:param x1leg, x2leg, x3leg: legends for each histogram
:param fig: which plot window should I use?
Example:
x1=Lbol(AD), x2=Lbol(JD), x3=Lbol(EHF10)
>>> threehistsx(x1,x2,x3,'AD','JD','EHF10')
Inspired by http://www.scipy.org/Cookbook/Matplotlib/Multiple_Subplots_with_One_Axis_Label.
"""
pylab.rcParams.update({'font.size': fontsize})
pylab.figure(fig)
pylab.clf()
pylab.subplot(3,1,1)
pylab.hist(x1,label=x1leg,color='b',bins=bins1)
pylab.legend(loc='best',frameon=False)
pylab.subplot(3,1,2)
pylab.hist(x2,label=x2leg,color='r',bins=bins2)
pylab.legend(loc='best',frameon=False)
pylab.subplot(3,1,3)
pylab.hist(x3,label=x3leg,color='y',bins=bins3)
pylab.legend(loc='best',frameon=False)
pylab.minorticks_on()
pylab.subplots_adjust(hspace=0.15)
pylab.draw()
pylab.show()
def fitconf(xdata,ydata,errx,erry,covxy,nboot=1000,bcesMethod='ort',linestyle='',conf=0.683,confcolor='gray',xplot=None,front=False,**args):
"""
This is a wrapper that, given the input data, performs the BCES
fit, gets the orthogonal parameters and plots the best-fit line and
confidence band (generated using analytical methods). I decided to put together
these commands in a method because I have been using them very frequently.
Assumes you initialized the plot window before calling this method.
Usage:
>>> a1,b1,erra1,errb1,cov1=nemmen.fitconf(x[i],y[i],errx[i],erry[i],covxy[i],nboot,bces,linestyle='k',confcolor='LightGrey')
Explanation of some arguments:
- xplot: if provided, will compute the confidence band in the X-values provided
with xplot
- front: if True, then will plot the confidence band in front of the data
points; otherwise, will plot it behind the points
"""
import bces.bces
from . import stats
from . import misc
# Selects the desired BCES method
i=misc.whichbces(bcesMethod)
# Performs the BCES fit
a,b,erra,errb,cov=bces.bces.bcesp(xdata,errx,ydata,erry,covxy,nboot)
# Plots best-fit
if xplot==None:
x=numpy.linspace(xdata.min(),xdata.max(),100)
else:
x=xplot
pylab.plot(x,a[i]*x+b[i],linestyle,**args)
fitm=numpy.array([ a[i],b[i] ]) # array with best-fit parameters
covm=numpy.array([ (erra[i]**2,cov[i]), (cov[i],errb[i]**2) ]) # covariance matrix
def func(x): return x[1]*x[0]+x[2]
# Plots confidence band
lcb,ucb,xcb=stats.confbandnl(xdata,ydata,func,fitm,covm,2,conf,x)
if front==True:
zorder=10
else:
zorder=None
pylab.fill_between(xcb, lcb, ucb, alpha=0.3, facecolor=confcolor, zorder=zorder)
return a,b,erra,errb,cov
def fitconfmc(xdata,ydata,errx,erry,covxy,nboot=1000,bcesMethod='ort',linestyle='',conf=1.,confcolor='gray',xplot=None,front=False,**args):
"""
This is a wrapper that, given the input data, performs the BCES
fit, gets the orthogonal parameters and plots the best-fit line and
confidence band (generated using MC). I decided to put together these
commands in a method because I have been using them very frequently.
Assumes you initialized the plot window before calling this method.
This method is more stable than fitconf, which is plagued with numerical
instabilities when computing the gradient.
Usage:
>>> a1,b1,erra1,errb1,cov1=nemmen.fitconf(x[i],y[i],errx[i],erry[i],covxy[i],nboot,bces,linestyle='k',confcolor='LightGrey')
Explanation of some arguments:
- xplot: if provided, will compute the confidence band in the X-values provided
with xplot
- front: if True, then will plot the confidence band in front of the data
points; otherwise, will plot it behind the points
- conf: size of confidence band to be plotted in standard deviations
"""
import bces.bces
from . import misc
from . import stats
# Selects the desired BCES method
i=misc.whichbces(bcesMethod)
# Performs the BCES fit
a,b,erra,errb,cov=bces.bces.bcesp(xdata,errx,ydata,erry,covxy,nboot)
# Plots best-fit
if xplot==None:
x=numpy.linspace(xdata.min(),xdata.max(),100)
else:
x=xplot
pylab.plot(x,a[i]*x+b[i],linestyle,**args)
fitm=numpy.array([ a[i],b[i] ]) # array with best-fit parameters
covm=numpy.array([ (erra[i]**2,cov[i]), (cov[i],errb[i]**2) ]) # covariance matrix
# Plots confidence band
lcb,ucb,y=stats.confbandmc(x,fitm,covm,10000,conf) # confbandmc assumed to live in nmmn.stats, as confbandnl does
if front==True:
zorder=10
else:
zorder=None
pylab.fill_between(x, lcb, ucb, alpha=0.3, facecolor=confcolor, zorder=zorder)
return a,b,erra,errb,cov
def plotlinfit(xdata,ydata,a,b,erra,errb,cov,linestyle='',conf=0.683,confcolor='gray',xplot=None,front=False,**args):
"""
This is a wrapper that, given the output data from a linear regression
method (for example, bayeslin.pro, the Bayesian linear regression method
of Kelly (2007)), plots the fit and the confidence bands.
The input is:
X, Y, slope (A), errA, intercept (B), errB and cov(A,B)
Assumes you initialized the plot window before calling this method.
Usage:
>>> nemmen.plotlinfit(x,y,a,b,erra,errb,covab,linestyle='k',confcolor='LightGrey')
Explanation of some arguments:
- xplot: if provided, will compute the confidence band in the X-values provided
with xplot
- front: if True, then will plot the confidence band in front of the data
points; otherwise, will plot it behind the points
"""
from . import stats
# Plots best-fit
if xplot==None:
x=numpy.linspace(xdata.min(),xdata.max(),100)
else:
x=xplot
pylab.plot(x,a*x+b,linestyle,**args)
fitm=numpy.array([ a,b ]) # array with best-fit parameters
covm=numpy.array([ (erra**2,cov), (cov,errb**2) ]) # covariance matrix
def func(x): return x[1]*x[0]+x[2]
# Plots confidence band
lcb,ucb,xcb=stats.confbandnl(xdata,ydata,func,fitm,covm,2,conf,x)
if front==True:
zorder=10
else:
zorder=None
pylab.fill_between(xcb, lcb, ucb, alpha=0.3, facecolor=confcolor, zorder=zorder)
def jh(xdata,ydata,errx,erry,covxy,nboot=1000,bces='ort',linestyle='',conf=0.683,confcolor='gray',xplot=None,front=False,**args):
"""
This is a wrapper that, given the input data, performs the BCES
fit and gets the orthogonal parameters, the best-fit line and the
confidence band. It then returns the points corresponding to the line and
confidence band.
I wrote this for the John Hunter plotting contest, in order to simplify
my AGN-GRB plot. Inherited from method fitconf.
Usage:
>>> x,y,lcb,ucb=nemmen.jh(x[i],y[i],errx[i],erry[i],covxy[i],nboot,bces,linestyle='k',confcolor='LightGrey')
where y are the line points, lcb and ucb are the lower and upper confidence band
points.
:param xplot: if provided, will compute the confidence band in the X-values provided
with xplot
:param front: if True, then will plot the confidence band in front of the data
points; otherwise, will plot it behind the points
"""
from bces.bces import bcesp
from . import misc, stats
# Selects the desired BCES method
i=misc.whichbces(bces)
# Performs the BCES fit
a,b,erra,errb,cov=bcesp(xdata,errx,ydata,erry,covxy,nboot)
# Plots best-fit
if xplot==None:
x=numpy.linspace(xdata.min(),xdata.max(),100)
else:
x=xplot
y=a[i]*x+b[i]
fitm=numpy.array([ a[i],b[i] ]) # array with best-fit parameters
covm=numpy.array([ (erra[i]**2,cov[i]), (cov[i],errb[i]**2) ]) # covariance matrix
def func(x): return x[1]*x[0]+x[2]
# Plots confidence band
lcb,ucb,xcb=stats.confbandnl(xdata,ydata,func,fitm,covm,2,conf,x)
return x,y,lcb,ucb
def fitconfpred(xdata,ydata,errx,erry,covxy,nboot=1000,bces='ort',linestyle='',conf=0.68,confcolor='LightGrey',predcolor='Khaki',xplot=None,front=False,**args):
"""
This is a wrapper that, given the input data, performs the BCES
fit, gets the orthogonal parameters and plots (i) the best-fit line,
(ii) confidence band and (iii) prediction band.
I decided to put together these commands in a method because I have been
using them very frequently.
Assumes you initialized the plot window before calling this method.
Usage:
>>> a1,b1,erra1,errb1,cov1=nemmen.fitconfpred(x[i],y[i],errx[i],erry[i],covxy[i],nboot,bces,linestyle='k',confcolor='LightGrey')
"""
from bces.bces import bcesp
from . import misc, stats
# Selects the desired BCES method
i=misc.whichbces(bces)
# Performs the BCES fit
a,b,erra,errb,cov=bcesp(xdata,errx,ydata,erry,covxy,nboot)
# Plots best-fit
if xplot==None:
x=numpy.linspace(xdata.min(),xdata.max(),100)
else:
x=xplot
pylab.plot(x,a[i]*x+b[i],linestyle,**args)
fitm=numpy.array([ a[i],b[i] ]) # array with best-fit parameters
covm=numpy.array([ (erra[i]**2,cov[i]), (cov[i],errb[i]**2) ]) # covariance matrix
def func(x): return x[1]*x[0]+x[2]
if front==True:
zorder=10
else:
zorder=None
# Plots prediction band
lpb,upb,xpb=stats.predbandnl(xdata,ydata,func,fitm,covm,2,conf,x) # predbandnl assumed to live in nmmn.stats, mirroring confbandnl
pylab.fill_between(xpb, lpb, upb, facecolor=predcolor,edgecolor='', zorder=zorder)
# Plots confidence band
lcb,ucb,xcb=stats.confbandnl(xdata,ydata,func,fitm,covm,2,conf,x)
pylab.fill_between(xcb, lcb, ucb, facecolor=confcolor,edgecolor='', zorder=zorder)
return a,b,erra,errb,cov
def fitpred(xdata,ydata,errx,erry,covxy,nboot=1000,bces='ort',linestyle='',conf=0.68,predcolor='Khaki',xplot=None,front=False,**args):
"""
This is a wrapper that, given the input data, performs the BCES
fit, gets the orthogonal parameters and plots (i) the best-fit line and
(ii) prediction band.
I decided to put together these commands in a method because I have been
using them very frequently.
Assumes you initialized the plot window before calling this method.
Usage:
>>> a1,b1,erra1,errb1,cov1=nemmen.fitpred(x[i],y[i],errx[i],erry[i],covxy[i],nboot,bces,linestyle='k',predcolor='LightGrey')
"""
from bces.bces import bcesp
from . import misc, stats
# Selects the desired BCES method
i=misc.whichbces(bces)
# Performs the BCES fit
a,b,erra,errb,cov=bcesp(xdata,errx,ydata,erry,covxy,nboot)
# Plots best-fit
if xplot==None:
x=numpy.linspace(xdata.min(),xdata.max(),100)
else:
x=xplot
pylab.plot(x,a[i]*x+b[i],linestyle,**args)
fitm=numpy.array([ a[i],b[i] ]) # array with best-fit parameters
covm=numpy.array([ (erra[i]**2,cov[i]), (cov[i],errb[i]**2) ]) # covariance matrix
def func(x): return x[1]*x[0]+x[2]
if front==True:
zorder=10
else:
zorder=None
# Plots prediction band
lpb,upb,xpb=stats.predbandnl(xdata,ydata,func,fitm,covm,2,conf,x) # predbandnl assumed to live in nmmn.stats, mirroring confbandnl
pylab.fill_between(xpb, lpb, upb, facecolor=predcolor,edgecolor='', zorder=zorder)
return a,b,erra,errb,cov
def uerrorbar(ux,uy,**args):
"""
Adaptation of pylab.errorbar to work with arrays defined using the
uncertainties package, which include the errorbars built-in.
Usage:
>>> uerrorbar(x,y,fmt='o')
will plot the points and error bars associated with the 'unumpy'
arrays x and y
"""
from uncertainties import unumpy
x=unumpy.nominal_values(ux)
y=unumpy.nominal_values(uy)
errx=unumpy.std_devs(ux)
erry=unumpy.std_devs(uy)
pylab.errorbar(x,y,xerr=errx,yerr=erry,**args)
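# Hedged usage sketch (assumes a recent version of the 'uncertainties' package):
# >>> from uncertainties import unumpy
# >>> ux = unumpy.uarray([1., 2., 3.], [0.1, 0.2, 0.1])  # nominal values, std devs
# >>> uy = unumpy.uarray([2., 4., 9.], [0.5, 0.4, 0.8])
# >>> uerrorbar(ux, uy, fmt='o')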
def text(x,y,s,**args):
"""
Version of pylab.text that can be applied to arrays.
Usage:
>>> text(x,y,s, fontsize=10)
will plot the strings in array 's' at coordinates given by arrays
'x' and 'y'.
"""
for j in range(x.size):
pylab.text(x[j],y[j],s[j], **args)
def ipyplots():
"""
Makes sure we have exactly the same matplotlib settings as in the IPython terminal
version. Call this from IPython notebook.
`Source <http://stackoverflow.com/questions/16905028/why-is-matplotlib-plot-produced-from-ipython-notebook-slightly-different-from-te)>`_.
"""
pylab.rcParams['figure.figsize']=(8.0,6.0) #(6.0,4.0)
pylab.rcParams['font.size']=12 #10
pylab.rcParams['savefig.dpi']=100 #72
pylab.rcParams['figure.subplot.bottom']=.1 #.125
def make_cmap(colors, position=None, bit=False):
'''
make_cmap takes a list of tuples which contain RGB values. The RGB
values may either be in 8-bit [0 to 255] (in which bit must be set to
True when called) or arithmetic [0 to 1] (default). make_cmap returns
a cmap with equally spaced colors.
Arrange your tuples so that the first color is the lowest value for the
colorbar and the last is the highest.
position contains values from 0 to 1 to dictate the location of each color.
source: http://schubert.atmos.colostate.edu/~cslocum/custom_cmap.html
Chris Slocum, Colorado State University
'''
import sys
import matplotlib as mpl
import numpy as np
bit_rgb = np.linspace(0,1,256)
if position == None:
position = np.linspace(0,1,len(colors))
else:
if len(position) != len(colors):
sys.exit("position length must be the same as colors")
elif position[0] != 0 or position[-1] != 1:
sys.exit("position must start with 0 and end with 1")
if bit:
for i in range(len(colors)):
colors[i] = (bit_rgb[colors[i][0]],
bit_rgb[colors[i][1]],
bit_rgb[colors[i][2]])
cdict = {'red':[], 'green':[], 'blue':[]}
for pos, color in zip(position, colors):
cdict['red'].append((pos, color[0], color[0]))
cdict['green'].append((pos, color[1], color[1]))
cdict['blue'].append((pos, color[2], color[2]))
cmap = mpl.colors.LinearSegmentedColormap('my_colormap',cdict,256)
return cmap
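# Illustrative usage sketch (not part of the original source): build a simple
# blue -> white -> red colormap from 8-bit RGB tuples and use it with imshow.
# >>> mycmap=make_cmap([(0,0,255),(255,255,255),(255,0,0)], bit=True)
# >>> pylab.imshow(numpy.random.rand(10,10), cmap=mycmap)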
def image(Z,xnew,ynew,my_cmap=None,aspect='equal'):
"""
Creates a pretty log-scale image of the 2D array Z. You need to specify: the
array Z and the coordinate arrays xnew and ynew that set the image extent;
optionally pass a colormap (my_cmap) and the aspect ratio.
"""
imshow(log10(Z),extent=[xnew[0],xnew[-1],ynew[0],ynew[-1]], cmap=my_cmap)
pylab.axes().set_aspect('equal')
colorbar()
circle2=Circle((0,0),1,color='k')
gca().add_artist(circle2)
savefig('tmp.png',transparent=True,dpi=150)
def wolframcmap():
"""
Returns colormap that matches closely the one used by default
for images in Wolfram Mathematica 11 (dark blue to orange).
I spent one hour playing around to reproduce it.
Usage:
>>> mycmap=nmmn.plots.wolframcmap()
>>> imshow(rho, cmap=mycmap)
.. figure:: ../figures/wolframcmap.png
:scale: 100 %
:alt: Image plotted using Wolfram's colormap
Image plotted using Wolfram's colormap.
"""
# Create a list of RGB tuples, recreates Mathematica colormap
colors3=[(51,91,150),(111,116,143),(167,136,110),(233,167,85),(251,212,141),(255,247,190)]
# Call the function make_cmap which returns your colormap
return make_cmap(colors3, bit=True)
def parulacmap():
"""
Creates the beautiful Parula colormap which is Matlab's default.
Usage:
>>> mycmap=nmmn.plots.parulacmap()
>>> imshow(rho, cmap=mycmap)
Code taken from `here <https://github.com/BIDS/colormap/blob/master/parula.py>`_
"""
from matplotlib.colors import LinearSegmentedColormap
cm_data = [[0.2081, 0.1663, 0.5292], [0.2116238095, 0.1897809524, 0.5776761905],
[0.212252381, 0.2137714286, 0.6269714286], [0.2081, 0.2386, 0.6770857143],
[0.1959047619, 0.2644571429, 0.7279], [0.1707285714, 0.2919380952,
0.779247619], [0.1252714286, 0.3242428571, 0.8302714286],
[0.0591333333, 0.3598333333, 0.8683333333], [0.0116952381, 0.3875095238,
0.8819571429], [0.0059571429, 0.4086142857, 0.8828428571],
[0.0165142857, 0.4266, 0.8786333333], [0.032852381, 0.4430428571,
0.8719571429], [0.0498142857, 0.4585714286, 0.8640571429],
[0.0629333333, 0.4736904762, 0.8554380952], [0.0722666667, 0.4886666667,
0.8467], [0.0779428571, 0.5039857143, 0.8383714286],
[0.079347619, 0.5200238095, 0.8311809524], [0.0749428571, 0.5375428571,
0.8262714286], [0.0640571429, 0.5569857143, 0.8239571429],
[0.0487714286, 0.5772238095, 0.8228285714], [0.0343428571, 0.5965809524,
0.819852381], [0.0265, 0.6137, 0.8135], [0.0238904762, 0.6286619048,
0.8037619048], [0.0230904762, 0.6417857143, 0.7912666667],
[0.0227714286, 0.6534857143, 0.7767571429], [0.0266619048, 0.6641952381,
0.7607190476], [0.0383714286, 0.6742714286, 0.743552381],
[0.0589714286, 0.6837571429, 0.7253857143],
[0.0843, 0.6928333333, 0.7061666667], [0.1132952381, 0.7015, 0.6858571429],
[0.1452714286, 0.7097571429, 0.6646285714], [0.1801333333, 0.7176571429,
0.6424333333], [0.2178285714, 0.7250428571, 0.6192619048],
[0.2586428571, 0.7317142857, 0.5954285714], [0.3021714286, 0.7376047619,
0.5711857143], [0.3481666667, 0.7424333333, 0.5472666667],
[0.3952571429, 0.7459, 0.5244428571], [0.4420095238, 0.7480809524,
0.5033142857], [0.4871238095, 0.7490619048, 0.4839761905],
[0.5300285714, 0.7491142857, 0.4661142857], [0.5708571429, 0.7485190476,
0.4493904762], [0.609852381, 0.7473142857, 0.4336857143],
[0.6473, 0.7456, 0.4188], [0.6834190476, 0.7434761905, 0.4044333333],
[0.7184095238, 0.7411333333, 0.3904761905],
[0.7524857143, 0.7384, 0.3768142857], [0.7858428571, 0.7355666667,
0.3632714286], [0.8185047619, 0.7327333333, 0.3497904762],
[0.8506571429, 0.7299, 0.3360285714], [0.8824333333, 0.7274333333, 0.3217],
[0.9139333333, 0.7257857143, 0.3062761905], [0.9449571429, 0.7261142857,
0.2886428571], [0.9738952381, 0.7313952381, 0.266647619],
[0.9937714286, 0.7454571429, 0.240347619], [0.9990428571, 0.7653142857,
0.2164142857], [0.9955333333, 0.7860571429, 0.196652381],
[0.988, 0.8066, 0.1793666667], [0.9788571429, 0.8271428571, 0.1633142857],
[0.9697, 0.8481380952, 0.147452381], [0.9625857143, 0.8705142857, 0.1309],
[0.9588714286, 0.8949, 0.1132428571], [0.9598238095, 0.9218333333,
0.0948380952], [0.9661, 0.9514428571, 0.0755333333],
[0.9763, 0.9831, 0.0538]]
return LinearSegmentedColormap.from_list('parula', cm_data)
def turbocmap():
"""
Returns the Turbo colormap: an improved version of the awful jet colormap.
The look-up table contains 256 entries. Each entry is a floating point sRGB triplet.
Usage:
>>> turbo=nmmn.plots.turbocmap()
>>> imshow(rho, cmap=turbo)
Copyright 2019 Google LLC.
SPDX-License-Identifier: Apache-2.0
Author: Anton Mikhailov
References:
- `turbo colormap array <https://gist.github.com/mikhailov-work/ee72ba4191942acecc03fe6da94fc73f>`_
- Google AI `blog post <https://ai.googleblog.com/2019/08/turbo-improved-rainbow-colormap-for.html>`_ describing the advantages of the colormap
"""
from matplotlib.colors import ListedColormap
turbo_colormap_data = [[0.18995,0.07176,0.23217],[0.19483,0.08339,0.26149],[0.19956,0.09498,0.29024],[0.20415,0.10652,0.31844],[0.20860,0.11802,0.34607],[0.21291,0.12947,0.37314],[0.21708,0.14087,0.39964],[0.22111,0.15223,0.42558],[0.22500,0.16354,0.45096],[0.22875,0.17481,0.47578],[0.23236,0.18603,0.50004],[0.23582,0.19720,0.52373],[0.23915,0.20833,0.54686],[0.24234,0.21941,0.56942],[0.24539,0.23044,0.59142],[0.24830,0.24143,0.61286],[0.25107,0.25237,0.63374],[0.25369,0.26327,0.65406],[0.25618,0.27412,0.67381],[0.25853,0.28492,0.69300],[0.26074,0.29568,0.71162],[0.26280,0.30639,0.72968],[0.26473,0.31706,0.74718],[0.26652,0.32768,0.76412],[0.26816,0.33825,0.78050],[0.26967,0.34878,0.79631],[0.27103,0.35926,0.81156],[0.27226,0.36970,0.82624],[0.27334,0.38008,0.84037],[0.27429,0.39043,0.85393],[0.27509,0.40072,0.86692],[0.27576,0.41097,0.87936],[0.27628,0.42118,0.89123],[0.27667,0.43134,0.90254],[0.27691,0.44145,0.91328],[0.27701,0.45152,0.92347],[0.27698,0.46153,0.93309],[0.27680,0.47151,0.94214],[0.27648,0.48144,0.95064],[0.27603,0.49132,0.95857],[0.27543,0.50115,0.96594],[0.27469,0.51094,0.97275],[0.27381,0.52069,0.97899],[0.27273,0.53040,0.98461],[0.27106,0.54015,0.98930],[0.26878,0.54995,0.99303],[0.26592,0.55979,0.99583],[0.26252,0.56967,0.99773],[0.25862,0.57958,0.99876],[0.25425,0.58950,0.99896],[0.24946,0.59943,0.99835],[0.24427,0.60937,0.99697],[0.23874,0.61931,0.99485],[0.23288,0.62923,0.99202],[0.22676,0.63913,0.98851],[0.22039,0.64901,0.98436],[0.21382,0.65886,0.97959],[0.20708,0.66866,0.97423],[0.20021,0.67842,0.96833],[0.19326,0.68812,0.96190],[0.18625,0.69775,0.95498],[0.17923,0.70732,0.94761],[0.17223,0.71680,0.93981],[0.16529,0.72620,0.93161],[0.15844,0.73551,0.92305],[0.15173,0.74472,0.91416],[0.14519,0.75381,0.90496],[0.13886,0.76279,0.89550],[0.13278,0.77165,0.88580],[0.12698,0.78037,0.87590],[0.12151,0.78896,0.86581],[0.11639,0.79740,0.85559],[0.11167,0.80569,0.84525],[0.10738,0.81381,0.83484],[0.10357,0.82177,0.82437],[0.10026,0.82955,0.81389],[0.09750,0.83714,0.80342],[0.09532,0.84455,0.79299],[0.09377,0.85175,0.78264],[0.09287,0.85875,0.77240],[0.09267,0.86554,0.76230],[0.09320,0.87211,0.75237],[0.09451,0.87844,0.74265],[0.09662,0.88454,0.73316],[0.09958,0.89040,0.72393],[0.10342,0.89600,0.71500],[0.10815,0.90142,0.70599],[0.11374,0.90673,0.69651],[0.12014,0.91193,0.68660],[0.12733,0.91701,0.67627],[0.13526,0.92197,0.66556],[0.14391,0.92680,0.65448],[0.15323,0.93151,0.64308],[0.16319,0.93609,0.63137],[0.17377,0.94053,0.61938],[0.18491,0.94484,0.60713],[0.19659,0.94901,0.59466],[0.20877,0.95304,0.58199],[0.22142,0.95692,0.56914],[0.23449,0.96065,0.55614],[0.24797,0.96423,0.54303],[0.26180,0.96765,0.52981],[0.27597,0.97092,0.51653],[0.29042,0.97403,0.50321],[0.30513,0.97697,0.48987],[0.32006,0.97974,0.47654],[0.33517,0.98234,0.46325],[0.35043,0.98477,0.45002],[0.36581,0.98702,0.43688],[0.38127,0.98909,0.42386],[0.39678,0.99098,0.41098],[0.41229,0.99268,0.39826],[0.42778,0.99419,0.38575],[0.44321,0.99551,0.37345],[0.45854,0.99663,0.36140],[0.47375,0.99755,0.34963],[0.48879,0.99828,0.33816],[0.50362,0.99879,0.32701],[0.51822,0.99910,0.31622],[0.53255,0.99919,0.30581],[0.54658,0.99907,0.29581],[0.56026,0.99873,0.28623],[0.57357,0.99817,0.27712],[0.58646,0.99739,0.26849],[0.59891,0.99638,0.26038],[0.61088,0.99514,0.25280],[0.62233,0.99366,0.24579],[0.63323,0.99195,0.23937],[0.64362,0.98999,0.23356],[0.65394,0.98775,0.22835],[0.66428,0.98524,0.22370],[0.67462,0.98246,0.21960],[0.68494,0.97941,0.21602],[0.69525,0.97610,0.21294],[0.70553,0.97255,0.21032],[0.71577,0.96875,0.208
15],[0.72596,0.96470,0.20640],[0.73610,0.96043,0.20504],[0.74617,0.95593,0.20406],[0.75617,0.95121,0.20343],[0.76608,0.94627,0.20311],[0.77591,0.94113,0.20310],[0.78563,0.93579,0.20336],[0.79524,0.93025,0.20386],[0.80473,0.92452,0.20459],[0.81410,0.91861,0.20552],[0.82333,0.91253,0.20663],[0.83241,0.90627,0.20788],[0.84133,0.89986,0.20926],[0.85010,0.89328,0.21074],[0.85868,0.88655,0.21230],[0.86709,0.87968,0.21391],[0.87530,0.87267,0.21555],[0.88331,0.86553,0.21719],[0.89112,0.85826,0.21880],[0.89870,0.85087,0.22038],[0.90605,0.84337,0.22188],[0.91317,0.83576,0.22328],[0.92004,0.82806,0.22456],[0.92666,0.82025,0.22570],[0.93301,0.81236,0.22667],[0.93909,0.80439,0.22744],[0.94489,0.79634,0.22800],[0.95039,0.78823,0.22831],[0.95560,0.78005,0.22836],[0.96049,0.77181,0.22811],[0.96507,0.76352,0.22754],[0.96931,0.75519,0.22663],[0.97323,0.74682,0.22536],[0.97679,0.73842,0.22369],[0.98000,0.73000,0.22161],[0.98289,0.72140,0.21918],[0.98549,0.71250,0.21650],[0.98781,0.70330,0.21358],[0.98986,0.69382,0.21043],[0.99163,0.68408,0.20706],[0.99314,0.67408,0.20348],[0.99438,0.66386,0.19971],[0.99535,0.65341,0.19577],[0.99607,0.64277,0.19165],[0.99654,0.63193,0.18738],[0.99675,0.62093,0.18297],[0.99672,0.60977,0.17842],[0.99644,0.59846,0.17376],[0.99593,0.58703,0.16899],[0.99517,0.57549,0.16412],[0.99419,0.56386,0.15918],[0.99297,0.55214,0.15417],[0.99153,0.54036,0.14910],[0.98987,0.52854,0.14398],[0.98799,0.51667,0.13883],[0.98590,0.50479,0.13367],[0.98360,0.49291,0.12849],[0.98108,0.48104,0.12332],[0.97837,0.46920,0.11817],[0.97545,0.45740,0.11305],[0.97234,0.44565,0.10797],[0.96904,0.43399,0.10294],[0.96555,0.42241,0.09798],[0.96187,0.41093,0.09310],[0.95801,0.39958,0.08831],[0.95398,0.38836,0.08362],[0.94977,0.37729,0.07905],[0.94538,0.36638,0.07461],[0.94084,0.35566,0.07031],[0.93612,0.34513,0.06616],[0.93125,0.33482,0.06218],[0.92623,0.32473,0.05837],[0.92105,0.31489,0.05475],[0.91572,0.30530,0.05134],[0.91024,0.29599,0.04814],[0.90463,0.28696,0.04516],[0.89888,0.27824,0.04243],[0.89298,0.26981,0.03993],[0.88691,0.26152,0.03753],[0.88066,0.25334,0.03521],[0.87422,0.24526,0.03297],[0.86760,0.23730,0.03082],[0.86079,0.22945,0.02875],[0.85380,0.22170,0.02677],[0.84662,0.21407,0.02487],[0.83926,0.20654,0.02305],[0.83172,0.19912,0.02131],[0.82399,0.19182,0.01966],[0.81608,0.18462,0.01809],[0.80799,0.17753,0.01660],[0.79971,0.17055,0.01520],[0.79125,0.16368,0.01387],[0.78260,0.15693,0.01264],[0.77377,0.15028,0.01148],[0.76476,0.14374,0.01041],[0.75556,0.13731,0.00942],[0.74617,0.13098,0.00851],[0.73661,0.12477,0.00769],[0.72686,0.11867,0.00695],[0.71692,0.11268,0.00629],[0.70680,0.10680,0.00571],[0.69650,0.10102,0.00522],[0.68602,0.09536,0.00481],[0.67535,0.08980,0.00449],[0.66449,0.08436,0.00424],[0.65345,0.07902,0.00408],[0.64223,0.07380,0.00401],[0.63082,0.06868,0.00401],[0.61923,0.06367,0.00410],[0.60746,0.05878,0.00427],[0.59550,0.05399,0.00453],[0.58336,0.04931,0.00486],[0.57103,0.04474,0.00529],[0.55852,0.04028,0.00579],[0.54583,0.03593,0.00638],[0.53295,0.03169,0.00705],[0.51989,0.02756,0.00780],[0.50664,0.02354,0.00863],[0.49321,0.01963,0.00955],[0.47960,0.01583,0.01055]]
return ListedColormap(turbo_colormap_data)
def jointplot(X,Y,xlabel=None,ylabel=None,binsim=40,binsh=20,contour=True):
"""
Plots the joint distribution of posteriors for X1 and X2, including the 1D
histograms showing the median and standard deviations.
The work that went into creating this nice method is shown, step by step, in
the ipython notebook "error contours.ipynb". Sources of inspiration:
- http://python4mpia.github.io/intro/quick-tour.html
- http://stackoverflow.com/questions/12301071/multidimensional-confidence-intervals
Usage:
>>> jointplot(M.rtr.trace(),M.mdot.trace(),xlabel='$\log \ r_{\\rm tr}$', ylabel='$\log \ \dot{m}$')
gives the following plot.
.. figure:: ../figures/jointplot.png
:scale: 100 %
:alt: Two-dimensional kernel density distribution.
Two-dimensional kernel density distribution, along with one-dimensional histograms of each distribution.
"""
import scipy.stats
# Generates 2D histogram for image
histt, xt, yt = numpy.histogram2d(X, Y, bins=[binsim,binsim], normed=False)
histt = numpy.transpose(histt) # Beware: numpy switches axes, so switch back.
# assigns correct proportions to subplots
fig=pylab.figure()
gs = pylab.GridSpec(2, 2, width_ratios=[3,1], height_ratios=[1,3], wspace=0.001, hspace=0.001)
con=pylab.subplot(gs[2])
histx=pylab.subplot(gs[0], sharex=con)
histy=pylab.subplot(gs[3], sharey=con)
# Image
con.imshow(histt,extent=[xt[0],xt[-1], yt[0],yt[-1]],origin='lower',cmap=pylab.cm.gray_r,aspect='auto')
# Overplot with error contours 1,2 sigma
if contour==True:
pdf = scipy.stats.gaussian_kde([X, Y])
x,y = pylab.meshgrid(xt,yt)
z = numpy.array(pdf.evaluate([x.flatten(),y.flatten()])).reshape(x.shape)
# the [61,15] values were obtained by trial and error until the joint confidence
# contours matched the confidence intervals from the individual X,Y
s=scipy.stats.scoreatpercentile(pdf(pdf.resample(1000)), [61,15])
cs=con.contour(x,y,z, levels=s, extent=[x[0],x[-1], y[0],y[-1]], linestyles=['-','-','-'], colors=['black','blue'])
# use dictionary in order to assign your own labels to the contours.
#fmtdict = {s[0]:r'$1\sigma$',s[1]:r'$2\sigma$'}
#con.clabel(cs, fmt=fmtdict, inline=True, fontsize=20)
if xlabel!=None: con.set_xlabel(xlabel)
if ylabel!=None: con.set_ylabel(ylabel)
# X-axis histogram
histx.hist(X, binsh, histtype='stepfilled',facecolor='lightblue')
pylab.setp(histx.get_xticklabels(), visible=False) # no X label
pylab.setp(histx.get_yticklabels(), visible=False) # no Y label
# Vertical lines with median and 1sigma confidence
yax=histx.set_ylim()
histx.plot([numpy.median(X),numpy.median(X)],[yax[0],yax[1]],'k-',linewidth=2) # median
xsd=scipy.stats.scoreatpercentile(X, [15.87,84.13])
histx.plot([xsd[0],xsd[0]],[yax[0],yax[1]],'k--') # -1sd
histx.plot([xsd[-1],xsd[-1]],[yax[0],yax[1]],'k--') # +1sd
# Y-axis histogram
histy.hist(Y, binsh, histtype='stepfilled', orientation='horizontal',facecolor='lightyellow')
pylab.setp(histy.get_yticklabels(), visible=False) # no Y label
pylab.setp(histy.get_xticklabels(), visible=False) # no X label
# Vertical lines with median and 1sigma confidence
xax=histy.set_xlim()
histy.plot([xax[0],xax[1]],[numpy.median(Y),numpy.median(Y)],'k-',linewidth=2) # median
ysd=scipy.stats.scoreatpercentile(Y, [15.87,84.13])
histy.plot([xax[0],xax[1]],[ysd[0],ysd[0]],'k--') # -1sd
histy.plot([xax[0],xax[1]],[ysd[-1],ysd[-1]],'k--') # +1sd
def symlog(x, C=1./numpy.log(10.)):
"""
Applies a modified logarithm function to x that handles negative
values while maintaining continuity across
zero. This function solves a very concrete problem: how to handle
data that spans a huge range and also has negative values? log10
will fail. This is the answer.
The transformation is defined in an article from the journal
Measurement Science and Technology (Webber, 2012):
y = sign(x)*(log10(1+abs(x)/(10^C)))
where the scaling constant C determines the resolution of the data
around zero. The smallest order of magnitude shown on either side of
zero will be 10^ceil(C).
Reference: MATHWORKS symlog <https://www.mathworks.com/matlabcentral/fileexchange/57902-symlog>
"""
return numpy.sign(x)*(numpy.log10(1+numpy.abs(x)/(10**C)))
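# Illustrative example (not part of the original source): symlog compresses
# data spanning many orders of magnitude, including negative values, while
# remaining continuous through zero.
# >>> x=numpy.array([-1e5,-10.,0.,1e-3,1e5])
# >>> symlog(x)   # smooth, sign-preserving log-like transform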
|
mit
|
procoder317/scikit-learn
|
sklearn/cluster/tests/test_k_means.py
|
63
|
26190
|
"""Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
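# The fixtures above are shared by the tests below: most tests fit the
# estimators on both the dense array X and its CSR copy X_csr so that the
# dense and sparse code paths can be checked for consistency.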
def test_kmeans_dtype():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
X = (X * 10).astype(np.uint8)
km = KMeans(n_init=1).fit(X)
pred_x = assert_warns(DataConversionWarning, km.predict, X)
assert_array_equal(km.labels_, pred_x)
def test_labels_assignment_and_inertia():
# pure numpy implementation as easily auditable reference gold
# implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
# check that the number of clusters centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
@if_safe_multiprocessing_with_blas
def test_k_means_plus_plus_init_2_jobs():
if sys.version_info[:2] < (3, 4):
raise SkipTest(
"Possible multi-process bug with some BLAS under Python < 3.4")
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_precompute_distances_flag():
# check that a warning is raised if the precompute_distances flag is not
# supported
km = KMeans(precompute_distances="wrong")
assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_k_means_n_init():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
# two regression tests on bad n_init argument
# previous bug: n_init <= 0 threw non-informative TypeError (#3858)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=0).fit, X)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_k_means_fortran_aligned_data():
# Check the KMeans will work well, even if X is a fortran-aligned data.
X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
centers = np.array([[0, 0], [0, 1]])
labels = np.array([0, 1, 1])
km = KMeans(n_init=1, init=centers, precompute_distances=False,
random_state=42)
km.fit(X)
assert_array_equal(km.cluster_centers_, centers)
assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
# Check that a warning is raised, as the number of clusters is larger
# than the init_size
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
# Give a perfect initialization, but a large reassignment_ratio,
# as a result all the centers should be reassigned and the model
# should no longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert_greater(score_before, mb_k_means.score(this_X))
# Give a perfect initialization, with a small reassignment_ratio,
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
# Run the test with 550 clusters and 550 samples, because it turned out
# that these values ensure that the number of clusters to reassign
# is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
assert_raises(ValueError,
MiniBatchKMeans(init=test_init, random_state=42).fit, X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_match_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
# Check if copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check if my_X is centered
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
# Check k_means with a bad initialization does not yield a singleton
# Starting with bad centers that are quickly ignored should not
# result in a repositioning of the centers to the center of mass that
# would lead to collapsed centers which in turn would make the clustering
# dependent on numerical instabilities.
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
# centers must not have collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_input_dtypes():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
X_int = np.array(X_list, dtype=np.int32)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_list),
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini batch kmeans is very unstable on such a small dataset hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_list),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_n_init():
# Check that increasing the number of init increases the quality
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
centers = cluster_centers
assert_equal(centers.shape, (n_clusters, n_features))
labels = labels
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
init=centers)
# too many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
def test_x_squared_norms_init_centroids():
"""Test that x_squared_norms can be None in _init_centroids"""
from sklearn.cluster.k_means_ import _init_centroids
X_norms = np.sum(X**2, axis=1)
precompute = _init_centroids(
X, 3, "k-means++", random_state=0, x_squared_norms=X_norms)
assert_array_equal(
precompute,
_init_centroids(X, 3, "k-means++", random_state=0))
|
bsd-3-clause
|
fredhusser/scikit-learn
|
examples/cluster/plot_lena_compress.py
|
271
|
2229
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Vector Quantization Example
=========================================================
The classic image processing example, Lena, an 8-bit grayscale,
512 x 512 image, is used here to illustrate
how `k`-means is used for vector quantization.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn import cluster
n_clusters = 5
np.random.seed(0)
try:
lena = sp.lena()
except AttributeError:
# Newer versions of scipy have lena in misc
from scipy import misc
lena = misc.lena()
X = lena.reshape((-1, 1)) # We need an (n_sample, n_feature) array
k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4)
k_means.fit(X)
values = k_means.cluster_centers_.squeeze()
labels = k_means.labels_
# create an array from labels and values
lena_compressed = np.choose(labels, values)
lena_compressed.shape = lena.shape
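# Vector quantization step: np.choose maps every pixel label to the gray
# value of its cluster center, so the image above is encoded with only
# n_clusters distinct intensities.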
vmin = lena.min()
vmax = lena.max()
# original lena
plt.figure(1, figsize=(3, 2.2))
plt.imshow(lena, cmap=plt.cm.gray, vmin=vmin, vmax=256)
# compressed lena
plt.figure(2, figsize=(3, 2.2))
plt.imshow(lena_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# equal bins lena
regular_values = np.linspace(0, 256, n_clusters + 1)
regular_labels = np.searchsorted(regular_values, lena) - 1
regular_values = .5 * (regular_values[1:] + regular_values[:-1]) # mean
regular_lena = np.choose(regular_labels.ravel(), regular_values)
regular_lena.shape = lena.shape
plt.figure(3, figsize=(3, 2.2))
plt.imshow(regular_lena, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# histogram
plt.figure(4, figsize=(3, 2.2))
plt.clf()
plt.axes([.01, .01, .98, .98])
plt.hist(X, bins=256, color='.5', edgecolor='.5')
plt.yticks(())
plt.xticks(regular_values)
values = np.sort(values)
for center_1, center_2 in zip(values[:-1], values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b')
for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--')
plt.show()
|
bsd-3-clause
|
fengzhyuan/scikit-learn
|
sklearn/utils/extmath.py
|
142
|
21102
|
"""
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array, NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
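# Illustrative example (not part of the original module):
# >>> squared_norm(np.array([3., 4.]))   # 3**2 + 4**2 -> 25.0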
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
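# Illustrative example (not part of the original module): row_norms gives the
# same result for a dense array and for its CSR representation.
# >>> from scipy import sparse
# >>> X = np.arange(6.).reshape(3, 2)
# >>> np.allclose(row_norms(X), row_norms(sparse.csr_matrix(X)))
# True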
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
Equivalent to np.log(np.linalg.det(A)) but more robust.
It returns -Inf if det(A) is non-positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
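# Illustrative example (not part of the original module):
# >>> fast_logdet(np.eye(3))         # det(I) == 1, so the log-det is 0.0
# 0.0
# >>> fast_logdet(np.zeros((2, 2)))  # singular matrix -> -inf
# -inf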
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Data must be of same type. Supported types '
'are 32 and 64 bit float. '
'Falling back to np.dot.', NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
This function calls BLAS directly while ensuring Fortran contiguity.
This helps avoid the extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.utils.validation import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
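# Illustrative example (not part of the original module):
# >>> density(np.array([0., 1., 0., 2.]))   # 2 nonzeros out of 4 entries
# 0.5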
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
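# Illustrative example (not part of the original module): the helper accepts
# sparse operands transparently.
# >>> from scipy import sparse
# >>> A = sparse.csr_matrix(np.eye(3))
# >>> b = np.arange(3.)
# >>> safe_sparse_dot(A, b)   # identity times b -> array([0., 1., 2.])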
def randomized_range_finder(A, size, n_iter, random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q: 2D array
A (A.shape[0] x size) projection matrix, the range of which
approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
"""
random_state = check_random_state(random_state)
# generating random gaussian vectors r with shape: (A.shape[1], size)
R = random_state.normal(size=(A.shape[1], size))
# sampling the range of A by linear projection of r
Y = safe_sparse_dot(A, R)
del R
# perform power iterations with Y to further 'imprint' the top
# singular vectors of A in Y
for i in xrange(n_iter):
Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
# extracting an orthonormal basis of the A range samples
Q, R = linalg.qr(Y, mode='economic')
return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
transpose='auto', flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples.
n_iter: int (default is 0)
Number of power iterations (can be used to deal with very noisy
problems).
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
implementation of randomized SVD tends to be a little faster in that
case.
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
A random number generator instance to make behavior deterministic.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if transpose == 'auto' and n_samples > n_features:
transpose = True
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
U, V = svd_flip(U, V)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
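# Illustrative usage sketch (not part of the original module): extract the top
# 5 approximate singular triplets of a random matrix.
# >>> rng = np.random.RandomState(0)
# >>> M = rng.randn(100, 50)
# >>> U, s, V = randomized_svd(M, n_components=5, n_iter=3, random_state=0)
# >>> U.shape, s.shape, V.shape
# ((100, 5), (5,), (5, 50))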
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
# the least error
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
Real symmetric or complex Hermitian matrix to be pseudo-inverted
cond : float or None, default None
Cutoff for 'small' eigenvalues.
Singular values smaller than cond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping. Otherwise,
use the rows of v. The choice of which variable to base the decision on
is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
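# Illustrative example (not part of the original module): svd_flip makes the
# sign convention deterministic without changing the reconstruction.
# >>> rng = np.random.RandomState(0)
# >>> A = rng.randn(6, 4)
# >>> U, s, V = linalg.svd(A, full_matrices=False)
# >>> U2, V2 = svd_flip(U.copy(), V.copy())
# >>> np.allclose(np.dot(U2 * s, V2), A)
# True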
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
out: array-like, shape: (M, N), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N)
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = check_array(X, dtype=np.float)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
def _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count):
"""Calculate an average mean update and a Youngs and Cramer variance update.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
old_mean : array-like, shape: (n_features,)
old_variance : array-like, shape: (n_features,)
old_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
updated_sample_count : int
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample variance:
analysis and recommendations, The American Statistician, Vol. 37, No. 3, pp. 242-247
"""
new_sum = X.sum(axis=0)
new_variance = X.var(axis=0) * X.shape[0]
old_sum = old_mean * old_sample_count
n_samples = X.shape[0]
updated_sample_count = old_sample_count + n_samples
partial_variance = old_sample_count / (n_samples * updated_sample_count) * (
n_samples / old_sample_count * old_sum - new_sum) ** 2
unnormalized_variance = old_variance * old_sample_count + new_variance + \
partial_variance
return ((old_sum + new_sum) / updated_sample_count,
unnormalized_variance / updated_sample_count,
updated_sample_count)
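# Illustrative sketch (not part of the original module): streaming two batches through
# _batch_mean_variance_update reproduces the mean and (population) variance of the
# stacked data. The sample count is passed as a float to avoid integer division under
# Python 2.
def _batch_mean_variance_update_demo():
    rng = np.random.RandomState(0)
    x1, x2 = rng.randn(20, 3), rng.randn(30, 3)
    mean, var, n = x1.mean(axis=0), x1.var(axis=0), float(x1.shape[0])
    mean, var, n = _batch_mean_variance_update(x2, mean, var, n)
    stacked = np.vstack((x1, x2))
    assert np.allclose(mean, stacked.mean(axis=0))
    assert np.allclose(var, stacked.var(axis=0))
    return mean, var, n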
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
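# Illustrative sketch (not part of the original module): after the flip, the
# largest-magnitude entry of every row is non-negative.
def _deterministic_vector_sign_flip_demo():
    u = np.array([[1.0, -3.0], [2.0, 1.0]])
    flipped = _deterministic_vector_sign_flip(u.copy())
    rows = np.arange(flipped.shape[0])
    assert (flipped[rows, np.argmax(np.abs(flipped), axis=1)] >= 0).all()
    return flipped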
|
bsd-3-clause
|
Kortemme-Lab/kddg
|
kddg/api/monomer.py
|
1
|
68257
|
#!/usr/bin/python2.4
# encoding: utf-8
"""
monomer.py
High-level functions for interacting with the protein stability sections of the ddG database.
Classes:
MonomericStabilityDDGInterface - a class used to interface with the database. Call get_interface to get a user API based on this class.
AnalysisBreakdown - a class used to run analyses on the data
Note: I moved this code from db.py during a large refactor and have not tested it yet.
A lot of functionality is currently broken but all the pieces are there. See Trac ticket #1375.
Created by Shane O'Connor 2015.
Copyright (c) 2015 __UCSF__. All rights reserved.
"""
from io import BytesIO
import os
import zipfile
import traceback
import copy
import pprint
import datetime
import json
import re
# pickle, random and md5 are only needed by the legacy code paths retained further down.
import pickle
import random
import md5
import numpy
# Note: PDB, Mutation, dssp_elision and rosetta_weights are also referenced below but are not
# imported in this module; they presumably come from klab and are left untouched here.
from sqlalchemy import and_
import pandas as pd
from klab import colortext
from klab.bio.alignment import ScaffoldModelChainMapper
from klab.benchmarking.analysis.ddg_monomeric_stability_analysis import DBBenchmarkRun as MonomericStabilityBenchmarkRun
from klab.benchmarking.analysis.ddg_binding_affinity_analysis import DBBenchmarkRun as BindingAffinityBenchmarkRun
import kddg.api.schema as dbmodel
from kddg.api.layers import *
from kddg.api.db import ddG, PartialDataException
from kddg.api import settings
sys_settings = settings.load()
DeclarativeBase = dbmodel.DeclarativeBase
def get_interface(passwd, username = sys_settings.database.username, hostname = sys_settings.database.hostname, rosetta_scripts_path = None, rosetta_database_path = None, port = 3306):
'''This is the function that should be used to get a MonomericStabilityDDGInterface interface object. It hides the
private methods from the user so that a more traditional object-oriented API is created.'''
return GenericUserInterface.generate(MonomericStabilityDDGInterface, passwd = passwd, username = username, hostname = hostname, rosetta_scripts_path = rosetta_scripts_path, rosetta_database_path = rosetta_database_path, port = port)
class MonomericStabilityDDGInterface(ddG):
def __init__(self, passwd = None, username = sys_settings.database.username, hostname = None, rosetta_scripts_path = None, rosetta_database_path = None, file_content_buffer_size = None, port = sys_settings.database.port):
super(MonomericStabilityDDGInterface, self).__init__(passwd = passwd, username = username, hostname = hostname, rosetta_scripts_path = rosetta_scripts_path, rosetta_database_path = rosetta_database_path, file_content_buffer_size = file_content_buffer_size, port = port)
self.prediction_data_path = self.DDG_db.execute('SELECT Value FROM _DBCONSTANTS WHERE VariableName="PredictionDataPath"')[0]['Value']
#########################################################################################
## Broken API layer
##
## This section contains useful functions which need to be updated to work with the new
## schema or code
#########################################################################################
#== Deprecated functions =================================================================
@deprecated
def get_prediction_experiment_chains(self, predictionset): raise Exception('This function has been deprecated. Use get_pdb_chains_used_for_prediction_set instead.')
###########################################################################################
## Information layer
##
## This layer is for functions which extract data from the database.
###########################################################################################
#== Information API =======================================================================
@informational_pdb
def get_pdb_chains_for_prediction(self, prediction_id):
raise Exception('This needs to be implemented.')
#todo: remove this and replace with get_pdb_mutations_for_mutagenesis
@informational_pdb
def get_pdb_mutations_for_experiment(self, experiment_id):
'''Returns the PDB mutations for a mutagenesis experiment as well as the PDB residue information.'''
pdb_mutations = []
# SELECT ExperimentMutation.*, Experiment.PDBFileID, PDBResidue.ResidueType,
# PDBResidue.BFactorMean, PDBResidue.BFactorDeviation,
# PDBResidue.ComplexExposure, PDBResidue.ComplexDSSP, PDBResidue.MonomericExposure, PDBResidue.MonomericDSSP
# FROM
# Experiment INNER JOIN ExperimentMutation ON Experiment.ID = ExperimentMutation.ExperimentID
# INNER JOIN
# PDBResidue ON Experiment.PDBFileID = PDBResidue.PDBFileID AND ExperimentMutation.Chain = PDBResidue.Chain AND ExperimentMutation.ResidueID = PDBResidue.ResidueID AND ExperimentMutation.WildTypeAA = PDBResidue.ResidueAA
# WHERE Experiment.ID=%s ORDER BY Chain, ResidueID''', parameters=(experiment_id,)):
for pdb_mutation in self.DDG_db.execute_select('''
SELECT ExperimentMutation.*, Experiment.PDBFileID
FROM
Experiment INNER JOIN ExperimentMutation ON Experiment.ID = ExperimentMutation.ExperimentID
WHERE Experiment.ID=%s ORDER BY Chain, ResidueID''', parameters=(experiment_id,)):
pdb_mutation['ResidueType'] = None
pdb_mutation['BFactorMean'] = None
pdb_mutation['BFactorDeviation'] = None
pdb_mutation['ComplexExposure'] = None
pdb_mutation['ComplexDSSP'] = None
pdb_mutation['MonomericExposure'] = None
pdb_mutation['MonomericDSSP'] = None
pdb_mutations.append(pdb_mutation)
return pdb_mutations
#todo: remove this and replace with get_user_dataset_experiment_details
def get_experiment_details(self, experiment_id):
e = self.DDG_db.execute_select('SELECT * FROM Experiment WHERE ID=%s', parameters=(experiment_id,))
if len(e) != 1:
raise colortext.Exception('Experiment %d does not exist.' % (experiment_id, ))
e = e[0]
pdb_mutations = self.get_pdb_mutations_for_experiment(experiment_id)
assert(len(pdb_mutations) > 0)
pdb_id = set([m['PDBFileID'] for m in pdb_mutations])
assert(len(pdb_id) == 1)
pdb_id = pdb_id.pop()
return dict(
Mutagenesis = dict(
ExperimentID = e['ID'],
),
Structure = dict(
PDBFileID = pdb_id,
),
PDBMutations = pdb_mutations,
)
@informational_job
def get_job_details(self, prediction_id, include_files = True, truncate_content = None):
prediction_record = self.DDG_db.execute_select('SELECT * FROM Prediction WHERE ID=%s', parameters=(prediction_id,))
if not prediction_record:
raise Exception('No details could be found for prediction #%d in the database.' % prediction_id)
prediction_record = prediction_record[0]
prediction_record['Files'] = {}
if include_files:
prediction_record['Files'] = self.get_job_files(prediction_id, truncate_content = truncate_content)
# mutfile_content = self.create_mutfile(prediction_id)
# Read the UserPPDataSetExperiment details
user_dataset_experiment_id = prediction_record['UserDataSetExperimentID']
if user_dataset_experiment_id:
ude_details = self.get_user_dataset_experiment_details(user_dataset_experiment_id)
assert(ude_details['Mutagenesis']['PPMutagenesisID'] == prediction_record['PPMutagenesisID'])
for k, v in ude_details.iteritems():
assert(k not in prediction_record)
prediction_record[k] = v
else:
# todo: Remove this later
e_details = self.get_experiment_details(prediction_record['ExperimentID'])
for k, v in e_details.iteritems():
assert(k not in prediction_record)
prediction_record[k] = v
return prediction_record
@informational_job
def get_predictions_experimental_details(self, prediction_id, userdatset_experiment_ids_to_subset_ddgs = None, include_files = False, reference_ids = set(), include_experimental_data = True):
details = self.get_job_details(prediction_id, include_files = include_files)
# Add the DDG values for the related analysis sets
if include_experimental_data:
# Note: user_dataset_experiment_id was undefined in the original code; it is assumed
# here to be the prediction's UserDataSetExperimentID (see get_job_details above).
user_dataset_experiment_id = details['UserDataSetExperimentID']
userdatset_experiment_ids_to_subset_ddgs = userdatset_experiment_ids_to_subset_ddgs or self.get_experimental_ddgs_by_analysis_set(user_dataset_experiment_id, reference_ids = reference_ids)
assert('DDG' not in details)
details['DDG'] = userdatset_experiment_ids_to_subset_ddgs[user_dataset_experiment_id]
else:
details['DDG'] = None
return details
###########################################################################################
## Prediction creation/management layer
##
###########################################################################################
#== Job creation API ===========================================================
#
# This part of the API is responsible for inserting prediction jobs in the database via
# the trickle-down proteomics paradigm.
@job_creator
def add_prediction_set(self, prediction_set_id, halted = True, priority = 5, batch_size = 40, allow_existing_prediction_set = False):
return super(MonomericStabilityDDGInterface, self).add_prediction_set(prediction_set_id, halted = halted, priority = priority, batch_size = batch_size, allow_existing_prediction_set = allow_existing_prediction_set, contains_protein_stability_predictions = True, contains_binding_affinity_predictions = False)
@job_creator
def add_job(self, experimentID, UserDataSetExperimentID, PredictionSet, ProtocolID, keep_hetatm_lines, PDB_ID = None, ReverseMutation = False, InputFiles = {}, testonly = False, strip_other_chains = True):
'''This function inserts a prediction into the database.
The parameters define:
the experiment we are running the prediction for;
the name of the set of predictions for later grouping;
the short description of the Command to be used for prediction;
whether HETATM lines are to be kept or not.
We strip the PDB based on the chains used for the experiment and keep_hetatm_lines.
We then add the prediction record, including the stripped PDB and the inverse mapping
from Rosetta residue numbering to PDB residue numbering.'''
raise Exception('This function needs to be rewritten.')
raise Exception('Make sure to call charge by residue function, _charge_prediction_set_by_residue_count')
raise Exception('Add keep_all_lines option')
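# Note: the raises above make the remainder of this method unreachable; the code below is
# retained verbatim as a reference implementation from the pre-refactor db.py and also
# relies on names (PDB, Mutation) that are not imported in this module.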
parameters = (experimentID,)
assert(ReverseMutation == False) # todo: allow this later
try:
predictionPDB_ID = None
sql = "SELECT PDBFileID, Content FROM Experiment INNER JOIN PDBFile WHERE Experiment.PDBFileID=PDBFile.ID AND Experiment.ID=%s"
results = self.DDG_db.execute_select(sql, parameters = parameters)
if len(results) != 1:
raise colortext.Exception("The SQL query '%s' returned %d results where 1 result was expected." % (sql, len(results)))
experimentPDB_ID = results[0]["PDBFileID"]
pdbID = results[0]["PDBFileID"]
if PDB_ID:
#sql = "SELECT ID, Content FROM PDBFile WHERE ID=%s"
results = self.DDG_db.execute_select("SELECT ID, Content FROM PDBFile WHERE ID=%s", parameters=(PDB_ID))
if len(results) != 1:
raise colortext.Exception("The SQL query '%s' returned %d results where 1 result was expected." % (sql, len(results)))
predictionPDB_ID = results[0]["ID"]
pdbID = results[0]["ID"]
else:
predictionPDB_ID = experimentPDB_ID
# Get the related PDB ID and file
assert(len(results) == 1)
result = results[0]
contents = result["Content"]
pdb = PDB(contents.split("\n"))
# Check that the mutated positions exist and that the wild-type matches the PDB
mutations = self.DDG_db.call_select_proc("GetMutations", parameters = parameters)
# todo: Hack. This should be removed when PDB homologs are dealt with properly.
mutation_objects = []
for mutation in mutations:
if experimentPDB_ID == "1AJ3" and predictionPDB_ID == "1U5P":
assert(int(mutation['ResidueID']) < 1000)
mutation['ResidueID'] = str(int(mutation['ResidueID']) + 1762)
mutation_objects.append(Mutation(mutation['WildTypeAA'], mutation['ResidueID'], mutation['MutantAA'], mutation['Chain']))
#todo: a
#checkPDBAgainstMutations(pdbID, pdb, mutations)
pdb.validate_mutations(mutation_objects)
#for mutation in mutations:
# if experimentPDB_ID == "ub_OTU":
# mutation['ResidueID'] = str(int(mutation['ResidueID']) + 172)
# Strip the PDB to the list of chains. This also renumbers residues in the PDB for Rosetta.
chains = [result['Chain'] for result in self.DDG_db.call_select_proc("GetChains", parameters = parameters)]
if strip_other_chains:
pdb.stripForDDG(chains, keep_hetatm_lines, numberOfModels = 1)
else:
pdb.stripForDDG(True, keep_hetatm_lines, numberOfModels = 1)
#print('\n'.join(pdb.lines))
# - Post stripping checks -
# Get the 'Chain ResidueID' PDB-formatted identifier for each mutation mapped to Rosetta numbering
# then check again that the mutated positions exist and that the wild-type matches the PDB
colortext.warning('mutations %s' % (str(mutations)))
remappedMutations = pdb.remapMutations(mutations, pdbID)
#resfile = self._createResfile(pdb, remappedMutations)
mutfile = self._createMutfile(pdb, remappedMutations)
# Check to make sure that we haven't stripped all the ATOM lines
if not pdb.GetAllATOMLines():
raise colortext.Exception("No ATOM lines remain in the stripped PDB file of %s." % pdbID)
# Check to make sure that CSE and MSE are not present in the PDB
badresidues = pdb.CheckForPresenceOf(["CSE", "MSE"])
if badresidues:
raise colortext.Exception("Found residues [%s] in the stripped PDB file of %s. These should be changed to run this job under Rosetta." % (', '.join(badresidues), pdbID))
# Turn the lines array back into a valid PDB file
strippedPDB = '\n'.join(pdb.lines)
except Exception, e:
colortext.error("Error in %s, %s: .\n%s" % (experimentID, UserDataSetExperimentID, traceback.format_exc()))
colortext.warning(str(e))
return
colortext.error("\nError: '%s'.\n" % (str(e)))
colortext.error(traceback.format_exc())
raise colortext.Exception("An exception occurred retrieving the experimental data for Experiment ID #%s." % experimentID)
#InputFiles["RESFILE"] = resfile
InputFiles["MUTFILE"] = mutfile
ExtraParameters = {}
InputFiles = pickle.dumps(InputFiles)
ExtraParameters = pickle.dumps(ExtraParameters)
PredictionFieldNames = self.DDG_db.FieldNames.Prediction
params = {
PredictionFieldNames.ExperimentID : experimentID,
PredictionFieldNames.UserDataSetExperimentID : UserDataSetExperimentID,
PredictionFieldNames.PredictionSet : PredictionSet,
PredictionFieldNames.ProtocolID : ProtocolID,
PredictionFieldNames.KeptHETATMLines : keep_hetatm_lines,
PredictionFieldNames.StrippedPDB : strippedPDB,
PredictionFieldNames.ResidueMapping : pickle.dumps(pdb.get_ddGInverseResmap()),
PredictionFieldNames.InputFiles : InputFiles,
PredictionFieldNames.Status : "queued",
PredictionFieldNames.ExtraParameters : ExtraParameters,
}
if not testonly:
self.DDG_db.insertDict('Prediction', params)
# Add cryptID string
predictionID = self.DDG_db.getLastRowID()
entryDate = self.DDG_db.execute_select("SELECT EntryDate FROM Prediction WHERE ID=%s", parameters = (predictionID,))[0]["EntryDate"]
rdmstring = ''.join(random.sample('0123456789abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789', 16))
cryptID = "%(predictionID)s%(experimentID)s%(PredictionSet)s%(ProtocolID)s%(entryDate)s%(rdmstring)s" % vars()
cryptID = md5.new(cryptID.encode('utf-8')).hexdigest()
entryDate = self.DDG_db.execute("UPDATE Prediction SET cryptID=%s WHERE ID=%s", parameters = (cryptID, predictionID))
return predictionID
@job_creator
def add_jobs_by_pdb_id(self, pdb_ID, PredictionSet, ProtocolID, status = 'active', priority = 5, keep_hetatm_lines = False, strip_other_chains = True):
raise Exception('This function needs to be rewritten.')
colortext.printf("\nAdding any mutations for this structure which have not been queued/run in the %s prediction set." % PredictionSet, "lightgreen")
d = {
'ID' : PredictionSet,
'Status' : status,
'Priority' : priority,
'BatchSize' : 40,
'EntryDate' : datetime.datetime.now(),
}
self.DDG_db.insertDictIfNew('PredictionSet', d, ['ID'])
# Update the priority and activity if necessary
self.DDG_db.execute('UPDATE PredictionSet SET Status=%s, Priority=%s WHERE ID=%s', parameters = (status, priority, PredictionSet))
# Determine the set of experiments to add
ExperimentIDs = set([r['ID'] for r in self.DDG_db.execute_select('SELECT ID FROM Experiment WHERE PDBFileID=%s', parameters=(pdb_ID,))])
ExperimentIDsInPredictionSet = set([r['ExperimentID'] for r in self.DDG_db.execute_select('SELECT ExperimentID FROM Prediction WHERE PredictionSet=%s', parameters=(PredictionSet,))])
experiment_IDs_to_add = sorted(ExperimentIDs.difference(ExperimentIDsInPredictionSet))
if experiment_IDs_to_add:
colortext.printf("\nAdding %d jobs to the prediction set." % len(experiment_IDs_to_add), "lightgreen")
count = 0
for experiment_ID in experiment_IDs_to_add:
colortext.write('.', "lightgreen")
self.addPrediction(experiment_ID, None, PredictionSet, ProtocolID, keep_hetatm_lines, strip_other_chains = strip_other_chains)
count +=1
else:
colortext.printf("\nAll jobs are already in the queue or have been run.", "lightgreen")
print('')
@job_creator
def add_prediction_run(self, user_dataset_name, prediction_set_id, protocol_id, keep_hetatm_lines, InputFiles = {}, quiet = False, testonly = False, only_single_mutations = False, shortrun = False):
raise Exception('This function needs to be rewritten.')
assert(self.DDG_db.execute_select("SELECT ID FROM PredictionSet WHERE ID=%s", parameters=(prediction_set_id,)))
#results = self.DDG_db.execute_select("SELECT * FROM UserDataSet WHERE TextID=%s", parameters=(user_dataset_name,))
results = self.DDG_db.execute_select("SELECT UserDataSetExperiment.* FROM UserDataSetExperiment INNER JOIN UserDataSet ON UserDataSetID=UserDataSet.ID WHERE UserDataSet.TextID=%s", parameters=(user_dataset_name,))
if not results:
return False
if not(quiet):
colortext.message("Creating predictions for UserDataSet %s using protocol %s" % (user_dataset_name, protocol_id))
colortext.message("%d records found in the UserDataSet" % len(results))
count = 0
showprogress = not(quiet) and len(results) > 300
if showprogress:
print("|" + ("*" * (int(len(results)/100)-2)) + "|")
for r in results:
existing_results = self.DDG_db.execute_select("SELECT * FROM Prediction WHERE PredictionSet=%s AND UserDataSetExperimentID=%s", parameters=(prediction_set_id, r["ID"]))
if len(existing_results) > 0:
#colortext.warning('There already exist records for this UserDataSetExperimentID. You probably do not want to proceed. Skipping this entry.')
continue
PredictionID = self.addPrediction(r["ExperimentID"], r["ID"], prediction_set_id, protocol_id, keep_hetatm_lines, PDB_ID = r["PDBFileID"], ReverseMutation = False, InputFiles = {}, testonly = testonly)
count += 1
if showprogress:
if count > 100:
colortext.write(".", "cyan", flush = True)
count = 0
if shortrun and count > 4:
break
print("")
return(True)
@job_creator
def clone_prediction_run(self, existing_prediction_set, new_prediction_set):
raise Exception('not implemented yet')
#assert(existing_prediction_set exists and has records)
#assert(new_prediction_set is empty)
#for each prediction record, add the record and all associated predictionfile records,
#== Input file generation API ===========================================================
#
# This part of the API is responsible for creating input files for predictions
@job_input
def create_resfile(self, prediction_id):
raise Exception('This needs to be implemented.')
@job_input
def create_mutfile(self, prediction_id):
raise Exception('This needs to be implemented.')
#== Job execution/completion API ===========================================================
#
# This part of the API is responsible for starting jobs and setting them as failed or
# completed
@job_execution
def get_job(self, prediction_set):
raise Exception('This function needs to be implemented by subclasses of the API.')
@job_execution
def start_job(self, prediction_id, prediction_set):
raise Exception('This function needs to be implemented by subclasses of the API.')
@job_execution
def get_max_number_of_cluster_jobs(self, prediction_set_id, priority):
# execute_select returns a list of row dicts, so take the first row (cf. the PredictionDataPath lookup in __init__)
return self.DDG_db.execute_select('SELECT Value FROM _DBCONSTANTS WHERE VariableName="MaxStabilityClusterJobs"')[0]['Value']
@job_completion
def fail_job(self, prediction_id, prediction_set, maxvmem, ddgtime):
raise Exception('This function needs to be implemented by subclasses of the API.')
@job_completion
def parse_prediction_scores(self, stdout):
'''Returns a list of dicts suitable for database storage e.g. PredictionStructureScore records.'''
return self._parse_ddg_monomer_scores_per_structure(stdout)
@job_completion
def store_scores(self, prediction_set, prediction_id, scores):
'''Stores a list of dicts suitable for database storage e.g. PredictionStructureScore records.'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
@job_completion
def complete_job(self, prediction_id, prediction_set, scores, maxvmem, ddgtime):
raise Exception('This function needs to be implemented by subclasses of the API.')
@staticmethod
def _parse_ddg_monomer_scores_per_structure(stdout):
'''Returns a dict mapping the DDG scores from a ddg_monomer run to a list of structure numbers.'''
# Parse the stdout into two mappings (one for wildtype structures, one for mutant structures) mapping
# structure IDs to a dict containing the score components
wildtype_scores = {}
mutant_scores = {}
s1 = 'score before mutation: residue'
s1_len = len(s1)
s2 = 'score after mutation: residue'
s2_len = len(s2)
for line in stdout.split('\n'):
idx = line.find(s1)
if idx != -1:
idx += s1_len
mtchs = re.match('.*?(\d+) %s' % s1, line)
structure_id = int(mtchs.group(1))
assert(structure_id not in wildtype_scores)
tokens = line[idx:].split()
d = {'total' : float(tokens[0])}
for x in range(1, len(tokens), 2):
component_name = tokens[x].replace(':', '')
assert(rosetta_weights.get(component_name))
component_value = float(tokens[x + 1])
d[component_name] = component_value
wildtype_scores[structure_id] = d
else:
idx = line.find(s2)
if idx != -1:
idx += s2_len
mtchs = re.match('.*?(\d+) %s' % s2, line)
structure_id = int(mtchs.group(1))
assert(structure_id not in mutant_scores)
tokens = line[idx:].split()
d = {'total' : float(tokens[1])}
for x in range(2, len(tokens), 2):
component_name = tokens[x].replace(':', '')
assert(rosetta_weights.get(component_name))
component_value = float(tokens[x + 1])
d[component_name] = component_value
mutant_scores[structure_id] = d
# Sanity checks
num_structures = max(wildtype_scores.keys())
expected_keys = set(range(1, num_structures + 1))
assert(expected_keys == set(wildtype_scores.keys()))
assert(expected_keys == set(mutant_scores.keys()))
# Create a list of lists - MutantScoreOrder - of structure IDs e.g. [[5,1,34], [23], [12,3], ...] which is ordered
# by increasing energy so that each sublist contains structure IDs of equal energy and if structures have the same
# energy then their IDs are in the same sublist
d = {}
for structure_id, scores in sorted(mutant_scores.iteritems()):
d[scores['total']] = d.get(scores['total'], [])
d[scores['total']].append(structure_id)
MutantScoreOrder = []
for score, structure_ids in sorted(d.iteritems()):
MutantScoreOrder.append(structure_ids)
# Sanity check - make sure that MutantScoreOrder is really ordered such that each set of structure IDs contains
# structures of the same energy and of a lower energy than the following set of structure IDs in the list
for x in range(len(MutantScoreOrder) - 1):
s1 = set([mutant_scores[n]['total'] for n in MutantScoreOrder[x]])
assert(len(s1) == 1)
if x + 1 < len(MutantScoreOrder):
s2 = set([mutant_scores[n]['total'] for n in MutantScoreOrder[x + 1]])
assert(len(s2) == 1)
assert(s1.pop() < s2.pop())
return dict(
WildType = wildtype_scores,
Mutant = mutant_scores,
MutantScoreOrder = MutantScoreOrder,
)
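# Illustrative sketch (not part of the original class): the MutantScoreOrder grouping
# performed above, shown on toy data. Structures with equal total energy share a
# sublist and sublists are ordered by increasing energy:
#
#     toy_mutant_scores = {1: {'total': -10.0}, 2: {'total': -12.5}, 3: {'total': -10.0}}
#     grouped = {}
#     for structure_id, score in sorted(toy_mutant_scores.items()):
#         grouped.setdefault(score['total'], []).append(structure_id)
#     mutant_score_order = [ids for _, ids in sorted(grouped.items())]
#     # -> [[2], [1, 3]]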
###########################################################################################
## Prediction results layer
##
## This part of the API for returning data about completed predictions.
###########################################################################################
@job_results
def get_ddg_scores_per_structure(self, prediction_id):
# At present, we only use ddg_monomer
raise Exception('Reimplement using the database records.')
###########################################################################################
## Analysis layer
##
## This part of the API is responsible for running analysis on completed predictions
###########################################################################################
@analysis_api
def determine_best_pair(self, prediction_id, score_method_id):
# Iterates over the (wildtype, mutant) pairs in the PredictionStructureScore table and returns the structure ID
# for the pair with the lowest energy mutant
# Note: There are multiple ways to select the best pair. For example, if multiple mutants have the same minimal total
# score, we could have multiple wildtype structures to choose from. In this case, we choose a pair where the wildtype
# structure has the minimal total score.
lowest_wt_score = self.DDG_db.execute_select('SELECT StructureID, total FROM PredictionStructureScore WHERE PredictionID=%s AND ScoreMethodID=%s AND ScoreType="WildType" ORDER BY total LIMIT 1', parameters=(prediction_id, score_method_id))
lowest_mutant_score = self.DDG_db.execute_select('SELECT StructureID, total FROM PredictionStructureScore WHERE PredictionID=%s AND ScoreMethodID=%s AND ScoreType="Mutant" ORDER BY total LIMIT 1', parameters=(prediction_id, score_method_id))
if lowest_wt_score and lowest_mutant_score:
return lowest_wt_score[0]['StructureID'], lowest_mutant_score[0]['StructureID']
return None, None
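# Note: both branches above return, so everything below in this method is unreachable
# legacy code (it also references an undefined 'expectn' variable); it is kept verbatim
# from an earlier implementation.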
scores = self.get_prediction_scores(prediction_id, expectn = expectn).get(score_method_id)
mutant_complexes = []
wildtype_complexes = []
for structure_id, scores in scores.iteritems():
if scores.get('MutantComplex'):
mutant_complexes.append((scores['MutantComplex']['total'], structure_id))
if scores.get('WildTypeComplex'):
wildtype_complexes.append((scores['WildTypeComplex']['total'], structure_id))
wildtype_complexes = sorted(wildtype_complexes)
mutant_complexes = sorted(mutant_complexes)
if wildtype_complexes and mutant_complexes:
return wildtype_complexes[0][1], mutant_complexes[0][1]
return None, None
lowest_mutant_score = self.DDG_db.execute_select('SELECT total FROM PredictionStructureScore WHERE PredictionID=%s AND ScoreMethodID=%s AND ScoreType="Mutant" ORDER BY total LIMIT 1', parameters=(prediction_id, score_method_id))
if lowest_mutant_score:
lowest_mutant_score = lowest_mutant_score[0]['total']
mutant_structure_ids = [r['StructureID'] for r in self.DDG_db.execute_select('SELECT StructureID FROM PredictionStructureScore WHERE PredictionID=%s AND ScoreMethodID=%s AND ScoreType="Mutant" AND total=%s', parameters=(prediction_id, score_method_id, lowest_mutant_score))]
if len(mutant_structure_ids) > 1:
return self.DDG_db.execute_select(('SELECT StructureID FROM PredictionStructureScore WHERE PredictionID=%s AND ScoreMethodID=%s AND ScoreType="WildType" AND StructureID IN (' + ','.join(map(str, mutant_structure_ids)) + ') ORDER BY total LIMIT 1'), parameters=(prediction_id, score_method_id ))[0]['StructureID']
else:
return mutant_structure_ids[0]
return None
@analysis_api
def get_prediction_data(self, prediction_id, score_method_id, main_ddg_analysis_type, top_x = 3, expectn = None, extract_data_for_case_if_missing = True, root_directory = None, dataframe_type = "Stability"):
assert(dataframe_type == "Binding affinity") # todo: stability case needs to be written
try:
top_x_ddg = self.get_top_x_ddg_affinity(prediction_id, score_method_id, top_x = top_x, expectn = expectn)
except Exception, e:
colortext.pcyan(str(e))
colortext.warning(traceback.format_exc())
if extract_data_for_case_if_missing:
self.extract_data_for_case(prediction_id, root_directory = root_directory, force = True, score_method_id = score_method_id)
try:
top_x_ddg = self.get_top_x_ddg_affinity(prediction_id, score_method_id, top_x = top_x, expectn = expectn)
except PartialDataException, e:
raise
except Exception, e:
raise
top_x_ddg_stability = self.get_top_x_ddg_stability(prediction_id, score_method_id, top_x = top_x, expectn = expectn)
return {
main_ddg_analysis_type : top_x_ddg,
'DDGStability_Top%d' % top_x : top_x_ddg_stability,
}
@analysis_api
def get_top_x_ddg_affinity(self, prediction_id, score_method_id, top_x = 3, expectn = None):
'''This function was taken from the PPI API.'''
scores = self.get_prediction_scores(prediction_id, expectn = expectn).get(score_method_id)
if scores == None:
return None
try:
#colortext.warning(prediction_id)
#pprint.pprint(scores)
wt_total_scores = [(scores[struct_num]['WildTypeComplex']['total'], struct_num) for struct_num in scores]
wt_total_scores.sort()
top_x_wt_struct_nums = [t[1] for t in wt_total_scores[:top_x]]
#print(wt_total_scores)
mut_total_scores = [(scores[struct_num]['MutantComplex']['total'], struct_num) for struct_num in scores]
mut_total_scores.sort()
top_x_mut_struct_nums = [t[1] for t in mut_total_scores[:top_x]]
#print(mut_total_scores)
top_x_score = numpy.average([
(scores[mut_struct_num]['MutantComplex']['total'] - scores[mut_struct_num]['MutantLPartner']['total'] - scores[mut_struct_num]['MutantRPartner']['total']) -
(scores[wt_struct_num]['WildTypeComplex']['total'] - scores[wt_struct_num]['WildTypeLPartner']['total'] - scores[wt_struct_num]['WildTypeRPartner']['total'])
for wt_struct_num, mut_struct_num in zip(top_x_wt_struct_nums, top_x_mut_struct_nums)
])
return top_x_score
except Exception, e:
print(e)
colortext.warning(traceback.format_exc())
raise PartialDataException('The case is missing some data.')
@analysis_api
def get_top_x_ddg_stability(self, prediction_id, score_method_id, top_x = 3, expectn = None):
'''Returns the TopX value for the prediction only considering the complex scores. This computation may work as a
measure of a stability DDG value.'''
scores = self.get_prediction_scores(prediction_id, expectn = expectn).get(score_method_id)
if scores == None:
return None
wt_total_scores = [(scores[struct_num]['WildTypeComplex']['total'], struct_num) for struct_num in scores]
wt_total_scores.sort()
top_x_wt_struct_nums = [t[1] for t in wt_total_scores[:top_x]]
mut_total_scores = [(scores[struct_num]['MutantComplex']['total'], struct_num) for struct_num in scores]
mut_total_scores.sort()
top_x_mut_struct_nums = [t[1] for t in mut_total_scores[:top_x]]
return numpy.average([scores[mut_struct_num]['MutantComplex']['total'] - scores[wt_struct_num]['WildTypeComplex']['total']
for wt_struct_num, mut_struct_num in zip(top_x_wt_struct_nums, top_x_mut_struct_nums)])
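# Illustrative sketch (not part of the original class): the Top3 stability value computed
# above, on a toy scores dict of the same shape. The three lowest wild-type and mutant
# complex totals are paired off in rank order and their differences averaged:
#
#     toy = {n: {'WildTypeComplex': {'total': wt}, 'MutantComplex': {'total': mut}}
#            for n, (wt, mut) in enumerate([(-100.0, -98.0), (-101.0, -97.0),
#                                           (-99.0, -99.5), (-95.0, -90.0)], 1)}
#     wt_sorted = sorted((v['WildTypeComplex']['total'], n) for n, v in toy.items())
#     mut_sorted = sorted((v['MutantComplex']['total'], n) for n, v in toy.items())
#     top3 = numpy.average([toy[mn]['MutantComplex']['total'] - toy[wn]['WildTypeComplex']['total']
#                           for (_, wn), (_, mn) in zip(wt_sorted[:3], mut_sorted[:3])])
#     # pairs (wt 2, mut 3), (wt 1, mut 1), (wt 3, mut 2) -> average of (1.5, 2.0, 2.0) = 5.5 / 3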
@analysis_api
def get_top_x_ddg(self, prediction_id, score_method_id, top_x = 3, expectn = None):
'''Returns the TopX value for the prediction. Typically, this is the mean value of the top X predictions for a
case computed using the associated Score records in the database.'''
# Make sure that we have as many cases as we expect
if expectn != None:
scores = self.get_prediction_scores(prediction_id)[score_method_id]
num_cases = 0
for k in scores.keys():
if type(k) == type(1L):
num_cases += 1
if num_cases != expectn:
raise Exception('Expected scores for {0} runs; found {1}.'.format(expectn, num_cases))
# Call the stored procedure which takes the top_x-lowest wildtype scores and gets their average then subtracts this from the average of the top_x-lowest mutant scores
try:
# The original call hardcoded prediction ID 55808 and a TopX of 3 (presumably debugging leftovers); use the arguments instead.
r = self.DDG_db.call_select_proc('MonomericStabilityTopX', parameters=(prediction_id, score_method_id, top_x), quiet=False)
assert(len(r) == 1)
return r[0]['TopX']
except Exception, e:
raise Exception('An error occurred determining the Top{0} score for prediction #{1} using score method {2}: "{3}"\n{4}'.format(top_x, prediction_id, score_method_id, str(e), traceback.print_exc()))
@analysis_api
def get_analysis_dataframe(self, prediction_set_id,
experimental_data_exists = True,
create_binding_affinity_dataframe = False, # Hack added for PUBS class
prediction_set_series_name = None, prediction_set_description = None, prediction_set_credit = None,
prediction_set_color = None, prediction_set_alpha = None,
use_existing_benchmark_data = True,
include_derived_mutations = False,
use_single_reported_value = False,
take_lowest = 3,
burial_cutoff = 0.25,
stability_classication_experimental_cutoff = 1.0,
stability_classication_predicted_cutoff = 1.0,
report_analysis = True,
silent = False,
root_directory = None, # where to find the prediction data on disk
score_method_id = None,
expectn = None,
allow_failures = False,
extract_data_for_case_if_missing = True,
):
#todo: rename function since we return BenchmarkRun objects
assert(score_method_id)
assert(experimental_data_exists == False) # todo: I am implementing the case needed for the PUBS class - the general case still needs to be implemented. Change BindingAffinityBenchmarkRun below accordingly (BindingAffinityBenchmarkRun for affinity, MonomericStabilityBenchmarkRun for stability)
# We allow different dataframe types as sometimes there will be no associated experimental data
dataframe_type = "Stability"
if (not experimental_data_exists) and create_binding_affinity_dataframe:
# A hacky case used for the PUBS year 1 results which were monomeric stability predictions rescored to be used as binding affinity predictions
dataframe_type = "Binding affinity"
assert(create_binding_affinity_dataframe == False or (not experimental_data_exists)) # Part of the PUBS hack. We cannot create this dataframe since we do not have associated experimental data
parameters = copy.copy(locals())
del parameters['self']
del parameters['create_binding_affinity_dataframe']
return super(MonomericStabilityDDGInterface, self)._get_analysis_dataframe(BindingAffinityBenchmarkRun, **parameters)
################################################################################################
## Application layer
## These functions combine the database and prediction data with useful klab
################################################################################################
#== PyMOL API ===========================================================
@app_pymol
def create_pymol_session_in_memory(self, prediction_id, wt_task_number, mutant_task_number, pymol_executable = '/var/www/tg2/tg2env/designdb/pymol/pymol/pymol'):
# Retrieve and unzip results
archive = self.get_job_data(prediction_id)
zipped_content = zipfile.ZipFile(BytesIO(archive), 'r', zipfile.ZIP_DEFLATED)
try:
# Get the name of the files from the zip
wildtype_filename = os.path.join(str(prediction_id), 'repacked_wt_round_%d.pdb' % wt_task_number)
mutant_filename = None
for filepath in sorted(zipped_content.namelist()):
filename = os.path.split(filepath)[1]
if filename.startswith('mut_') and filename.endswith('_round_%d.pdb' % mutant_task_number):
mutant_filename = os.path.join(str(prediction_id), filename)
break
PyMOL_session = None
file_list = zipped_content.namelist()
# If both files exist in the zip, extract their contents in memory and create a PyMOL session pair (PSE, script)
if (mutant_filename in file_list) and (wildtype_filename in file_list):
wildtype_pdb = zipped_content.open(wildtype_filename, 'r').read()
mutant_pdb = zipped_content.open(mutant_filename, 'U').read()
# todo: this should be structure_1_name = 'Wildtype', structure_2_name = 'Mutant' but the underlying PyMOL script needs to be parameterized
chain_mapper = ScaffoldModelChainMapper.from_file_contents(wildtype_pdb, mutant_pdb, structure_1_name = 'Scaffold', structure_2_name = 'Model')
PyMOL_session = chain_mapper.generate_pymol_session(pymol_executable = pymol_executable)
zipped_content.close()
return PyMOL_session
except Exception, e:
zipped_content.close()
raise Exception(str(e))
################################################################################################
## Subclass-specific API layer
## These are functions written specifically for this class which are not necessarily available
## in sibling classes
################################################################################################
@analysis_api
def get_predictionset_data(self, predictionset, userdataset_textid, cached_pdb_details = None, only_single = False):
'''
A helper function for analysis / generating graphs.
Arguments:
predictionset - the name of a PredictionSet
cached_pdb_details - a cached copy of the pdb_details returned by this function. Generating this dict is the slowest step so caching is recommended.
only_single - restrict to single mutations
Returns:
amino_acids - details about amino acid types
pdb_details - details about PDB file techniques, resolution, chain lengths, and whether it is a transmembrane protein
predictions - a mapping: Prediction IDs -> ExperimentID, UserDataSetExperimentID, Experiment and Prediction (this is the one used for the prediction) PDB IDs, scores. If single mutation then also mutation details, DSSP, and exposure.
analysis_datasets - a mapping: analysis subset (e.g. "Guerois") -> Prediction IDs -> (prediction) PDB_ID, ExperimentID, ExperimentDDG (mean of experimental values)
'''
UserDataSetID = self.DDG_db.execute_select("SELECT ID FROM UserDataSet WHERE TextID=%s", parameters=(userdataset_textid,))
assert(UserDataSetID)
UserDataSetID = UserDataSetID[0]['ID']
amino_acids = self.get_amino_acid_details()
prediction_chains = self._get_pdb_chains_used_for_prediction_set(predictionset)
# Get the list of mutation predictions
if only_single:
prediction_records = self.DDG_db.execute_select('''
SELECT a.ID AS PredictionID FROM
(
SELECT Prediction.ID, Prediction.ExperimentID, UserDataSetExperimentID, COUNT(Prediction.ID) AS NumMutations
FROM Prediction
INNER JOIN ExperimentMutation ON ExperimentMutation.ExperimentID=Prediction.ExperimentID
WHERE PredictionSet = %s
GROUP BY Prediction.ID
) AS a
WHERE a.NumMutations=1''', parameters=(predictionset,))
else:
prediction_records = self.DDG_db.execute_select('''
SELECT a.ID AS PredictionID FROM
(
SELECT Prediction.ID, Prediction.ExperimentID, UserDataSetExperimentID, COUNT(Prediction.ID) AS NumMutations
FROM Prediction
INNER JOIN ExperimentMutation ON ExperimentMutation.ExperimentID=Prediction.ExperimentID
WHERE PredictionSet = %s
GROUP BY Prediction.ID
) AS a''', parameters=(predictionset,))
allowed_prediction_ids = set([m['PredictionID'] for m in prediction_records])
kellogg_score_method_id = self.DDG_db.execute_select('''SELECT ID FROM ScoreMethod WHERE MethodName='Global' AND MethodType='Protocol 16' ''')
assert(len(kellogg_score_method_id) == 1)
kellogg_score_method_id = kellogg_score_method_id[0]['ID']
noah_8A_positional_score_method_id = self.DDG_db.execute_select('''SELECT ID FROM ScoreMethod WHERE MethodName='Local' AND MethodType='Position' ''')
assert(len(noah_8A_positional_score_method_id) == 1)
noah_8A_positional_score_method_id = noah_8A_positional_score_method_id[0]['ID']
# Hack - add on the mutations from the datasets which were represented as single mutants (in the original datasets) but which are double mutants
# See ExperimentAssays ( 917, 918, 919, 920, 922, 7314, 932, 933, 936, 937, 938, 2076, 7304, 7305, 7307, 7308, 7309, 7310, 7312, 7315, 7316, 7317, 7320 )
# or Experiments (111145, 110303, 110284, 110287, 110299, 110300, 110285, 110286, 110289, 114180, 114175, 114177, 114171, 110304, 110305, 114179, 114168, 114170, 114172, 114173, 114178, 114167)
# or PubMed IDs 7479708, 9079363, and 9878405)
badly_entered_predictions = self.DDG_db.execute_select('''
SELECT Prediction.ID AS PredictionID FROM Prediction
INNER JOIN Experiment ON Experiment.ID=Prediction.ExperimentID
WHERE PredictionSet=%s
AND ExperimentID IN (111145, 110303, 110284, 110287, 110299, 110300, 110285, 110286, 110289, 114180, 114175, 114177, 114171, 110304, 110305, 114179, 114168, 114170, 114172, 114173, 114178, 114167)''', parameters=(predictionset,))
badly_entered_predictions = set([r['PredictionID'] for r in badly_entered_predictions])
allowed_prediction_ids = allowed_prediction_ids.union(badly_entered_predictions)
# Read in the PredictionStructureScore records
kellogg_structure_score_query = self.DDG_db.execute_select('''
SELECT PredictionID, ScoreType, StructureID, total FROM PredictionStructureScore
WHERE PredictionID >=%s
AND PredictionID <=%s
AND (ScoreType = 'Mutant' OR ScoreType = 'WildType')
AND ScoreMethodID=%s
''', parameters=(min(allowed_prediction_ids), max(allowed_prediction_ids), kellogg_score_method_id))
kellogg_structure_scores = {}
for kss in kellogg_structure_score_query:
PredictionID = kss['PredictionID']
kellogg_structure_scores[PredictionID] = kellogg_structure_scores.get(PredictionID, {'WildType' : {}, 'Mutant' : {}})
kellogg_structure_scores[PredictionID][kss['ScoreType']][kss['StructureID']] = kss['total']
# Get the Prediction records for the mutation predictions and the list of PDB IDs
num_predictions = 0
predictions = {}
experiment_to_prediction_map = {}
prediction_ids = set()
pdb_ids = set()
failures = 0
prediction_results = self.DDG_db.execute_select('''
SELECT Prediction.ID AS PredictionID, Prediction.ExperimentID, UserDataSetExperimentID, Experiment.PDBFileID AS ePDB, UserDataSetExperiment.PDBFileID AS pPDB, Scores
FROM Prediction
INNER JOIN Experiment ON Experiment.ID=Prediction.ExperimentID
INNER JOIN UserDataSetExperiment ON UserDataSetExperiment.ID=Prediction.UserDataSetExperimentID
WHERE PredictionSet=%s''', parameters=(predictionset,))
for p in prediction_results:
id = p['PredictionID']
experiment_id = p['ExperimentID']
if id not in allowed_prediction_ids:
continue
num_predictions += 1
experiment_to_prediction_map[(experiment_id, p['pPDB'])] = id
assert(id not in predictions)
prediction_ids.add(id)
pdb_ids.add(p['ePDB'])
pdb_ids.add(p['pPDB'])
if p['Scores']:
scores = json.loads(p['Scores'])
# Retrieve the scores from the PredictionStructureScore records
# todo: this is only done for the Kellogg scores at present
kellogg_output_score = scores['data']['kellogg']['total']['ddG']
individual_scores = kellogg_structure_scores.get(id)
assert(individual_scores)
assert(len(individual_scores['Mutant']) == 50) # todo: parameterize
assert(len(individual_scores['WildType']) == 50) # todo: parameterize
sorted_mutant_scores = sorted(individual_scores['Mutant'].values())
sorted_wildtype_scores = sorted(individual_scores['WildType'].values())
# Compute three scores - the best pair, the average of the best 3 pairs, and the average of the best 5 pairs.
kellogg_top1 = sorted_mutant_scores[0] - sorted_wildtype_scores[0]
kellogg_top3 = (sum(sorted_mutant_scores[:3]) - sum(sorted_wildtype_scores[:3]))/3.0
kellogg_top5 = (sum(sorted_mutant_scores[:5]) - sum(sorted_wildtype_scores[:5]))/5.0
print(kellogg_output_score, kellogg_top1, kellogg_top3, kellogg_top5)
assert(abs(kellogg_output_score - kellogg_top1) < 0.01)
p['Kellogg_top1'] = kellogg_top1
p['Kellogg_top3'] = kellogg_top3
p['Kellogg_top5'] = kellogg_top5
if scores['data'].get('noah_8,0A'):
p['Noah'] = scores['data']['noah_8,0A']['positional']['ddG']
else:
p['Noah'] = None
else:
p['Kellogg_top1'] = None
p['Kellogg_top3'] = None
p['Kellogg_top5'] = None
p['Noah'] = None
failures += 1
del p['PredictionID']
del p['Scores']
predictions[id] = p
assert(len(experiment_to_prediction_map) == num_predictions)
# Get the PDB chain for each prediction
missing_count = 0
for pc in prediction_chains:
if pc['ID'] not in allowed_prediction_ids:
continue
prediction = predictions.get(pc['ID'])
if prediction:
assert(None == prediction.get('Chain'))
prediction['Chain'] = pc['Chain']
else:
raise Exception('Missing chain data')
# Get the mutation details for each single mutation prediction
mutation_details_1 = self.DDG_db.execute_select('''
SELECT a.ID AS PredictionID, UserDataSetExperiment.PDBFileID as pPDB, ExperimentMutation.Chain, ExperimentMutation.ResidueID, ExperimentMutation.WildTypeAA, ExperimentMutation.MutantAA,
PDBResidue.MonomericExposure, PDBResidue.MonomericDSSP
FROM
(
SELECT Prediction.ID, Prediction.ExperimentID, UserDataSetExperimentID, COUNT(Prediction.ID) AS NumMutations
FROM Prediction
INNER JOIN ExperimentMutation ON ExperimentMutation.ExperimentID=Prediction.ExperimentID
WHERE PredictionSet = %s
GROUP BY Prediction.ID
) AS a
INNER JOIN ExperimentMutation ON a.ExperimentID=ExperimentMutation.ExperimentID
INNER JOIN UserDataSetExperiment ON UserDataSetExperiment.ID=a.UserDataSetExperimentID
INNER JOIN PDBResidue
ON (PDBResidue.PDBFileID=UserDataSetExperiment.PDBFileID
AND PDBResidue.Chain=ExperimentMutation.Chain
AND TRIM(PDBResidue.ResidueID)=TRIM(ExperimentMutation.ResidueID))
WHERE a.NumMutations=1''', parameters=(predictionset,))
# Hack for 1U5P. Note: TRIM removes warnings e.g. "Warning: Truncated incorrect INTEGER value: '1722 '".
mutation_details_2 = self.DDG_db.execute_select('''
SELECT a.ID AS PredictionID, UserDataSetExperiment.PDBFileID as pPDB, ExperimentMutation.Chain, ExperimentMutation.ResidueID, ExperimentMutation.WildTypeAA, ExperimentMutation.MutantAA,
PDBResidue.MonomericExposure, PDBResidue.MonomericDSSP
FROM
(
SELECT Prediction.ID, Prediction.ExperimentID, UserDataSetExperimentID, COUNT(Prediction.ID) AS NumMutations
FROM Prediction
INNER JOIN ExperimentMutation ON ExperimentMutation.ExperimentID=Prediction.ExperimentID
WHERE PredictionSet = %s
GROUP BY Prediction.ID
) AS a
INNER JOIN ExperimentMutation ON a.ExperimentID=ExperimentMutation.ExperimentID
INNER JOIN UserDataSetExperiment ON UserDataSetExperiment.ID=a.UserDataSetExperimentID
INNER JOIN PDBResidue
ON (PDBResidue.PDBFileID=UserDataSetExperiment.PDBFileID
AND PDBResidue.Chain=ExperimentMutation.Chain
AND CAST(TRIM(PDBResidue.ResidueID) AS UNSIGNED) - 1762 = CAST(TRIM(ExperimentMutation.ResidueID) AS UNSIGNED))
WHERE a.NumMutations=1 AND UserDataSetExperiment.PDBFileID="1U5P" ''', parameters=(predictionset,), quiet=True)
mutation_details = mutation_details_1 + mutation_details_2
all_prediction_ids = set([m['PredictionID'] for m in prediction_records])
found_prediction_ids = set([m['PredictionID'] for m in mutation_details])
#assert(len(found_prediction_ids) == len(all_prediction_ids))
for m in mutation_details:
prediction = predictions[m['PredictionID']]
prediction['DSSP'] = dssp_elision.get(m['MonomericDSSP'])
prediction['Exposure'] = m['MonomericExposure']
prediction['WTAA'] = m['WildTypeAA']
prediction['MutantAA'] = m['MutantAA']
prediction['ResidueID'] = m['ResidueID']
# Add missing fields for the set of badly_entered_predictions and multiple mutations
for prediction_id, d in sorted(predictions.iteritems()):
if 'DSSP' not in d.keys():
# todo: if there is a G or P in any mutation, mark this record ["GP"] = True
# use this below to separate the GP mutations, rather than checking the wtaa and mutaa there
# (the original assigned to 'prediction', a stale loop variable from the previous block; 'd' is the record being filled in)
d['DSSP'] = None
d['Exposure'] = None
d['WTAA'] = None
d['MutantAA'] = None
d['ResidueID'] = None
# We can derive the following data:
# TM, Resolution, XRay per PDB
# for prediction_id, d in predictions.iteritems():
# assoc_pdb = pdb_details[d['pPDB']]
# d['TM'] = assoc_pdb['TM'] == 1
# d['XRay'] = assoc_pdb['XRay']
# d['Resolution'] = assoc_pdb['Resolution']
# Derive GP (if wt or mutant is glycine or proline)
# Derive WTPolarity, WTAromaticity, MutantPolarity, MutantAromaticity
# Derive SL, LS, SS, LL
# Derive ChainLength: prediction['ChainLength'] = pdbs[pc['PDBFileID']]['chains'][pc['Chain']]
# AnalysisSets are defined on UserDataSets. The main 'datasets' are defined in the Subset field of the AnalysisSet records associated
# with the UserDataSet i.e. AnalysisSets for a UserDataSet := "SELECT DISTINCT Subset FROM UserAnalysisSet WHERE UserDataSetID=x".
from analysis import UserDataSetExperimentalScores
analysis_data = {}
analysis_subsets = [r['Subset'] for r in self.DDG_db.execute_select("SELECT DISTINCT Subset FROM UserAnalysisSet WHERE UserDataSetID=%s", parameters=(UserDataSetID,))]
for analysis_subset in analysis_subsets:
analysis_data[analysis_subset] = {}
adata = analysis_data[analysis_subset]
adata['Missing'] = []
UDS_scores = UserDataSetExperimentalScores(self.DDG_db, 1, analysis_subset)
count = 0
for section, sectiondata in sorted(UDS_scores.iteritems()):
for recordnumber, record_data in sorted(sectiondata.iteritems()):
PDB_ID = record_data["PDB_ID"]
ExperimentID = record_data["ExperimentID"]
ExperimentalDDG = record_data["ExperimentalDDG"]
prediction_id = experiment_to_prediction_map.get((record_data['ExperimentID'], record_data['PDB_ID']))
if prediction_id != None:
adata[prediction_id] = record_data # PDB_ID, ExperimentID, ExperimentalDDG
else:
adata['Missing'].append((record_data['ExperimentID'], record_data['PDB_ID']))
count += 1
return AnalysisBreakdown(
amino_acids,
self.get_pdb_details_for_analysis(pdb_ids, cached_pdb_details = cached_pdb_details),
predictions,
analysis_data, # a mapping: analysis subset (e.g. "Guerois") -> Prediction IDs -> (prediction) PDB_ID, ExperimentID, ExperimentDDG (mean of experimental values)
)
### Dataset stats functions
@analysis_api
def get_analysis_set_overlap_by_Experiment(self, restrict_to_subsets = set(), UserDataSetID = 1):
''' Returns the overlap between analysis sets of a UserDataSet where overlap is determined by the set of ExperimentIDs.
Caveat: This assumes that the Experiments do not overlap. While this is mostly true at present, there are probably
still some duplicates.
Returns a symmetric matrix (as a pandas dataframe) with the pairwise overlaps.
Usage: self.get_dataset_overlap_by_Experiment(restrict_to_subsets = ['CuratedProTherm', 'Guerois', 'Kellogg', 'Potapov']).'''
# Read the list of experiments from the database
analysis_set_experiments = {}
results = self.DDG_db.execute_select('SELECT * FROM UserAnalysisSet WHERE UserDataSetID=%s', parameters=(UserDataSetID,))
for r in results:
subset = r['Subset']
if (len(restrict_to_subsets) == 0) or (subset in restrict_to_subsets):
analysis_set_experiments[subset] = analysis_set_experiments.get(subset, set())
analysis_set_experiments[subset].add(r['ExperimentID'])
analysis_sets = sorted(analysis_set_experiments.keys())
m = []
for x in analysis_sets:
mx = []
for y in analysis_sets:
mx.append(len(analysis_set_experiments[x].intersection(analysis_set_experiments[y])))
m.append(mx)
df = pd.DataFrame(m, index = analysis_sets, columns = analysis_sets)
return df
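# Illustrative sketch (not part of the original class): the pairwise-overlap matrix built
# above, on toy experiment-ID sets instead of database records:
#
#     toy_sets = {'A': {1, 2, 3}, 'B': {2, 3, 4, 5}}
#     names = sorted(toy_sets)
#     m = [[len(toy_sets[x] & toy_sets[y]) for y in names] for x in names]
#     pd.DataFrame(m, index = names, columns = names)
#     #    A  B
#     # A  3  2
#     # B  2  4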
@analysis_api
def get_analysis_set_disjoint_by_Experiment(self, primary_subset, other_subsets = set(), UserDataSetID = 1):
''' Returns the overlap between analysis sets of a UserDataSet where overlap is determined by the set of ExperimentIDs.
Caveat: This assumes that the Experiments do not overlap. While this is mostly true at present, there are probably
still some duplicates.
Returns a symmetric matrix (as a pandas dataframe) with the pairwise overlaps.
Usage: self.get_dataset_overlap_by_Experiment(other_subsets = ['CuratedProTherm', 'Guerois', 'Kellogg', 'Potapov']).'''
# Read the list of experiments from the database
analysis_set_experiments = {}
primary_analysis_set_experiments = set()
results = self.DDG_db.execute_select('SELECT * FROM UserAnalysisSet WHERE UserDataSetID=%s', parameters=(UserDataSetID,))
for r in results:
subset = r['Subset']
if subset != primary_subset:
if (len(other_subsets) == 0) or (subset in other_subsets):
analysis_set_experiments[subset] = analysis_set_experiments.get(subset, set())
analysis_set_experiments[subset].add(r['ExperimentID'])
else:
primary_analysis_set_experiments.add(r['ExperimentID'])
# Create the subsets
analysis_set_common_experiments = {}
analysis_set_disjoint_experiments = {}
analysis_sets = sorted(analysis_set_experiments.keys())
m = []
for x in analysis_sets:
analysis_set_common_experiments[x] = analysis_set_experiments[x].intersection(primary_analysis_set_experiments)
analysis_set_disjoint_experiments[x] = analysis_set_experiments[x].difference(primary_analysis_set_experiments)
assert(len(analysis_set_common_experiments[x]) + len(analysis_set_disjoint_experiments[x]) == len(analysis_set_experiments[x]))
assert(primary_subset not in analysis_set_experiments.keys())
all_common_analysis_set_experiments = {}
for x in analysis_set_experiments.keys():
all_common_analysis_set_experiments[x] = analysis_set_common_experiments[x]
all_common_analysis_set_experiments[primary_subset] = primary_analysis_set_experiments
all_common_analysis_sets = sorted(all_common_analysis_set_experiments.keys())
m = []
for x in all_common_analysis_sets:
mx = []
for y in all_common_analysis_sets:
mx.append(len(all_common_analysis_set_experiments[x].intersection(all_common_analysis_set_experiments[y])))
m.append(mx)
df = pd.DataFrame(m, index = all_common_analysis_sets, columns = all_common_analysis_sets)
other_sets = analysis_set_experiments.keys()
if len(other_sets) == 3:
print(other_sets)
print('Common to all three other sets in the intersection: %d' % len(analysis_set_common_experiments[other_sets[0]].intersection(analysis_set_common_experiments[other_sets[1]]).intersection(analysis_set_common_experiments[other_sets[2]])))
print('Common to all three other sets in the disjoint set: %d' % len(analysis_set_disjoint_experiments[other_sets[0]].intersection(analysis_set_disjoint_experiments[other_sets[1]]).intersection(analysis_set_disjoint_experiments[other_sets[2]])))
print('ere')
print('\nCommon to primary set\n')
print(df)
all_disjoint_analysis_sets = sorted(analysis_set_disjoint_experiments.keys())
m = []
for x in all_disjoint_analysis_sets:
mx = []
for y in all_disjoint_analysis_sets:
mx.append(len(analysis_set_disjoint_experiments[x].intersection(analysis_set_disjoint_experiments[y])))
m.append(mx)
df = pd.DataFrame(m, index = all_disjoint_analysis_sets, columns = all_disjoint_analysis_sets)
print('\nDisjoint from primary set\n')
print(df)
return df
@analysis_api
def get_analysis_set_overlap_by_Experiment_as_radii(self, max_radius, restrict_to_subsets = set(), UserDataSetID = 1):
'''Todo: look at where this was called and figure out what I was doing. I think this was used to decide on the relative sizes of overlaps between datasets in a Venn diagram.'''
df = self.get_analysis_set_overlap_by_Experiment(restrict_to_subsets = restrict_to_subsets, UserDataSetID = UserDataSetID)
# Determine the relative sizes for the radii (a = pi.r^2 so pi cancels out).
radii_ratios = df.apply(lambda x: numpy.sqrt(x))
# Get the max value and determine the scalar
max_value = max(numpy.hstack(radii_ratios.values))
scalar = float(max_radius) / max_value
# Returned the scaled matrix
return radii_ratios.apply(lambda x: x * scalar)
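# Worked example (not part of the original class) for the scaling above: overlap counts of
# 100 and 25 give sqrt ratios of 10 and 5; with max_radius = 50 the scalar is 50 / 10 = 5,
# so the returned radii are 50 and 25 and the corresponding areas stay proportional to the
# overlap counts.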
################################################################################################
## Private API layer
## These are helper functions used internally by the class but which are not intended for export
################################################################################################
###########################################################################################
## Subclass layer
##
## These functions need to be implemented by subclasses
###########################################################################################
# Concrete functions
def _get_sqa_prediction_table(self): return dbmodel.Prediction
def _get_sqa_prediction_structure_scores_table(self): return dbmodel.PredictionStructureScore
def _get_prediction_table(self): return 'Prediction'
def _get_prediction_structure_scores_table(self): return 'PredictionStructureScore'
def _get_prediction_type(self): return 'ProteinStability'
def _get_prediction_dataset_type(self): return 'Protein stability'
def _get_prediction_type_description(self): return 'monomeric stability'
def _get_user_dataset_experiment_table(self): return 'UserDataSetExperiment'
def _get_user_dataset_experiment_tag_table(self): raise Exception('To be added.')
def _get_allowed_score_types(self): return set(['DDG', 'WildType', 'Mutant'])
###########################################################################################
## Information layer
##
## This layer is for functions which extract data from the database.
###########################################################################################
#== Information API =======================================================================
@informational_pdb
def _get_pdb_chains_used_for_prediction_set(self, prediction_set):
return self.DDG_db.execute_select('''
SELECT Prediction.ID, Experiment.PDBFileID, Chain
FROM Prediction
INNER JOIN Experiment ON Experiment.ID=Prediction.ExperimentID
INNER JOIN ExperimentChain ON ExperimentChain.ExperimentID=Prediction.ExperimentID
WHERE PredictionSet=%s''', parameters=(prediction_set,))
###########################################################################################
## Prediction layer
##
## This part of the API is responsible for inserting prediction jobs in the database via
## the trickle-down proteomics paradigm.
###########################################################################################
#== Job creation API ===========================================================
#
# This part of the API is responsible for inserting prediction jobs in the database via
# the trickle-down proteomics paradigm.
def _charge_prediction_set_by_residue_count(self, PredictionSet):
'''This function assigns a cost for a prediction equal to the number of residues in the chains.'''
raise Exception('This function needs to be rewritten.')
from klab.bio.rcsb import parseFASTAs
DDG_db = self.DDG_db
predictions = DDG_db.execute_select("SELECT ID, ExperimentID FROM Prediction WHERE PredictionSet=%s", parameters=(PredictionSet,))
PDB_chain_lengths ={}
for prediction in predictions:
            chain_records = DDG_db.execute_select('SELECT PDBFileID, Chain FROM Experiment INNER JOIN ExperimentChain ON ExperimentID=Experiment.ID WHERE ExperimentID=%s', parameters=(prediction['ExperimentID'],))
num_residues = 0
for chain_record in chain_records:
key = (chain_record['PDBFileID'], chain_record['Chain'])
if PDB_chain_lengths.get(key) == None:
fasta = DDG_db.execute_select("SELECT FASTA FROM PDBFile WHERE ID=%s", parameters = (chain_record['PDBFileID'],))
assert(len(fasta) == 1)
fasta = fasta[0]['FASTA']
f = parseFASTAs(fasta)
PDB_chain_lengths[key] = len(f[chain_record['PDBFileID']][chain_record['Chain']])
chain_length = PDB_chain_lengths[key]
num_residues += chain_length
print("UPDATE Prediction SET Cost=%0.2f WHERE ID=%d" % (num_residues, prediction['ID']))
predictions = DDG_db.execute("UPDATE Prediction SET Cost=%s WHERE ID=%s", parameters=(num_residues, prediction['ID'],))
|
mit
|
cbentivoglio/neurolearn_clone
|
docs/conf.py
|
1
|
10208
|
# -*- coding: utf-8 -*-
#
# neurolearn documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 4 07:22:28 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# Read the Docs doesn't support necessary C dependencies (e.g., Atlas), so we
# mock them out per https://docs.readthedocs.org/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules.
from mock import MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['importlib','numpy', 'scipy', 'pandas', 'sklearn', 'nibabel',
'matplotlib', 'matplotlib.pyplot','seaborn','sklearn.pipeline',
'sklearn.pipeline.Pipeline','nilearn','nilearn.input_data','nilearn.plotting',
'nilearn.input_data.NiftiMasker','scipy.stats','scipy.stats.norm',
'scipy.stats.binom_test','sklearn.metrics','sklearn.metrics.auc']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
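# Illustrative example (added comment, not part of the original configuration): once the mocks are
# installed above, importing any listed module returns a Mock object instead of the real package,
# which is enough for autodoc to import the code on Read the Docs without its C dependencies, e.g.
#
#   import numpy           # -> a Mock instance, not the real numpy
#   numpy.zeros((3, 3))    # -> another Mock; no compiled code is executed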
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinxcontrib.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'neurolearn'
copyright = u'2015, Luke Chang'
author = u'Luke Chang'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.1'
# The full version, including alpha/beta/rc tags.
release = '.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'neurolearndoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'neurolearn.tex', u'neurolearn Documentation',
u'Luke Chang', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'neurolearn', u'neurolearn Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'neurolearn', u'neurolearn Documentation',
author, 'neurolearn', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
mit
|
nrz/ylikuutio
|
external/bullet3/examples/pybullet/examples/getCameraImageTest.py
|
2
|
4584
|
import matplotlib.pyplot as plt
import numpy as np
import pybullet as p
import time
import pybullet_data
direct = p.connect(p.GUI) #, options="--window_backend=2 --render_device=0")
#egl = p.loadPlugin("eglRendererPlugin")
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.loadURDF('plane.urdf')
p.loadURDF("r2d2.urdf", [0, 0, 1])
p.loadURDF('cube_small.urdf', basePosition=[0.0, 0.0, 0.025])
cube_trans = p.loadURDF('cube_small.urdf', basePosition=[0.0, 0.1, 0.025])
p.changeVisualShape(cube_trans, -1, rgbaColor=[1, 1, 1, 0.1])
width = 128
height = 128
fov = 60
aspect = width / height
near = 0.02
far = 1
view_matrix = p.computeViewMatrix([0, 0, 0.5], [0, 0, 0], [1, 0, 0])
projection_matrix = p.computeProjectionMatrixFOV(fov, aspect, near, far)
# Get depth values using the OpenGL renderer
images = p.getCameraImage(width,
height,
view_matrix,
projection_matrix,
shadow=True,
renderer=p.ER_BULLET_HARDWARE_OPENGL)
rgb_opengl = np.reshape(images[2], (height, width, 4)) * 1. / 255.
depth_buffer_opengl = np.reshape(images[3], [width, height])
depth_opengl = far * near / (far - (far - near) * depth_buffer_opengl)
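# Note (added comment): the OpenGL depth buffer stores non-linear values in [0, 1]; the line above
# converts them back to eye-space depth via far*near / (far - (far - near)*buffer). For example,
# with near=0.02 and far=1, a buffer value of 0.5 maps to 0.02/0.51, roughly 0.039.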
seg_opengl = np.reshape(images[4], [width, height]) * 1. / 255.
time.sleep(1)
# Get depth values using Tiny renderer
images = p.getCameraImage(width,
height,
view_matrix,
projection_matrix,
shadow=True,
renderer=p.ER_TINY_RENDERER)
depth_buffer_tiny = np.reshape(images[3], [width, height])
depth_tiny = far * near / (far - (far - near) * depth_buffer_tiny)
rgb_tiny = np.reshape(images[2], (height, width, 4)) * 1. / 255.
seg_tiny = np.reshape(images[4], [width, height]) * 1. / 255.
bearStartPos1 = [-3.3, 0, 0]
bearStartOrientation1 = p.getQuaternionFromEuler([0, 0, 0])
bearId1 = p.loadURDF("plane.urdf", bearStartPos1, bearStartOrientation1)
bearStartPos2 = [0, 0, 0]
bearStartOrientation2 = p.getQuaternionFromEuler([0, 0, 0])
bearId2 = p.loadURDF("teddy_large.urdf", bearStartPos2, bearStartOrientation2)
textureId = p.loadTexture("checker_grid.jpg")
for b in range(p.getNumBodies()):
p.changeVisualShape(b, linkIndex=-1, textureUniqueId=textureId)
for j in range(p.getNumJoints(b)):
p.changeVisualShape(b, linkIndex=j, textureUniqueId=textureId)
viewMat = [
0.642787516117096, -0.4393851161003113, 0.6275069713592529, 0.0, 0.766044557094574,
0.36868777871131897, -0.5265407562255859, 0.0, -0.0, 0.8191521167755127, 0.5735764503479004,
0.0, 2.384185791015625e-07, 2.384185791015625e-07, -5.000000476837158, 1.0
]
projMat = [
0.7499999403953552, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0000200271606445, -1.0,
0.0, 0.0, -0.02000020071864128, 0.0
]
images = p.getCameraImage(width,
height,
viewMatrix=viewMat,
projectionMatrix=projMat,
renderer=p.ER_BULLET_HARDWARE_OPENGL,
flags=p.ER_USE_PROJECTIVE_TEXTURE,
projectiveTextureView=viewMat,
projectiveTextureProj=projMat)
proj_opengl = np.reshape(images[2], (height, width, 4)) * 1. / 255.
time.sleep(1)
images = p.getCameraImage(width,
height,
viewMatrix=viewMat,
projectionMatrix=projMat,
renderer=p.ER_TINY_RENDERER,
flags=p.ER_USE_PROJECTIVE_TEXTURE,
projectiveTextureView=viewMat,
projectiveTextureProj=projMat)
proj_tiny = np.reshape(images[2], (height, width, 4)) * 1. / 255.
# Plot both images - should show depth values of 0.45 over the cube and 0.5 over the plane
plt.subplot(4, 2, 1)
plt.imshow(depth_opengl, cmap='gray', vmin=0, vmax=1)
plt.title('Depth OpenGL3')
plt.subplot(4, 2, 2)
plt.imshow(depth_tiny, cmap='gray', vmin=0, vmax=1)
plt.title('Depth TinyRenderer')
plt.subplot(4, 2, 3)
plt.imshow(rgb_opengl)
plt.title('RGB OpenGL3')
plt.subplot(4, 2, 4)
plt.imshow(rgb_tiny)
plt.title('RGB Tiny')
plt.subplot(4, 2, 5)
plt.imshow(seg_opengl)
plt.title('Seg OpenGL3')
plt.subplot(4, 2, 6)
plt.imshow(seg_tiny)
plt.title('Seg Tiny')
plt.subplot(4, 2, 7)
plt.imshow(proj_opengl)
plt.title('Proj OpenGL')
plt.subplot(4, 2, 8)
plt.imshow(proj_tiny)
plt.title('Proj Tiny')
plt.subplots_adjust(hspace=0.7)
plt.show()
|
agpl-3.0
|
sergej-C/dl_utils
|
dlc_utils.py
|
1
|
7742
|
#from fast.ai course github repo: https://github.com/fastai/courses/tree/master/deeplearning1/nbs
from __future__ import division,print_function
import math, os, json, sys, re
import cPickle as pickle
from glob import glob
import numpy as np
from matplotlib import pyplot as plt
from operator import itemgetter, attrgetter, methodcaller
from collections import OrderedDict
import itertools
from itertools import chain
import pandas as pd
import PIL
from PIL import Image
from numpy.random import random, permutation, randn, normal, uniform, choice
from numpy import newaxis
import scipy
from scipy import misc, ndimage
from scipy.ndimage.interpolation import zoom
from scipy.ndimage import imread
from sklearn.metrics import confusion_matrix
import bcolz
from sklearn.preprocessing import OneHotEncoder
from sklearn.manifold import TSNE
from IPython.lib.display import FileLink
import theano
from theano import shared, tensor as T
from theano.tensor.nnet import conv2d, nnet
from theano.tensor.signal import pool
import keras
from keras import backend as K
from keras.utils.data_utils import get_file
from keras.utils import np_utils
from keras.utils.np_utils import to_categorical
from keras.models import Sequential, Model
from keras.layers import Input, Embedding, Reshape, merge, LSTM, Bidirectional
from keras.layers import TimeDistributed, Activation, SimpleRNN, GRU
from keras.layers.core import Flatten, Dense, Dropout, Lambda
from keras.regularizers import l2, activity_l2, l1, activity_l1
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD, RMSprop, Adam
from keras.utils.layer_utils import layer_from_config
from keras.metrics import categorical_crossentropy, categorical_accuracy
from keras.layers.convolutional import *
from keras.preprocessing import image, sequence
from keras.preprocessing.text import Tokenizer
from vgg16 import *
from vgg16bn import *
np.set_printoptions(precision=4, linewidth=100)
to_bw = np.array([0.299, 0.587, 0.114])
def gray(img):
return np.rollaxis(img,0,3).dot(to_bw)
def to_plot(img):
return np.rollaxis(img, 0, 3).astype(np.uint8)
def plot(img):
plt.imshow(to_plot(img))
def floor(x):
return int(math.floor(x))
def ceil(x):
return int(math.ceil(x))
def plots(ims, figsize=(12,6), rows=1, interp=False, titles=None):
if type(ims[0]) is np.ndarray:
ims = np.array(ims).astype(np.uint8)
if (ims.shape[-1] != 3):
ims = ims.transpose((0,2,3,1))
f = plt.figure(figsize=figsize)
for i in range(len(ims)):
sp = f.add_subplot(rows, len(ims)//rows, i+1)
if titles is not None:
sp.set_title(titles[i], fontsize=18)
plt.imshow(ims[i], interpolation=None if interp else 'none')
def do_clip(arr, mx):
clipped = np.clip(arr, (1-mx)/1, mx)
return clipped/clipped.sum(axis=1)[:, np.newaxis]
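# Illustrative example (added comment): do_clip bounds each predicted probability to [1-mx, mx] and
# renormalises every row to sum to 1, a common way to cap log-loss penalties, e.g.
#   do_clip(np.array([[0.99, 0.01]]), 0.93)  # -> approximately [[0.93, 0.07]]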
def get_batches(dirname, gen=image.ImageDataGenerator(), shuffle=True, batch_size=4, class_mode='categorical',
target_size=(224,224)):
return gen.flow_from_directory(dirname, target_size=target_size,
class_mode=class_mode, shuffle=shuffle, batch_size=batch_size)
def onehot(x):
return to_categorical(x)
def wrap_config(layer):
return {'class_name': layer.__class__.__name__, 'config': layer.get_config()}
def copy_layer(layer): return layer_from_config(wrap_config(layer))
def copy_layers(layers): return [copy_layer(layer) for layer in layers]
def copy_weights(from_layers, to_layers):
for from_layer,to_layer in zip(from_layers, to_layers):
to_layer.set_weights(from_layer.get_weights())
def copy_model(m):
res = Sequential(copy_layers(m.layers))
copy_weights(m.layers, res.layers)
return res
def insert_layer(model, new_layer, index):
res = Sequential()
for i,layer in enumerate(model.layers):
if i==index: res.add(new_layer)
copied = layer_from_config(wrap_config(layer))
res.add(copied)
copied.set_weights(layer.get_weights())
return res
def adjust_dropout(weights, prev_p, new_p):
scal = (1-prev_p)/(1-new_p)
return [o*scal for o in weights]
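# Worked example (added comment): adjust_dropout rescales weights when the dropout rate of the
# preceding layer changes so the expected activations stay the same. Going from prev_p=0.5 to
# new_p=0.0 gives scal = (1 - 0.5)/(1 - 0.0) = 0.5, i.e. every weight is halved.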
def get_data(path, target_size=(224,224)):
batches = get_batches(path, shuffle=False, batch_size=1, class_mode=None, target_size=target_size)
return np.concatenate([batches.next() for i in range(batches.nb_sample)])
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
(This function is copied from the scikit docs.)
"""
plt.figure()
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print(cm)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j], horizontalalignment="center", color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
def save_array(fname, arr):
c=bcolz.carray(arr, rootdir=fname, mode='w')
c.flush()
def load_array(fname):
return bcolz.open(fname)[:]
def mk_size(img, r2c):
r,c,_ = img.shape
curr_r2c = r/c
new_r, new_c = r,c
if r2c>curr_r2c:
new_r = floor(c*r2c)
else:
new_c = floor(r/r2c)
arr = np.zeros((new_r, new_c, 3), dtype=np.float32)
r2=(new_r-r)//2
c2=(new_c-c)//2
arr[floor(r2):floor(r2)+r,floor(c2):floor(c2)+c] = img
return arr
def mk_square(img):
x,y,_ = img.shape
maxs = max(img.shape[:2])
y2=(maxs-y)//2
x2=(maxs-x)//2
arr = np.zeros((maxs,maxs,3), dtype=np.float32)
arr[floor(x2):floor(x2)+x,floor(y2):floor(y2)+y] = img
return arr
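# Example (added comment): mk_square zero-pads the shorter dimension and centres the image, so a
# (300, 200, 3) input comes back as a (300, 300, 3) array with 50 blank columns on each side.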
def vgg_ft(out_dim):
vgg = Vgg16()
vgg.ft(out_dim)
model = vgg.model
return model
def vgg_ft_bn(out_dim):
vgg = Vgg16BN()
vgg.ft(out_dim)
model = vgg.model
return model
def get_classes(path):
batches = get_batches(path+'train', shuffle=False, batch_size=1)
val_batches = get_batches(path+'valid', shuffle=False, batch_size=1)
test_batches = get_batches(path+'test', shuffle=False, batch_size=1)
return (val_batches.classes, batches.classes, onehot(val_batches.classes), onehot(batches.classes),
val_batches.filenames, batches.filenames, test_batches.filenames)
def split_at(model, layer_type):
layers = model.layers
layer_idx = [index for index,layer in enumerate(layers)
if type(layer) is layer_type][-1]
return layers[:layer_idx+1], layers[layer_idx+1:]
class MixIterator(object):
def __init__(self, iters):
self.iters = iters
self.multi = type(iters) is list
if self.multi:
self.N = sum([it[0].N for it in self.iters])
else:
self.N = sum([it.N for it in self.iters])
def reset(self):
for it in self.iters: it.reset()
def __iter__(self):
return self
def next(self, *args, **kwargs):
        if self.multi:
            # Draw one batch from each sub-iterator in every group, then stack
            # the inputs and the targets so a single combined batch is returned.
            nexts = [[next(it) for it in o] for o in self.iters]
            n0s = np.concatenate([n[0] for o in nexts for n in o])
            n1s = np.concatenate([n[1] for o in nexts for n in o])
            return (n0s, n1s)
else:
nexts = [next(it) for it in self.iters]
n0 = np.concatenate([n[0] for n in nexts])
n1 = np.concatenate([n[1] for n in nexts])
return (n0, n1)
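# Hedged usage sketch (added comment, not from the original source). MixIterator can interleave
# batches from several generators, e.g. labelled and pseudo-labelled directories; the paths and
# the model below are placeholders:
#
#   batches = get_batches('data/train')
#   pseudo_batches = get_batches('data/pseudo')
#   mixed = MixIterator((batches, pseudo_batches))  # tuple => non-multi mode, one batch from each
#   model.fit_generator(mixed, samples_per_epoch=mixed.N, nb_epoch=1)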
|
mit
|
hbar/python-ChargedParticleTools
|
lib/ChargedParticleTools/DensityProfiles.py
|
1
|
5843
|
## DensityProfiles.py
## Dr. Harold Barnard and Jonathan Terry
## 8/21/2014
import numpy as np
import matplotlib.pyplot as plt
## Aggregation of various density profiles, maintaining the total density profile of
## the target material.
## Used for statistics management in simulation
class TargetRegion(object):
def __init__(self, densityProfiles):
self.densityProfiles = densityProfiles
self.totalTargetLength = densityProfiles[0].totalTargetDistance
self.interactions = []
def recordReaction(self, depthIndex):
totalNumberDensity = 0
composition = {}
for elem in self.densityProfiles:
name = elem.name
numberDensity = elem.profileRange[depthIndex]
totalNumberDensity += numberDensity
if (name in composition):
composition[name] += numberDensity
else:
composition[name] = numberDensity
print "REACTION RECORDED AT " + str(depthIndex/100.0)
print "Element\t Percentage\n"
for elem in composition.keys():
composition[elem] = 100*composition[elem]/totalNumberDensity
if (composition[elem] > .001):
print elem + "\t" + str(composition[elem]) + "\n"
def visualizeTotalTarget(self):
frame, fig = plt.subplots()
for elem in self.densityProfiles:
fig.plot(elem.profileDomain, elem.profileRange, label = elem.regionName)
legend = fig.legend(loc = 'upper right', shadow = True)
plt.title('Target Density Profile')
plt.xlabel('Target Depth')
plt.ylabel('Number Densities in Target')
plt.show()
## Supports the creation of either gaussian or step function density
## distributions. Across a target region (compilation of density profiles)
## the totalTargetDistance must be constant.
## totalTargetDistance = total thickness of target
## regionName = name of region (used for statistical analysis)
## regionSymbol = symbol for element that region is made of
## shape = shape of desired number density distribution (either Gaussian or rectangular)
## mu = average of Gaussian OR center of rectangle
## sigma = standard dev of Gaussian OR half width of rectangle
## scale = maximum number density of distribution
## edgeTolerance = standard dev of Gaussian used to smooth rectangular distributions
class DensityProfile(object):
def __init__(self, totalTargetDistance, regionName, regionSymbol, shape, mu, sigma, scale, edgeTolerance = 1):
self.profileDomain = np.linspace(0, totalTargetDistance, 10000)
self.totalTargetDistance = totalTargetDistance
self.regionName = regionName
with open('Universal Data Table.txt') as elementData:
for line in elementData.readlines()[5:]:
line.strip()
col0, col1, col2, col3, col4, col5 = line.split()
if (regionSymbol == col1):
self.atomicNumber = int(col0)
self.symbol = col1
self.name = col2
self.atomicMass = int(col0)/float(col3)
self.meanIonization = float(col4)
self.density = float(col5)
break
if (shape == 'Gaussian'):
gaussScale = scale*sigma*np.sqrt(2*np.pi)
self.profileRange = self.gaussian(self.profileDomain, mu, sigma, gaussScale)
else:
self.profileRange = self.rectangle(self.profileDomain, mu, sigma, scale)
self.smooth(mu, sigma, scale, edgeTolerance)
def smooth(self, mu, sigma, scale, edgeTolerance):
gaussScale = scale*edgeTolerance*np.sqrt(2*np.pi)
self.profileContourSubtract('Gaussian', mu-sigma, edgeTolerance, gaussScale)
self.profileContourSubtract('Gaussian', mu+sigma, edgeTolerance, gaussScale)
def profileContourAdd(self, shape, mu, sigma, scale):
if (shape == 'Gaussian'):
appendage = self.gaussian(self.profileDomain, mu, sigma, scale)
self.profileRange = self.profileRange + appendage
else:
appendage = self.rectangle(self.profileDomain, mu, sigma, scale)
self.profileRange = self.profileRange + appendage
def profileContourSubtract(self, shape, mu, sigma, scale):
if (shape == 'Gaussian'):
appendage = self.gaussian(self.profileDomain, mu, sigma, scale)
self.profileRange = self.profileRange - appendage
np.clip(self.profileRange, 0, np.inf, out = self.profileRange)
else:
appendage = self.rectangle(self.profileDomain, mu, sigma, scale)
self.profileRange = self.profileRange - appendage
np.clip(self.profileRange, 0, np.inf, out = self.profileRange)
def addProfile(self, profile):
self.profileRange = self.profileRange + profile.profileRange
def subtractProfile(self, profile):
self.profileRange = self.profileRange - profile.profileRange
def gaussian(self, x, mu, sigma, scale):
scaledNormalization = scale/np.sqrt(2*np.pi*sigma*sigma)
gaussian = lambda i: scaledNormalization*np.exp(-((i-mu)**2)/(2*sigma**2))
return np.array([gaussian(i) for i in x])
def rectangle(self, x, mu, sigma, scale):
heaviside = lambda i: scale if (i > (mu-sigma) and i < (mu+sigma)) else 0
return np.array([heaviside(i) for i in x])
def visualize(self):
print "REGION SELECTED: " + self.regionName
print "Displaying distribution for " + self.name + " in a " + str(self.totalTargetDistance) + " micron target."
plt.xlabel('Target Depth')
plt.ylabel(self.name + " Number Density in Target")
plt.plot(self.profileDomain, self.profileRange)
plt.show()
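## Hedged usage sketch (added comment, not part of the original module). Assumes the
## 'Universal Data Table.txt' file is available and contains the chosen element symbols:
##
##   bulk = DensityProfile(10, 'substrate', 'Si', 'rectangle', mu=5, sigma=5, scale=5e22)
##   layer = DensityProfile(10, 'implanted boron', 'B', 'Gaussian', mu=2, sigma=0.5, scale=1e21)
##   target = TargetRegion([bulk, layer])
##   target.visualizeTotalTarget()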
|
mit
|
raymondxyang/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/_sklearn.py
|
153
|
6723
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""sklearn cross-support."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import numpy as np
import six
def _pprint(d):
return ', '.join(['%s=%s' % (key, str(value)) for key, value in d.items()])
class _BaseEstimator(object):
"""This is a cross-import when sklearn is not available.
Adopted from sklearn.BaseEstimator implementation.
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
"""
def get_params(self, deep=True):
"""Get parameters for this estimator.
Args:
deep: boolean, optional
If `True`, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns:
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
param_names = [name for name in self.__dict__ if not name.startswith('_')]
for key in param_names:
value = getattr(self, key, None)
if isinstance(value, collections.Callable):
continue
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The former have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Args:
**params: Parameters.
Returns:
self
Raises:
ValueError: If params contain invalid names.
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name,
_pprint(self.get_params(deep=False)),)
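# Illustrative example (added comment, not part of the original source): set_params follows the
# scikit-learn double-underscore convention, so parameters of nested estimators can be updated
# through their parent, e.g.
#
#   pipeline.set_params(classifier__learning_rate=0.01)
#
# sets `learning_rate` on the sub-estimator stored under the hypothetical `classifier` parameter,
# provided that object itself exposes get_params()/set_params().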
# pylint: disable=old-style-class
class _ClassifierMixin():
"""Mixin class for all classifiers."""
pass
class _RegressorMixin():
"""Mixin class for all regression estimators."""
pass
class _TransformerMixin():
"""Mixin class for all transformer estimators."""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting.
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
Examples:
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import NotFittedError
>>> try:
... LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
... except NotFittedError as e:
... print(repr(e))
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
NotFittedError('This LinearSVC instance is not fitted yet',)
Copied from
https://github.com/scikit-learn/scikit-learn/master/sklearn/exceptions.py
"""
# pylint: enable=old-style-class
def _accuracy_score(y_true, y_pred):
score = y_true == y_pred
return np.average(score)
def _mean_squared_error(y_true, y_pred):
if len(y_true.shape) > 1:
y_true = np.squeeze(y_true)
if len(y_pred.shape) > 1:
y_pred = np.squeeze(y_pred)
return np.average((y_true - y_pred)**2)
def _train_test_split(*args, **options):
# pylint: disable=missing-docstring
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
if test_size is None and train_size is None:
train_size = 0.75
elif train_size is None:
train_size = 1 - test_size
train_size = int(train_size * args[0].shape[0])
np.random.seed(random_state)
indices = np.random.permutation(args[0].shape[0])
train_idx, test_idx = indices[:train_size], indices[train_size:]
result = []
for x in args:
result += [x.take(train_idx, axis=0), x.take(test_idx, axis=0)]
return tuple(result)
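# Example (added comment): with the defaults above, the rows are shuffled and 75% go to training.
# Splitting a 100-row feature matrix X and label vector y as _train_test_split(X, y) therefore
# returns (X_train, X_test, y_train, y_test) with 75, 25, 75 and 25 rows respectively.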
# If "TENSORFLOW_SKLEARN" flag is defined then try to import from sklearn.
TRY_IMPORT_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if TRY_IMPORT_SKLEARN:
# pylint: disable=g-import-not-at-top,g-multiple-import,unused-import
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, TransformerMixin
from sklearn.metrics import accuracy_score, log_loss, mean_squared_error
from sklearn.cross_validation import train_test_split
try:
from sklearn.exceptions import NotFittedError
except ImportError:
try:
from sklearn.utils.validation import NotFittedError
except ImportError:
pass
else:
# Naive implementations of sklearn classes and functions.
BaseEstimator = _BaseEstimator
ClassifierMixin = _ClassifierMixin
RegressorMixin = _RegressorMixin
TransformerMixin = _TransformerMixin
accuracy_score = _accuracy_score
log_loss = None
mean_squared_error = _mean_squared_error
train_test_split = _train_test_split
|
apache-2.0
|
ikaee/bfr-attendant
|
facerecognitionlibrary/jni-build/jni/include/tensorflow/contrib/learn/python/learn/tests/dataframe/dataframe_test.py
|
62
|
3753
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the DataFrame class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.tests.dataframe import mocks
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
def setup_test_df():
"""Create a dataframe populated with some test columns."""
df = learn.DataFrame()
df["a"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor a", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
df["b"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor b", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out2")
df["c"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor c", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
return df
class DataFrameTest(test.TestCase):
"""Test of `DataFrame`."""
def test_create(self):
df = setup_test_df()
self.assertEqual(df.columns(), frozenset(["a", "b", "c"]))
def test_select_columns(self):
df = setup_test_df()
df2 = df.select_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["a", "c"]))
def test_exclude_columns(self):
df = setup_test_df()
df2 = df.exclude_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["b"]))
def test_get_item(self):
df = setup_test_df()
c1 = df["b"]
self.assertEqual(
mocks.MockTensor("Mock Tensor 2", dtypes.int32), c1.build())
def test_del_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
del df["b"]
self.assertEqual(2, len(df))
self.assertEqual(df.columns(), frozenset(["a", "c"]))
def test_set_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn",
mocks.MockTensor("Tensor ", dtypes.int32))
df["quack"] = col1
self.assertEqual(4, len(df))
col2 = df["quack"]
self.assertEqual(col1, col2)
def test_set_item_column_multi(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn", [])
col2 = mocks.MockSeries("MooColumn", [])
df["quack", "moo"] = [col1, col2]
self.assertEqual(5, len(df))
col3 = df["quack"]
self.assertEqual(col1, col3)
col4 = df["moo"]
self.assertEqual(col2, col4)
def test_set_item_pandas(self):
# TODO(jamieas)
pass
def test_set_item_numpy(self):
# TODO(jamieas)
pass
def test_build(self):
df = setup_test_df()
result = df.build()
expected = {
"a": mocks.MockTensor("Mock Tensor 1", dtypes.int32),
"b": mocks.MockTensor("Mock Tensor 2", dtypes.int32),
"c": mocks.MockTensor("Mock Tensor 1", dtypes.int32)
}
self.assertEqual(expected, result)
if __name__ == "__main__":
test.main()
|
apache-2.0
|
ElDeveloper/qiita
|
qiita_db/test/test_analysis.py
|
3
|
25985
|
from unittest import TestCase, main
from os import remove
from os.path import exists, join, basename
from shutil import move
from biom import load_table
from pandas.util.testing import assert_frame_equal
from functools import partial
from qiita_core.util import qiita_test_checker
from qiita_core.testing import wait_for_processing_job
from qiita_core.qiita_settings import qiita_config
import qiita_db as qdb
from json import dumps
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
@qiita_test_checker()
class TestAnalysis(TestCase):
def setUp(self):
self.analysis = qdb.analysis.Analysis(1)
self.portal = qiita_config.portal
_, self.fp = qdb.util.get_mountpoint("analysis")[0]
self.get_fp = partial(join, self.fp)
self.biom_fp = self.get_fp("1_analysis_dt-18S_r-1_c-3.biom")
self._old_portal = qiita_config.portal
self.table_fp = None
# fullpaths for testing
self.duplicated_samples_not_merged = self.get_fp(
"not_merged_samples.txt")
self.map_exp_fp = self.get_fp("1_analysis_mapping_exp.txt")
from glob import glob
conf_files = glob(join(qiita_config.plugin_dir, "BIOM*.conf"))
for i, fp in enumerate(conf_files):
qdb.software.Software.from_file(fp, update=True)
def tearDown(self):
self.analysis.artifacts[0].visibility = 'private'
qiita_config.portal = self.portal
with open(self.biom_fp, 'w') as f:
f.write("")
fp = self.get_fp('testfile.txt')
if exists(fp):
remove(fp)
if self.table_fp:
mp = qdb.util.get_mountpoint("processed_data")[0][1]
if exists(self.table_fp):
move(self.table_fp,
join(mp, "2_study_1001_closed_reference_otu_table.biom"))
qiita_config.portal = self._old_portal
def _wait_for_jobs(self, analysis):
for j in analysis.jobs:
wait_for_processing_job(j.id)
if j.status == 'error':
print(j.log.msg)
def _create_analyses_with_samples(self, user='[email protected]',
merge=False):
"""Aux function to create an analysis with samples
Parameters
----------
user : qiita_db.user.User, optional
The user email to attach to the analysis. Default: [email protected]
merge : bool, optional
Merge duplicated ids or not
Returns
-------
qiita_db.analysis.Analysis
Notes
-----
Replicates the samples contained in Analysis(1) at the moment of
creation of this function (September 15, 2016)
"""
user = qdb.user.User(user)
dflt_analysis = user.default_analysis
dflt_analysis.add_samples(
{4: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196',
'1.SKM9.640192', '1.SKM4.640180']})
new = qdb.analysis.Analysis.create(
user, "newAnalysis", "A New Analysis", from_default=True,
merge_duplicated_sample_ids=merge)
self._wait_for_jobs(new)
return new
def test_lock_samples(self):
dflt = qdb.user.User('[email protected]').default_analysis
# The default analysis can have samples added/removed
dflt._lock_samples()
QE = qdb.exceptions
with self.assertRaises(QE.QiitaDBOperationNotPermittedError):
qdb.analysis.Analysis(1)._lock_samples()
def test_get_by_status(self):
qiita_config.portal = 'QIITA'
self.assertEqual(
qdb.analysis.Analysis.get_by_status('public'), set([]))
qiita_config.portal = 'EMP'
self.assertEqual(
qdb.analysis.Analysis.get_by_status('public'), set([]))
qiita_config.portal = 'QIITA'
self.analysis.artifacts[0].visibility = 'public'
self.assertEqual(qdb.analysis.Analysis.get_by_status('public'),
{self.analysis})
qiita_config.portal = 'EMP'
self.assertEqual(
qdb.analysis.Analysis.get_by_status('public'), set([]))
def test_can_be_publicized(self):
analysis = qdb.analysis.Analysis(1)
self.assertEqual(analysis.can_be_publicized, (False, [4, 5, 6]))
a4 = qdb.artifact.Artifact(4)
a4.visibility = 'public'
self.assertEqual(analysis.can_be_publicized, (True, []))
a4.visibility = 'private'
self.assertEqual(analysis.can_be_publicized, (False, [4, 5, 6]))
def test_add_artifact(self):
obs = self._create_analyses_with_samples()
exp = qdb.artifact.Artifact(4)
obs.add_artifact(exp)
self.assertIn(exp, obs.artifacts)
def test_has_access_public(self):
analysis = self._create_analyses_with_samples("[email protected]")
analysis.artifacts[0].visibility = 'public'
qiita_config.portal = 'QIITA'
self.assertTrue(
analysis.has_access(qdb.user.User("[email protected]")))
qiita_config.portal = 'EMP'
self.assertFalse(
analysis.has_access(qdb.user.User("[email protected]")))
def test_has_access_shared(self):
self.assertTrue(
self.analysis.has_access(qdb.user.User("[email protected]")))
def test_has_access_private(self):
self.assertTrue(
self.analysis.has_access(qdb.user.User("[email protected]")))
def test_has_access_admin(self):
qiita_config.portal = 'QIITA'
self.assertTrue(
self.analysis.has_access(qdb.user.User("[email protected]")))
qiita_config.portal = 'EMP'
with self.assertRaises(qdb.exceptions.QiitaDBError):
qdb.analysis.Analysis(1).has_access(qdb.user.User("[email protected]"))
def test_has_access_no_access(self):
self.assertFalse(
self.analysis.has_access(qdb.user.User("[email protected]")))
def test_can_edit(self):
a = qdb.analysis.Analysis(1)
self.assertTrue(a.can_edit(qdb.user.User('[email protected]')))
self.assertTrue(a.can_edit(qdb.user.User('[email protected]')))
self.assertTrue(a.can_edit(qdb.user.User('[email protected]')))
self.assertFalse(a.can_edit(qdb.user.User('[email protected]')))
def test_create_nonqiita_portal(self):
qiita_config.portal = "EMP"
obs = qdb.analysis.Analysis.create(
qdb.user.User("[email protected]"), "newAnalysis", "A New Analysis")
# make sure portal is associated
self.assertCountEqual(obs._portals, ["QIITA", "EMP"])
def test_create_from_default(self):
with qdb.sql_connection.TRN:
sql = "SELECT NOW()"
qdb.sql_connection.TRN.add(sql)
time1 = qdb.sql_connection.TRN.execute_fetchlast()
owner = qdb.user.User("[email protected]")
obs = qdb.analysis.Analysis.create(
owner, "newAnalysis", "A New Analysis", from_default=True)
self.assertEqual(obs.owner, owner)
self.assertEqual(obs.name, "newAnalysis")
self.assertEqual(obs._portals, ["QIITA"])
self.assertLess(time1, obs.timestamp)
self.assertEqual(obs.description, "A New Analysis")
self.assertCountEqual(obs.samples, [4])
self.assertCountEqual(
obs.samples[4], ['1.SKD8.640184', '1.SKB7.640196',
'1.SKM9.640192', '1.SKM4.640180'])
self.assertEqual(obs.data_types, ['18S'])
self.assertEqual(obs.shared_with, [])
self.assertEqual(obs.mapping_file, None)
self.assertEqual(obs.tgz, None)
self.assertNotEqual(obs.jobs, [])
self.assertEqual(obs.pmid, None)
def test_exists(self):
qiita_config.portal = 'QIITA'
self.assertTrue(qdb.analysis.Analysis.exists(1))
self.assertFalse(qdb.analysis.Analysis.exists(1000))
qiita_config.portal = 'EMP'
self.assertFalse(qdb.analysis.Analysis.exists(1))
self.assertFalse(qdb.analysis.Analysis.exists(1000))
def test_delete(self):
# successful delete
new = qdb.analysis.Analysis.create(
qdb.user.User('[email protected]'), "newAnalysis",
"A New Analysis")
self.assertTrue(qdb.analysis.Analysis.exists(new.id))
qdb.analysis.Analysis.delete(new.id)
self.assertFalse(qdb.analysis.Analysis.exists(new.id))
# no possible to delete
QE = qdb.exceptions
with self.assertRaises(QE.QiitaDBUnknownIDError):
qdb.analysis.Analysis.delete(new.id)
# Analysis with artifacts
with self.assertRaises(QE.QiitaDBOperationNotPermittedError):
qdb.analysis.Analysis.delete(1)
def test_retrieve_owner(self):
self.assertEqual(self.analysis.owner, qdb.user.User("[email protected]"))
def test_retrieve_name(self):
self.assertEqual(self.analysis.name, "SomeAnalysis")
def test_retrieve_description(self):
self.assertEqual(self.analysis.description, "A test analysis")
def test_set_description(self):
self.analysis.description = "New description"
self.assertEqual(self.analysis.description, "New description")
def test_retrieve_samples(self):
exp = {4: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196',
'1.SKM9.640192', '1.SKM4.640180'],
5: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196',
'1.SKM9.640192', '1.SKM4.640180'],
6: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196',
'1.SKM9.640192', '1.SKM4.640180']}
self.assertCountEqual(self.analysis.samples, exp)
def test_retrieve_portal(self):
self.assertEqual(self.analysis._portals, ["QIITA"])
def test_retrieve_data_types(self):
exp = ['18S', '16S']
self.assertCountEqual(self.analysis.data_types, exp)
def test_retrieve_shared_with(self):
self.assertEqual(self.analysis.shared_with,
[qdb.user.User("[email protected]")])
def test_retrieve_jobs(self):
self.assertEqual(self.analysis.jobs, [])
def test_retrieve_pmid(self):
self.assertEqual(self.analysis.pmid, "121112")
def test_set_pmid(self):
new = self._create_analyses_with_samples("[email protected]")
self.assertIsNone(new.pmid)
new.pmid = "11211221212213"
self.assertEqual(new.pmid, "11211221212213")
def test_retrieve_mapping_file(self):
exp = join(self.fp, "1_analysis_mapping.txt")
obs = self.analysis.mapping_file
self.assertIsNotNone(obs)
self.assertEqual(
qdb.util.get_filepath_information(obs)['fullpath'], exp)
self.assertTrue(exists(exp))
def test_retrieve_tgz(self):
        # generate the tgz here because it is only created once the analysis
        # runs to completion, successfully or not
analysis = self._create_analyses_with_samples("[email protected]")
fp = self.get_fp('test.tgz')
with open(fp, 'w') as f:
f.write('')
analysis._add_file(fp, 'tgz')
self.assertEqual(analysis.tgz, fp)
def test_retrieve_tgz_none(self):
self.assertIsNone(self.analysis.tgz)
def test_summary_data(self):
obs = self.analysis.summary_data()
exp = {'studies': 1,
'artifacts': 3,
'samples': 5}
self.assertEqual(obs, exp)
def test_add_remove_samples(self):
analysis = qdb.user.User('[email protected]').default_analysis
exp = {4: ['1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192',
'1.SKM4.640180', '1.SKB8.640193'],
5: ['1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192',
'1.SKM4.640180', '1.SKB8.640193'],
6: ['1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192',
'1.SKM4.640180', '1.SKB8.640193']}
analysis.add_samples(exp)
obs = analysis.samples
self.assertCountEqual(list(obs.keys()), exp.keys())
for k in obs:
self.assertCountEqual(obs[k], exp[k])
analysis.remove_samples(artifacts=(qdb.artifact.Artifact(4), ),
samples=('1.SKB8.640193', ))
exp = {4: ['1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192',
'1.SKM4.640180'],
5: ['1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192',
'1.SKM4.640180', '1.SKB8.640193'],
6: ['1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192',
'1.SKM4.640180', '1.SKB8.640193']}
obs = analysis.samples
self.assertCountEqual(list(obs.keys()), exp.keys())
for k in obs:
self.assertCountEqual(obs[k], exp[k])
analysis.remove_samples(samples=('1.SKD8.640184', ))
exp = {4: ['1.SKB7.640196', '1.SKM9.640192', '1.SKM4.640180'],
5: ['1.SKB8.640193', '1.SKB7.640196', '1.SKM9.640192',
'1.SKM4.640180'],
6: ['1.SKB8.640193', '1.SKB7.640196', '1.SKM9.640192',
'1.SKM4.640180']}
self.assertCountEqual(analysis.samples, exp)
analysis.remove_samples(
artifacts=(qdb.artifact.Artifact(4), qdb.artifact.Artifact(5)))
exp = {6: {'1.SKB7.640196', '1.SKB8.640193',
'1.SKM4.640180', '1.SKM9.640192'}}
self.assertCountEqual(analysis.samples, exp)
def test_share_unshare(self):
analysis = self._create_analyses_with_samples()
user = qdb.user.User("[email protected]")
self.assertEqual(analysis.shared_with, [])
analysis.share(user)
exp = [user]
self.assertEqual(analysis.shared_with, exp)
analysis.unshare(user)
self.assertEqual(analysis.shared_with, [])
def test_build_mapping_file(self):
analysis = self._create_analyses_with_samples()
samples = {4: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']}
analysis._build_mapping_file(samples)
obs = qdb.util.get_filepath_information(
analysis.mapping_file)['fullpath']
exp = self.get_fp("%s_analysis_mapping.txt" % analysis.id)
self.assertEqual(obs, exp)
obs = qdb.metadata_template.util.load_template_to_dataframe(
obs, index='#SampleID')
exp = qdb.metadata_template.util.load_template_to_dataframe(
self.map_exp_fp, index='#SampleID')
# assert_frame_equal assumes same order on the rows, thus sorting
# frames by index
obs.sort_index(inplace=True)
exp.sort_index(inplace=True)
# then sorting columns
obs = obs.reindex(sorted(obs.columns), axis=1)
exp = exp.reindex(sorted(exp.columns), axis=1)
assert_frame_equal(obs, exp, check_like=True)
def test_build_mapping_file_duplicated_samples_no_merge(self):
analysis = self._create_analyses_with_samples()
samples = {4: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'],
3: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']}
analysis._build_mapping_file(samples, True)
mapping_fp = qdb.util.get_filepath_information(
analysis.mapping_file)['fullpath']
obs = qdb.metadata_template.util.load_template_to_dataframe(
mapping_fp, index='#SampleID')
exp = qdb.metadata_template.util.load_template_to_dataframe(
self.duplicated_samples_not_merged, index='#SampleID')
# assert_frame_equal assumes same order on the rows, thus sorting
# frames by index
obs.sort_index(inplace=True)
exp.sort_index(inplace=True)
# then sorting columns
obs = obs.reindex(sorted(obs.columns), axis=1)
exp = exp.reindex(sorted(exp.columns), axis=1)
assert_frame_equal(obs, exp, check_like=True)
def test_build_mapping_file_duplicated_samples_merge(self):
analysis = self._create_analyses_with_samples()
samples = {4: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'],
3: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']}
analysis._build_mapping_file(samples)
mapping_fp = qdb.util.get_filepath_information(
analysis.mapping_file)['fullpath']
obs = qdb.metadata_template.util.load_template_to_dataframe(
mapping_fp, index='#SampleID')
exp = qdb.metadata_template.util.load_template_to_dataframe(
self.map_exp_fp, index='#SampleID')
# assert_frame_equal assumes same order on the rows, thus sorting
# frames by index
obs.sort_index(inplace=True)
exp.sort_index(inplace=True)
# then sorting columns
obs = obs.reindex(sorted(obs.columns), axis=1)
exp = exp.reindex(sorted(exp.columns), axis=1)
assert_frame_equal(obs, exp, check_like=True)
def test_build_biom_tables(self):
analysis = self._create_analyses_with_samples()
grouped_samples = {
'18S || algorithm': [
(4, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'])]}
obs_bioms = analysis._build_biom_tables(grouped_samples)
biom_fp = self.get_fp(
"%s_analysis_18S_algorithm.biom" % analysis.id)
obs = [(a, basename(b)) for a, b, _ in obs_bioms]
self.assertEqual(obs, [('18S', basename(biom_fp))])
table = load_table(obs_bioms[0][1])
obs = set(table.ids(axis='sample'))
exp = {'1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'}
self.assertEqual(obs, exp)
def test_build_biom_tables_with_references(self):
analysis = self._create_analyses_with_samples()
analysis_id = analysis.id
grouped_samples = {
('18S || Pick closed-reference OTUs (reference: 1) | '
'Split libraries FASTQ'): [
(4, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']),
(5, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'])],
('18S || Pick closed-reference OTUs (reference: 1) | '
'Trim (lenght: 150)'): [
(4, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']),
(5, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'])],
('16S || Pick closed-reference OTUs (reference: 2) | '
'Trim (lenght: 100)'): [
(4, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']),
(5, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'])]}
obs_bioms = analysis._build_biom_tables(grouped_samples)
obs = [(a, basename(b)) for a, b, _ in obs_bioms]
exp = [
('16S', '%s_analysis_16S_PickclosedreferenceOTUsreference2'
'Trimlenght100.biom' % analysis_id),
('18S', '%s_analysis_18S_PickclosedreferenceOTUsreference1'
'SplitlibrariesFASTQ.biom' % analysis_id),
('18S', '%s_analysis_18S_PickclosedreferenceOTUsreference1'
'Trimlenght150.biom' % analysis_id)]
self.assertCountEqual(obs, exp)
exp = {'1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'}
for dt, fp, _ in obs_bioms:
table = load_table(fp)
obs = set(table.ids(axis='sample'))
self.assertEqual(obs, exp)
def test_build_biom_tables_duplicated_samples_not_merge(self):
analysis = self._create_analyses_with_samples()
grouped_samples = {
'18S || algorithm': [
(4, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']),
(5, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'])]}
obs_bioms = analysis._build_biom_tables(grouped_samples, True)
obs = [(a, basename(b)) for a, b, _ in obs_bioms]
biom_fp = (
"%s_analysis_18S_algorithm.biom" % analysis.id)
self.assertEqual(obs, [('18S', biom_fp)])
table = load_table(obs_bioms[0][1])
obs = set(table.ids(axis='sample'))
exp = {'4.1.SKD8.640184', '4.1.SKB7.640196', '4.1.SKB8.640193',
'5.1.SKB8.640193', '5.1.SKB7.640196', '5.1.SKD8.640184'}
self.assertCountEqual(obs, exp)
def test_build_biom_tables_raise_error_due_to_sample_selection(self):
grouped_samples = {
'18S || algorithm': [
(4, ['sample_name_1', 'sample_name_2', 'sample_name_3'])]}
with self.assertRaises(RuntimeError):
self.analysis._build_biom_tables(grouped_samples)
def test_build_files(self):
analysis = self._create_analyses_with_samples()
biom_tables = analysis.build_files(True)
# testing that the generated files have the same sample ids
biom_fp = biom_tables[0][1]
biom_ids = load_table(biom_fp).ids(axis='sample')
mapping_fp = qdb.util.get_filepath_information(
analysis.mapping_file)['fullpath']
mf_ids = qdb.metadata_template.util.load_template_to_dataframe(
mapping_fp, index='#SampleID').index
self.assertCountEqual(biom_ids, mf_ids)
# now that the samples have been prefixed
exp = ['1.SKM9.640192', '1.SKM4.640180', '1.SKD8.640184',
'1.SKB8.640193', '1.SKB7.640196']
self.assertCountEqual(biom_ids, exp)
def test_build_files_post_processing_cmd(self):
tmp = qdb.artifact.Artifact(4).processing_parameters.command
cmd_id = tmp.id
# set a known artifact's additional processing command
# to a known value. Then test for it.
# qiita_db/test/support_files/worker.py will work w/py2.7 & 3.6 envs.
results = {}
results['script_env'] = 'source deactivate; source activate qiita;'
results['script_path'] = 'qiita_db/test/support_files/worker.py'
# no additional parameters are needed for worker.py
# fp_biom and fp_archive will be generated by build_files()
results['script_params'] = {}
# convert to json representation and store in PostgreSQL
results = dumps(results)
sql = """UPDATE qiita.software_command
SET post_processing_cmd = %s
WHERE command_id = %s"""
qdb.sql_connection.perform_as_transaction(sql, [results, cmd_id])
# create a sample analysis and run build_files on it.
analysis = self._create_analyses_with_samples()
biom_files = analysis.build_files(False)
# if build_files used additional processing commands, it will
# return a couple of tuples, where the third element contains
# output archive-artifact data.
self.assertEqual(2, len(biom_files))
aid = analysis.id
exp = [('%d_analysis_18S_PickclosedreferenceOTUsSplitlibraries'
'FASTQ.biom' % aid, None),
('%d_analysis_18S_PickclosedreferenceOTUsSplitlibraries'
'FASTQ.biom' % aid, 'archive_%d.tre' % aid)]
obs = [(basename(fp1),
basename(fp2) if fp2 is not None else None)
for _, fp1, fp2 in biom_files]
self.assertEqual(obs, exp)
# cleanup (assume command was NULL previously)
sql = """UPDATE qiita.software_command
SET post_processing_cmd = NULL
WHERE command_id = %s"""
qdb.sql_connection.perform_as_transaction(sql, [cmd_id])
def test_build_files_merge_duplicated_sample_ids(self):
user = qdb.user.User("[email protected]")
dflt_analysis = user.default_analysis
dflt_analysis.add_samples(
{4: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196',
'1.SKM9.640192', '1.SKM4.640180'],
5: ['1.SKB8.640193', '1.SKB7.640196', '1.SKM9.640192',
'1.SKM4.640180', '1.SKD8.640184'],
6: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196',
'1.SKM9.640192', '1.SKM4.640180']})
new = qdb.analysis.Analysis.create(
user, "newAnalysis", "A New Analysis", from_default=True,
merge_duplicated_sample_ids=True)
self._wait_for_jobs(new)
biom_tables = new.build_files(False)
# testing that the generated files have the same sample ids
biom_ids = []
for _, fp, _ in biom_tables:
biom_ids.extend(load_table(fp).ids(axis='sample'))
mapping_fp = qdb.util.get_filepath_information(
new.mapping_file)['fullpath']
mf_ids = qdb.metadata_template.util.load_template_to_dataframe(
mapping_fp, index='#SampleID').index
self.assertCountEqual(biom_ids, mf_ids)
# now that the samples have been prefixed
exp = ['4.1.SKM9.640192', '4.1.SKM4.640180', '4.1.SKD8.640184',
'4.1.SKB8.640193', '4.1.SKB7.640196',
'5.1.SKM9.640192', '5.1.SKM4.640180', '5.1.SKD8.640184',
'5.1.SKB8.640193', '5.1.SKB7.640196',
'6.1.SKM9.640192', '6.1.SKM4.640180', '6.1.SKD8.640184',
'6.1.SKB8.640193', '6.1.SKB7.640196']
self.assertCountEqual(biom_ids, exp)
def test_add_file(self):
# Tested indirectly through build_files
pass
def test_is_public_make_public(self):
analysis = self._create_analyses_with_samples()
self.assertFalse(analysis.is_public)
# testing errors
with self.assertRaises(ValueError):
analysis.make_public()
# testing successfully making public
# 4 is the only artifact being used in _create_analyses_with_samples
qdb.artifact.Artifact(4).visibility = 'public'
analysis.make_public()
self.assertTrue(analysis.is_public)
if __name__ == "__main__":
main()
|
bsd-3-clause
|
kiyoto/statsmodels
|
docs/source/plots/graphics_gofplots_qqplot.py
|
38
|
1911
|
# -*- coding: utf-8 -*-
"""
Created on Sun May 06 05:32:15 2012
Author: Josef Perktold
edited by: Paul Hobson (2012-08-19)
"""
from scipy import stats
from matplotlib import pyplot as plt
import statsmodels.api as sm
#example from docstring
data = sm.datasets.longley.load()
data.exog = sm.add_constant(data.exog, prepend=True)
mod_fit = sm.OLS(data.endog, data.exog).fit()
res = mod_fit.resid
left = -1.8 #x coordinate for text insert
fig = plt.figure()
ax = fig.add_subplot(2, 2, 1)
sm.graphics.qqplot(res, ax=ax)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, 'no keywords', verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 2)
sm.graphics.qqplot(res, line='s', ax=ax)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "line='s'", verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 3)
sm.graphics.qqplot(res, line='45', fit=True, ax=ax)
ax.set_xlim(-2, 2)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "line='45', \nfit=True", verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 4)
sm.graphics.qqplot(res, dist=stats.t, line='45', fit=True, ax=ax)
ax.set_xlim(-2, 2)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "dist=stats.t, \nline='45', \nfit=True",
verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
fig.tight_layout()
plt.gcf()
# example with the new ProbPlot class
import numpy as np
x = np.random.normal(loc=8.25, scale=3.5, size=37)
y = np.random.normal(loc=8.00, scale=3.25, size=37)
pp_x = sm.ProbPlot(x, fit=True)
pp_y = sm.ProbPlot(y, fit=True)
# probability of exceedance
fig2 = pp_x.probplot(exceed=True)
# compare x quantiles to y quantiles
fig3 = pp_x.qqplot(other=pp_y, line='45')
# same as above with probabilities/percentiles
fig4 = pp_x.ppplot(other=pp_y, line='45')
|
bsd-3-clause
|
plissonf/scikit-learn
|
sklearn/feature_extraction/tests/test_text.py
|
110
|
34127
|
from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '\u0627' # simple halef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '' # halef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
    # decode_error defaults to 'strict', so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# the lack of smoothing make IDF fragile in the presence of feature with
# only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
    v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # check that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
        # stop word found automatically by the vectorizer's DF thresholding:
        # words that are highly frequent across the complete corpus are likely
        # to be uninformative (either real stop words or extraction
        # artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
    v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset the models converge to 100% accuracy, so the grid
    # search reports a perfect best score; the unigram representation ends up
    # being selected as the best estimator
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset the models converge to 100% accuracy, so the grid
    # search reports a perfect best score; the unigram representation ends up
    # being selected as the best estimator
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
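# Illustrative sketch (not part of the original test suite): the equivalence
# that test_vectorizer exercises above -- a CountVectorizer followed by a
# TfidfTransformer matches a direct TfidfVectorizer -- shown in isolation on
# the JUNK_FOOD_DOCS corpus. The helper name below is hypothetical.
def _sketch_count_plus_tfidf_equivalence():
    counts = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
    via_transformer = TfidfTransformer(norm='l2').fit_transform(counts)
    direct = TfidfVectorizer(norm='l2').fit_transform(JUNK_FOOD_DOCS)
    # both routes should produce the same tf-idf matrix entry by entry
    assert_array_almost_equal(via_transformer.toarray(), direct.toarray())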
|
bsd-3-clause
|
zhenv5/scikit-learn
|
examples/linear_model/plot_sgd_loss_functions.py
|
249
|
1095
|
"""
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
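# A small numeric check (illustrative addition, not part of the original
# example): evaluating modified_huber_loss at z = -2, 0 and 2 confirms the
# three branches of the piecewise definition plotted above.
print(modified_huber_loss(np.array([-2.0, 0.0, 2.0]), 1))
# expected: [8. 1. 0.] -- linear for z < -1, squared hinge for -1 <= z < 1,
# and zero for z >= 1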
|
bsd-3-clause
|
mariusvniekerk/ibis
|
ibis/client.py
|
4
|
13243
|
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ibis.compat import zip as czip
from ibis.config import options
import ibis.expr.types as ir
import ibis.expr.operations as ops
import ibis.sql.compiler as comp
import ibis.common as com
import ibis.util as util
class Client(object):
pass
class Query(object):
"""
Abstraction for DDL query execution to enable both synchronous and
asynchronous queries, progress, cancellation and more (for backends
supporting such functionality).
"""
def __init__(self, client, ddl):
self.client = client
if isinstance(ddl, comp.DDL):
self.compiled_ddl = ddl.compile()
else:
self.compiled_ddl = ddl
self.result_wrapper = getattr(ddl, 'result_handler', None)
def execute(self):
# synchronous by default
with self.client._execute(self.compiled_ddl, results=True) as cur:
result = self._fetch(cur)
return self._wrap_result(result)
def _wrap_result(self, result):
if self.result_wrapper is not None:
result = self.result_wrapper(result)
return result
def _fetch(self, cursor):
import pandas as pd
rows = cursor.fetchall()
# TODO(wesm): please evaluate/reimpl to optimize for perf/memory
dtypes = [self._db_type_to_dtype(x[1]) for x in cursor.description]
names = [x[0] for x in cursor.description]
cols = {}
for (col, name, dtype) in czip(czip(*rows), names, dtypes):
try:
cols[name] = pd.Series(col, dtype=dtype)
except TypeError:
# coercing to specified dtype failed, e.g. NULL vals in int col
cols[name] = pd.Series(col)
return pd.DataFrame(cols, columns=names)
def _db_type_to_dtype(self, db_type):
raise NotImplementedError
class AsyncQuery(Query):
"""
Abstract asynchronous query
"""
def execute(self):
raise NotImplementedError
def is_finished(self):
raise NotImplementedError
def cancel(self):
raise NotImplementedError
def get_result(self):
raise NotImplementedError
class SQLClient(Client):
sync_query = Query
async_query = Query
def table(self, name, database=None):
"""
Create a table expression that references a particular table in the
database
Parameters
----------
name : string
database : string, optional
Returns
-------
table : TableExpr
"""
qualified_name = self._fully_qualified_name(name, database)
schema = self._get_table_schema(qualified_name)
node = ops.DatabaseTable(qualified_name, schema, self)
return self._table_expr_klass(node)
@property
def _table_expr_klass(self):
return ir.TableExpr
@property
def current_database(self):
return self.con.database
def database(self, name=None):
"""
Create a Database object for a given database name that can be used for
exploring and manipulating the objects (tables, functions, views, etc.)
inside
Parameters
----------
name : string
Name of database
Returns
-------
database : Database
"""
# TODO: validate existence of database
if name is None:
name = self.current_database
return self.database_class(name, self)
def _fully_qualified_name(self, name, database):
# XXX
return name
def _execute(self, query, results=False):
cur = self.con.execute(query)
if results:
return cur
else:
cur.release()
def sql(self, query):
"""
Convert a SQL query to an Ibis table expression
Parameters
        ----------
        query : string
          SQL query to convert into an Ibis table expression
        Returns
-------
table : TableExpr
"""
# Get the schema by adding a LIMIT 0 on to the end of the query. If
# there is already a limit in the query, we find and remove it
limited_query = """\
SELECT *
FROM (
{0}
) t0
LIMIT 0""".format(query)
schema = self._get_schema_using_query(limited_query)
node = ops.SQLQueryResult(query, schema, self)
return ir.TableExpr(node)
def raw_sql(self, query, results=False):
"""
Execute a given query string. Could have unexpected results if the
query modifies the behavior of the session in a way unknown to Ibis; be
careful.
Parameters
----------
query : string
SQL or DDL statement
results : boolean, default False
          Pass True if the query has a result set
Returns
-------
cur : ImpalaCursor if results=True, None otherwise
You must call cur.release() after you are finished using the cursor.
"""
return self._execute(query, results=results)
def execute(self, expr, params=None, limit='default', async=False):
"""
Compile and execute Ibis expression using this backend client
interface, returning results in-memory in the appropriate object type
Parameters
----------
expr : Expr
limit : int, default None
          For expressions yielding result sets, retrieve at most this number of
values/rows. Overrides any limit already set on the expression.
params : not yet implemented
async : boolean, default False
Returns
-------
output : input type dependent
Table expressions: pandas.DataFrame
Array expressions: pandas.Series
Scalar expressions: Python scalar value
"""
ast = self._build_ast_ensure_limit(expr, limit)
if len(ast.queries) > 1:
raise NotImplementedError
else:
return self._execute_query(ast.queries[0], async=async)
def _execute_query(self, ddl, async=False):
klass = self.async_query if async else self.sync_query
return klass(self, ddl).execute()
def compile(self, expr, params=None, limit=None):
"""
Translate expression to one or more queries according to backend target
Returns
-------
output : single query or list of queries
"""
ast = self._build_ast_ensure_limit(expr, limit)
queries = [query.compile() for query in ast.queries]
return queries[0] if len(queries) == 1 else queries
def _build_ast_ensure_limit(self, expr, limit):
ast = self._build_ast(expr)
# note: limit can still be None at this point, if the global
# default_limit is None
for query in reversed(ast.queries):
if (isinstance(query, comp.Select) and
not isinstance(expr, ir.ScalarExpr) and
query.table_set is not None):
if query.limit is None:
if limit == 'default':
query_limit = options.sql.default_limit
else:
query_limit = limit
if query_limit:
query.limit = {
'n': query_limit,
'offset': 0
}
elif limit is not None and limit != 'default':
query.limit = {'n': limit,
'offset': query.limit['offset']}
return ast
def explain(self, expr):
"""
Query for and return the query plan associated with the indicated
expression or SQL query.
Returns
-------
plan : string
"""
if isinstance(expr, ir.Expr):
ast = self._build_ast(expr)
if len(ast.queries) > 1:
raise Exception('Multi-query expression')
query = ast.queries[0].compile()
else:
query = expr
statement = 'EXPLAIN {0}'.format(query)
with self._execute(statement, results=True) as cur:
result = self._get_list(cur)
return 'Query:\n{0}\n\n{1}'.format(util.indent(query, 2),
'\n'.join(result))
def _build_ast(self, expr):
# Implement in clients
raise NotImplementedError
class QueryPipeline(object):
"""
Execute a series of queries, possibly asynchronously, and capture any
result sets generated
Note: No query pipelines have yet been implemented
"""
pass
def execute(expr, limit='default', async=False):
backend = find_backend(expr)
return backend.execute(expr, limit=limit, async=async)
def compile(expr, limit=None):
backend = find_backend(expr)
return backend.compile(expr, limit=limit)
def find_backend(expr):
backends = []
def walk(expr):
node = expr.op()
for arg in node.flat_args():
if isinstance(arg, Client):
backends.append(arg)
elif isinstance(arg, ir.Expr):
walk(arg)
walk(expr)
backends = util.unique_by_key(backends, id)
if len(backends) > 1:
raise ValueError('Multiple backends found')
elif len(backends) == 0:
default = options.default_backend
if default is None:
raise com.IbisError('Expression depends on no backends, '
'and found no default')
return default
return backends[0]
class Database(object):
def __init__(self, name, client):
self.name = name
self.client = client
def __repr__(self):
return "{0}('{1}')".format('Database', self.name)
def __dir__(self):
attrs = dir(type(self))
unqualified_tables = [self._unqualify(x) for x in self.tables]
return list(sorted(set(attrs + unqualified_tables)))
def __contains__(self, key):
return key in self.tables
@property
def tables(self):
return self.list_tables()
def __getitem__(self, key):
return self.table(key)
def __getattr__(self, key):
special_attrs = ['_ipython_display_', 'trait_names',
'_getAttributeNames']
try:
return object.__getattribute__(self, key)
except AttributeError:
if key in special_attrs:
raise
return self.table(key)
def _qualify(self, value):
return value
def _unqualify(self, value):
return value
def drop(self, force=False):
"""
Drop the database
Parameters
----------
        force : boolean, default False
          Drop any objects if they exist, and do not fail if the database does
          not exist
"""
self.client.drop_database(self.name, force=force)
def namespace(self, ns):
"""
Creates a derived Database instance for collections of objects having a
common prefix. For example, for tables fooa, foob, and fooc, creating
the "foo" namespace would enable you to reference those objects as a,
b, and c, respectively.
Returns
-------
ns : DatabaseNamespace
"""
return DatabaseNamespace(self, ns)
def table(self, name):
"""
Return a table expression referencing a table in this database
Returns
-------
table : TableExpr
"""
qualified_name = self._qualify(name)
return self.client.table(qualified_name, self.name)
def list_tables(self, like=None):
return self.client.list_tables(like=self._qualify_like(like),
database=self.name)
def _qualify_like(self, like):
return like
class DatabaseNamespace(Database):
def __init__(self, parent, namespace):
self.parent = parent
self.namespace = namespace
def __repr__(self):
return ("{0}(database={1!r}, namespace={2!r})"
.format('DatabaseNamespace', self.name, self.namespace))
@property
def client(self):
return self.parent.client
@property
def name(self):
return self.parent.name
def _qualify(self, value):
return self.namespace + value
def _unqualify(self, value):
return value.replace(self.namespace, '', 1)
def _qualify_like(self, like):
if like:
return self.namespace + like
else:
return '{0}*'.format(self.namespace)
class DatabaseEntity(object):
pass
class View(DatabaseEntity):
def drop(self):
pass
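# A minimal usage sketch (illustrative addition, not part of this module):
# Database and DatabaseNamespace only qualify names before delegating to
# their client, so the behaviour can be demonstrated with a stand-in client.
# _SketchClient and the 'foo_' names below are hypothetical.
class _SketchClient(Client):
    def table(self, qualified_name, database=None):
        return qualified_name
    def list_tables(self, like=None, database=None):
        return ['foo_a', 'foo_b']
def _sketch_namespace_usage():
    db = Database('default', _SketchClient())
    ns = db.namespace('foo_')
    # the namespace prefixes table names before handing them to the client
    assert ns.table('a') == 'foo_a'
    # and a missing 'like' pattern widens to a prefix match
    assert ns._qualify_like(None) == 'foo_*'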
|
apache-2.0
|
akrherz/dep
|
scripts/ucs/top25counties.py
|
2
|
1131
|
"""Go."""
import sys
from pandas.io.sql import read_sql
from pyiem.util import get_dbconn
pgconn = get_dbconn("idep")
df = read_sql(
"""
with yearly as (
SELECT huc_12, extract(year from valid) as yr,
sum(avg_loss) from results_by_huc12 where scenario = 0
GROUP by huc_12, yr),
agg as (
SELECT huc_12, avg(sum) from yearly GROUP by huc_12)
SELECT st_x(st_centroid(st_transform(geom, 4326))),
st_y(st_centroid(st_transform(geom, 4326))), a.huc_12, avg
from huc12 h JOIN agg a on (h.huc_12 = a.huc_12) WHERE
h.states ~* 'IA' ORDER by avg DESC
""",
pgconn,
index_col="huc_12",
)
pgconn = get_dbconn("postgis")
cursor = pgconn.cursor()
DONE = []
for i, row in df.iterrows():
cursor.execute(
"""SELECT name from ugcs where end_ts is null and
state = 'IA' and substr(ugc, 3, 1) = 'C' and
ST_Contains(geom, St_SetSRID(ST_Point(%s, %s), 4326))
""",
(row["st_x"], row["st_y"]),
)
name = cursor.fetchone()[0]
if name not in DONE:
print(name)
DONE.append(name)
if len(DONE) == 25:
sys.exit()
|
mit
|
nomadcube/scikit-learn
|
sklearn/manifold/tests/test_isomap.py
|
226
|
3941
|
from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
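# Illustrative sketch (not part of the original test module): the minimal
# fit/transform workflow exercised by the tests above, on a small S-curve.
# The helper name and sizes below are hypothetical.
def _sketch_isomap_usage():
    X, _ = datasets.samples_generator.make_s_curve(100, random_state=0)
    embedding = manifold.Isomap(n_neighbors=10, n_components=2).fit_transform(X)
    # the 3-D S-curve points are embedded into 2 dimensions
    assert embedding.shape == (100, 2)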
|
bsd-3-clause
|
wq/wq.io
|
itertable/gis/mixins.py
|
1
|
4315
|
import fiona
from shapely import wkt, geometry
from ..loaders import FileLoader
from ..parsers.base import BaseParser
from ..mappers import TupleMapper
class FionaLoaderParser(FileLoader, BaseParser):
"""
Composite loader & parser mixin for GIS data, powered by Fiona
"""
layer_id = None
meta = {}
key_field = 'id'
def load(self):
try:
self.layers = fiona.listlayers(self.filename)
except (ValueError, IOError):
driver = guess_driver(self.filename)
self.meta = {'driver': driver}
self.empty_file = True
def parse(self):
# If multiple layers, parse all of them (!)
if len(self.layers) > 1 and self.layer_id is None:
cls = type(self)
self.data = [{
'id': id,
'name': name,
'data': cls(filename=self.filename, layer_id=id)
} for id, name in enumerate(self.layers)]
else:
# One layer, load & parse GIS data
with fiona.open(self.filename, layer=self.layer_id) as f:
self.meta = f.meta
if 'id' in f.meta.get('schema', {}).get('properties', {}):
# TODO: Is this correct?
del f.meta['schema']['properties']['id']
self.data = list(map(self.parse_feature, f))
def parse_feature(self, f):
# Flatten Fiona's GeoJSON-style representation into something more
# amenable to namedtuple-ing
feat = {key: value for key, value in f['properties'].items()}
if 'id' not in feat and 'ID' not in feat:
feat['id'] = f['id']
feat['geometry'] = f['geometry']
return feat
def dump_feature(self, feat, i):
# Undo aforementioned flattening
return {
'id': feat.get('id', feat.get('ID', i)),
'geometry': feat['geometry'],
'properties': {
key: value for key, value in feat.items()
if key not in ('geometry', 'id',)
}
}
def dump(self):
# Dump and save the dataset at the same time via Fiona
pass
def save(self):
with fiona.open(self.filename, 'w', **self.meta) as f:
for i, feat in enumerate(self.data):
f.write(self.dump_feature(feat, i))
class GisMapper(TupleMapper):
"""
GIS-aware tuple mapper
"""
def as_dataframe(self):
# Mimic BaseIter.as_dataframe() but with GeoDataFrame
# (also, key_field is always set)
from geopandas import GeoDataFrame
key = self.get_key_field()
data = [self.item_dict(row) for row in self.values()]
df = GeoDataFrame(data)
df.set_index(key, inplace=True)
return df
def item_dict(self, uitem):
# Turn usable item into GeoDataFrame-friendly dict
data = uitem._asdict()
data['geometry'] = geometry.shape(data['geometry'])
return data
class ShapeMapper(GisMapper):
"""
Map Fiona's GeoJSON-style geometries to and from Shapely shapes
"""
def map_value(self, field, value):
value = super(ShapeMapper, self).map_value(field, value)
if field == 'geometry':
value = geometry.shape(value)
return value
def unmap_value(self, field, value):
if field == 'geometry':
value = geometry.mapping(value)
return super(ShapeMapper, self).unmap_value(field, value)
def item_dict(self, uitem):
return uitem._asdict()
class WktMapper(ShapeMapper):
"""
Map geometries to and from WKT (good for Django integration)
"""
def map_value(self, field, value):
value = super(WktMapper, self).map_value(field, value)
if field == 'geometry':
value = wkt.dumps(value)
return value
def unmap_value(self, field, value):
if field == 'geometry':
value = wkt.loads(value)
return super(WktMapper, self).unmap_value(field, value)
def item_dict(self, uitem):
data = uitem._asdict()
data['geometry'] = wkt.loads(data['geometry'])
return data
def guess_driver(filename):
if filename.endswith(".shp"):
return "ESRI Shapefile"
else:
return "GeoJSON"
|
mit
|
sleepingZ/KerosPore
|
Adsorption/ensembleDensity.py
|
1
|
22056
|
# -*- coding: utf-8 -*-
import os
from dump import dump
import numpy as np
import copy
class Density():
def __init__(self,config,adsfile = 'ads_lj.lammpstrj',\
mode = 'new', path = 'densityTemp'):
if mode == 'new':
try:
os.mkdir('densityTemp')
except:
pass
os.chdir('./densityTemp')
else:
os.chdir(path)
f = open('V_Rho.info','r')
lines = f.readlines()
self.frame = int(lines[0].strip().split(':')[1])
self.mesh = int(lines[1].strip().split(':')[1])
self.R_ave = float(lines[2].strip().split(':')[1])
self.halflength = float(lines[3].strip().split(':')[1])
self.mode = mode
self.path = path
self.config = config
self.adsfile = '../'+adsfile
self.engine = config['engine']+'<'
self.dumpfile = 'ensembles.lammpstrj'
self.coordnum = 'dump.coordnum'
    def finalize(self):  # must be called before constructing a new Density instance
if self.mode == 'new':
f = open('V_Rho.info','w')
f.write('frame:%d\n'%self.frame)
f.write('mesh:%d\n'%self.mesh)
f.write('R_ave:%.2f\n'%self.R_ave)
f.write('halflength:%.2f\n'%self.halflength)
f.close()
os.chdir('..')
out_folder = 'VRho:%s'%(self.path)
try:
os.system('mv densityTemp %s'%out_folder)
except:
print "%s already exists, the results are in %s(NEW)"%(out_folder,out_folder)
os.system('mv densityTemp %s'%(out_folder+'(NEW)'))
else:
os.chdir('..')
def densityAccum(self,atomID=8,frame=100,equil=30000):
d = dump(self.adsfile)
times = d.time()
times = [time for time in times if time>=equil]
gap = times[1]-times[0]
gap_need = (times[-1]-times[0])/frame
if gap_need/gap >= 1:
skip = gap_need/gap
d.tselect.test("$t>=%d"%equil)
d.delete()
d.tselect.skip(skip)
self.frame = len(d.time())
d.aselect.all()
d.aselect.test("$type == %d"%atomID)
d.set("$type = 1")
d.write('temp.ensembles.lammpstrj',0)
f = open('temp.ensembles.lammpstrj','r')
lines = f.readlines()
lines_seq = [line.split() for line in lines]
f.close()
fp = open('ensembles.lammpstrj','w')
fp.write('ITEM: TIMESTEP\n')
fp.write('0\n')
fp.write('ITEM: NUMBER OF ATOMS\n')
fp.write('%d\n' % len(lines_seq))
fp.write('ITEM: BOX BOUNDS ff ff ff\n')
x = [float(item[2]) for item in lines_seq]
y = [float(item[3]) for item in lines_seq]
z = [float(item[4]) for item in lines_seq]
xmax, xmin = np.max(x), np.min(x)
ymax, ymin = np.max(y), np.min(y)
zmax, zmin = np.max(z), np.min(z)
fp.write('%.2f %.2f\n' % (xmin,xmax))
fp.write('%.2f %.2f\n' % (ymin,ymax))
fp.write('%.2f %.2f\n' % (zmin,zmax))
fp.write('ITEM: ATOMS id type x y z \n')
for i in range(len(lines_seq)):
c = lines_seq[i][2:5]
fp.write('%d %d %s %s %s\n' % (i+1,1,c[0],c[1],c[2]))
fp.close()
os.system('rm -f temp.ensembles.lammpstrj')
def density1D_radial(self,radius,dumpfile='ensembles.lammpstrj',mesh=100):
d = dump(dumpfile)
t = d.time()[0]
x,y,z = d.vecs(t,'x','y','z')
coord = [[x[i],y[i],z[i]] for i in range(len(x))]
r = np.linspace(0,radius,mesh,endpoint=False)
coord_v = []
for item in coord:
v = [float(s) for s in item]
rs = np.linalg.norm(v)
coord_v.append(rs)
hist = np.histogram(coord_v,r)
self.radial_r = hist[1][:-1]
self.radial_hist = hist[0]
def coordNum(self,\
halflength,mesh,cut_off,dumpfile='ensembles.lammpstrj'):
import lmp2py
self.halflength = halflength
self.mesh = mesh
self.R_ave = cut_off
coordnum_template = '%s/coordnum.lmpT'\
%self.config['ads_script_path']
d = dump(dumpfile)
d.tselect.all()
dump_time = d.time()
cmd_list = lmp2py.lmp2list(coordnum_template)
spacing = float(halflength)/float(mesh)
for item in cmd_list:
if item[0] == 'lattice':
item[2] = str(spacing)
if item[0] == 'region':
item[3] = str(-halflength)
item[4] = str(halflength)
item[5] = str(-halflength)
item[6] = str(halflength)
item[7] = str(-halflength)
item[8] = str(halflength)
if item[0] == 'read_dump':
item[2] = str(dump_time[0])
item[1] =dumpfile
if item[0] == 'compute':
item[4] = str(cut_off)
cmd_text = lmp2py.list2str(cmd_list)
lmp2py.str2file(cmd_text,'coordnum')
os.system(self.engine+'coordnum.lmp')
os.system('mv log.lammps log.coordnum')
def readCoordfile(self):
self.hist_list = []
d = dump(self.coordnum)
d.tselect.all()
hist_list = d.vecs(0,'c_DENSITY')
hist_list = [h for h in hist_list if h!=0]
self.hist_list = hist_list
def evalHist(self,mesh=100):
hist_list = self.hist_list
frame = self.frame
d_mesh = mesh
d_max = frame
d_range = np.linspace(0,d_max,d_mesh)
hist = np.histogram(hist_list,d_range)
self.data_x = hist[1][:-1]
self.data_hist = hist[0]
def V_rho_Spectrum(self,plot='yes'):
import matplotlib.pyplot as plt
mesh_all = (2.0*self.mesh)**3
self.data_hist = np.divide(self.data_hist,mesh_all)
plt.rc('font', family='serif')
plt.rc('legend',numpoints=1)
plt.figure(figsize=(5,3))
ax = plt.subplot(111)
ax.set_position([0.2,0.2,0.75,0.75])
Rho = np.divide(self.data_x,self.frame)
V = self.data_hist
RhoV = np.multiply(V,Rho)
rho_free_candi = []
for i in range(1,len(Rho)-1):
if RhoV[i-1]<RhoV[i] and RhoV[i]>RhoV[i+1]:#include peak points
dRhoV = min(RhoV[i]-RhoV[i-1],RhoV[i]-RhoV[i+1])
if dRhoV/RhoV[i]<0.1:#exclude spur points
rho_free_candi.append(Rho[i])
if len(rho_free_candi)==0:
rho_free_candi.append(0.0)
rho_ad_candi = []
for i in range(1,len(Rho)-1):
if RhoV[i-1]>RhoV[i] and RhoV[i]<RhoV[i+1]:#include valley points
dRhoV = min(RhoV[i-1]-RhoV[i],RhoV[i+1]-RhoV[i])
if dRhoV/(RhoV[i]+0.0001)<0.1:#exclude spur points
rho_ad_candi.append(Rho[i])
if len(rho_ad_candi)==0:
rho_ad_candi.append(rho_free_candi[0]*3.0)
rho_ad_candi = [r for r in rho_ad_candi if r>rho_free_candi[0]]
if len(rho_ad_candi)==0:
rho_ad_candi.append(rho_free_candi[0]*3.0)
rho_ad = rho_ad_candi[0]
self.RhoFree = rho_free_candi[0]
self.RhoAd = rho_ad
self.Rho = Rho
self.V = V
self.RhoV = RhoV
ax.plot(Rho,V,'ko-',label = r'$V(\rho_E)$')
ax.plot(Rho,RhoV,'bs-',label = r'$M(\rho_E)$')
plt.legend()
plt.xlabel(r'$\rho_E$',fontsize=20)
plt.ylabel(r'amplitude',fontsize=20)
if plot == 'yes':
print "Rho_ad_candidates:"
print rho_ad_candi
print "Rho_free_candidates:"
print rho_free_candi
plt.show()
def adsAnalysis(self,mode = 'self',rhoFreeRef = 0.0,\
bonus=0.2):
if mode == 'self':
RhoFree=self.RhoFree*(1.+bonus)
else:
self.RhoFree=rhoFreeRef
RhoFree=rhoFreeRef*(1.+bonus)
RhoV_sum = np.sum(self.RhoV)
V_sum = np.sum(self.V)
num = len(self.Rho)
Rho = self.Rho
RhoV_ads = np.sum([self.RhoV[i] for i in range(num) if Rho[i]>RhoFree])
V_ads = np.sum([self.V[i] for i in range(num) if Rho[i]>RhoFree])
self.RhoV_ads = RhoV_ads/RhoV_sum
self.V_ads = V_ads/V_sum
def V_rho_SpectrumComparison(self,compData,refs,xlim=0.7):
# f=open('data_regularPore_2.0','w')
# f.write('rho_E,V_F')
import matplotlib.pyplot as plt
mesh_all = (2.0*self.mesh)**3
self.data_hist = np.divide(self.data_hist,mesh_all)
plt.rc('font', family='serif')
plt.rc('legend',numpoints=1)
plt.figure(figsize=(5,6))
ax = []
ax.append(plt.subplot(211))
ax[0].set_position([0.2,0.6,0.7,0.38])
for i in range(len(compData)):
data, ref = compData[i], refs[i]
mesh_data = (2.0*data.mesh)**3
data.data_hist = np.divide(data.data_hist,mesh_data)
ax[0].plot(np.divide(data.data_x,data.frame),\
np.multiply(data.data_hist,1),'o-',\
label=r"$\varepsilon_w = %s$"%ref)
# for j in range(len(np.divide(data.data_x,data.frame))):
# f.write('%.2f,%.6f\n'%(np.divide(data.data_x,data.frame)[j],data.data_hist[j]))
plt.xlabel(r'$\rho_E$',fontsize=20)
plt.ylabel(r'$V_F$',fontsize=20)
plt.xlim(xmax=xlim)
plt.legend()
ax.append(plt.subplot(212))
ax[1].set_position([0.2,0.1,0.7,0.38])
# ax[1].plot(np.divide(self.data_x,self.frame),self.data_hist,\
# 'ko-',label="Kerogen Pore")
ax[1].plot(np.divide(self.data_x,self.frame),np.multiply(self.data_hist,1),\
'ko-',label="Kerogen Pore")
plt.legend()
plt.xlabel(r'$\rho_E$',fontsize=20)
plt.ylabel(r'$V_F$',fontsize=20)
plt.xlim(xmax=xlim)
ylim0 = ax[0].get_ylim()[1]
ylim1 = ax[1].get_ylim()[1]
ylim = max([ylim0,ylim1])
ax[0].set_ylim((0,ylim))
ax[1].set_ylim((0,ylim))
plt.show()
# f.close()
# g=open('data_kerogenPore','w')
# g.write('rho_E,V_F')
# for j in range(len(np.divide(self.data_x,self.frame))):
# g.write('%.2f,%.6f\n'%(np.divide(self.data_x,self.frame)[j],self.data_hist[j]))
# g.close()
def visualDensity(self, vis = 'no', phi = 0.01):
"""
visualDensity must be invoked after coordNum().
The equatorial plane is the x-y plane.
phi is the longitude angle.
"""
d = dump(self.coordnum)
d.aselect.all()
time = d.time()[0]
x, y, z, rho = d.vecs(time,"x","y","z","c_DENSITY")
N = len(x)
x_temp = sorted(x)
xmin, xmax = np.min(x), np.max(x)
ymin, ymax = np.min(y), np.max(y)
zmin, zmax = np.min(z), np.max(z)
for i in range(N):
if x_temp[i]!=xmin:
x_next = x_temp[i]
break
spacing = x_next - xmin
rho = np.divide(rho,self.frame*spacing**3)
pixels = []
pixel_x = range(int(round(xmin/spacing)),\
int(round(xmax/spacing))+1)
pixel_y = range(int(round(ymin/spacing)),\
int(round(ymax/spacing))+1)
pixel_z = range(int(round(zmin/spacing)),\
int(round(zmax/spacing))+1)
center = [pixel_x[len(pixel_x)/2],\
pixel_y[len(pixel_y)/2],\
pixel_z[len(pixel_z)/2]]
for i in range(N):
pixels.append([int(round(x[i]/spacing)),\
int(round(y[i]/spacing)),\
int(round(z[i]/spacing))])
p_xmin, p_xmax = np.min(pixel_x), np.max(pixel_x)
p_ymin, p_ymax = np.min(pixel_y), np.max(pixel_y)
xs = center[0]-int((center[1]-p_ymin)/np.tan(phi))
if xs < p_xmin:
ys = center[1]-int((center[0]-p_xmin)*np.tan(phi))
start = [p_xmin,ys]
ye = center[1]-int((center[0]-p_xmax)*np.tan(phi))
end = [p_xmax,ye]
else:
start = [xs,p_ymin]
xe = center[0]-int((center[1]-p_ymax)/np.tan(phi))
end = [xe,p_ymax]
def Bresenham(start,end):
dx = end[0]-start[0]
dy = end[1]-start[1]
e = -dx
x, y = start[0],start[1]
res = []
for i in range(dx):
res.append([x,y])
x += 1
e = e + 2*dy
if e>=0:
y += 1
e = e - 2*dy
return res
plane_xy = Bresenham(start,end)
slope = float(end[1]-start[1])/(end[0]-start[0])+0.0001
#recover to length with units
coord_start = np.subtract(start,center[:2])*spacing
def lineMap(x0,y0,coord_start,slope):
k,ik = slope, 1.0/slope
xs,ys = coord_start[0],coord_start[1]
xp = (y0-ys+k*xs+ik*x0)/(k+ik)
yp = ys + k*(xp-xs)
return np.linalg.norm(np.subtract([xp,yp],coord_start))
plane = []
for i in range(N):
pixel = pixels[i]
if plane_xy.count(pixel[:2])>0:
x0,y0 = pixel[0]*spacing,pixel[1]*spacing
rxy = lineMap(x0,y0,coord_start,slope)
z = pixel[2]*spacing
plane.append([rxy,z,rho[i]])
import matplotlib.pyplot as plt
plt.rc('font', family='serif')
plt.rc(('xtick','ytick'),labelsize=15)
plane = np.array(plane)
rxy_max, z_max = tuple(np.max(plane,axis=0)[:2])
rxy_min, z_min = tuple(np.min(plane,axis=0)[:2])
grid_rxy,grid_z=np.mgrid[rxy_min:rxy_max:400j,z_min:z_max:400j]
from scipy.interpolate import griddata
grid_rho=griddata(plane[:,:2],plane[:,2],(grid_rxy,grid_z),method='cubic')
if vis == 'no':
self.rxy_min=rxy_min
self.rxy_max=rxy_max
self.zmin=zmin
self.zmax=zmax
return grid_rho
else:
plt.figure(figsize=(8,6))
plt.imshow(grid_rho.T,aspect='equal',\
extent=(rxy_min,rxy_max,z_min,z_max),origin='lower')
plt.show()
class Density_KeroPore(Density):
def __init__(self,arg,pore_path):
"""
pore_path: SOMEPATH/atoms_out
tar_path: SOMEPATH/atoms_out/RDF
The output files are all moved to tar_path when finalize is called
"""
Density.__init__(self)
self.para = arg
datafile_o = 'data.9mol_du_mod'
dumpfile_o = 'dump.atom.lammpstrj'
sphere_center_file = 'sphere_center'
os.system('cp -rf %s/%s %s/%s %s/%s .'%\
(pore_path,datafile_o,pore_path,dumpfile_o,\
pore_path,sphere_center_file))#Copy the origin 3 file to curdir
self.method = 'KeroPore'
self.tarpath = '%s/Density'%pore_path
self.lmpfile = self.method+'.lmp'
self.cmd_mod()
def cmd_mod(self):
fluidspath = '/home/sleepingz/2015Autumn/KeroPore/Adsorption'
os.system('cp -rf %s/fluids .'%fluidspath)
import IsoThermal as IT
fluid = 'methane'
fluidmass = 16.0
ae = 0.29386
asigma = 3.73
from RDF import dump_mod
i = IT.GCMC_lj_ads(fluid=fluid,fluidmass=fluidmass,\
T=self.para['T'],p=[self.para['p']],datafile='data.9mol_du_mod',\
readdump='dump.atom_mod.lammpstrj',sphere_r=self.para['radius'],\
equil=self.para['equil'],nsample=self.para['sample_num'],atom_energy=ae,atom_sigma=asigma)
i.cmd_mod()
self.para['mu'] = i.mu[0]#calculate mu according to p
self.para['sphere_center'] = i.para['sphere_center']
arg={'sphere_r':i.para['sphere_r'],'dump_step':500000}
dump_mod(arg,'dump.atom.lammpstrj')#dump_mod will read the 'sphere_center' file
for cmd in i.cmd_list:
if cmd[:2] == ['fix','GCMC_E']:
cmd[7] = str(i.para['fluid_id'])
cmd[9] = str(i.T)
cmd[10] = str(i.mu[0])
if cmd[0] == 'dump':
cmd[0] = '#dump'
for cmd in i.cmd_list2:
if cmd[:2] == ['fix','GCMC']:
cmd[5] = '10'#Need to be improved
cmd[7] = str(i.para['fluid_id'])
cmd[9] = str(i.T)
cmd[10] = str(i.mu[0])
arg = self.para
cmd_list = i.cmd_list + i.cmd_list2
cmd_list.append(['dump','1','FLUID','custom',str(arg['freq']),'dump.ensembles','id','type','x','y','z'])
cmd_list.append(['run',str(arg['sample_num']*arg['freq'])])
import lmp2py
lmp2py.str2file(lmp2py.list2str(cmd_list),self.method)
class Density_HardWall(Density):
def __init__(self, arg, hw_path='.'):
"""
savemode = 0: do not save the hardwall results
        hw_path: path where the hardwall results are stored (must be provided)
arg.keys = 'radius','sigma','E_W','T','mu','id','sample_num','equil','freq',...
"""
Density.__init__(self)
self.para = arg
self.para['radius_L'] = self.para['radius'] + self.para['sigma']
self.method = 'hardwall'
self.lmpfile = self.method+'.lmp'
self.lmptemplate =\
'/home/sleepingz/2015Autumn/KeroPore/Case:hardwall/poremaker_hardwall.lmp'
self.tarpath = hw_path
self.cmd_mod()
def cmd_mod(self):
arg = self.para
import lmp2py
cmd_list = lmp2py.lmp2list(self.lmptemplate)
for item in cmd_list:
if item[0:2] == ['fix','WALL']:
item[6] = str(arg['E_W'])
item[7] = str(arg['sigma'])
item[8] = str(arg['sigma']*2.5)
if item[0:2] == ['fix','GCMC']:
item[9] = str(arg['T'])
item[10] = str(arg['mu'])
if item[0:2] == ['region','SPHERE']:
                item[6] = str(arg['radius'])  # between radius and radius_L to capture all adsorption structure
if item[0:2] == ['region','SPHERE_L']:
item[6] = str(arg['radius']+arg['sigma'])
if item[0] == 'dump':
item[0] = '#dump'
if item[0] == 'run':
item[1] = str(arg['equil'])
if item[0] == 'create_atoms':
item[3] = str(arg['N_iso'])
cmd_list.append(['dump','1','all','custom',str(arg['freq']),'dump.ensembles','id','type','x','y','z'])
cmd_list.append(['unfix','GCMC'])
cmd_list.append(['run',str(arg['sample_num']*arg['freq'])])
lmp2py.str2file(lmp2py.list2str(cmd_list),self.method)
class Density_StaticAtoms(Density):
def __init__(self, arg, sa_path='.'):
"""
        savemode = 0: do not save the static-atoms results
        sa_path: path where the static-atoms results are stored (must be provided)
arg.keys = 'radius','sigma','E_W','N_iso','T','id','sample_num','equil','freq',...
"""
Density.__init__(self)
self.para = arg
self.para['radius_L'] = self.para['radius'] + self.para['sigma']
self.method = 'StaticAtoms'
self.lmpfile = self.method+'.lmp'
self.lmptemplate =\
'/home/sleepingz/2015Autumn/KeroPore/Case:hardwall/poremaker_staticAtoms.lmp'
self.tarpath = sa_path
self.cmd_mod()
def cmd_mod(self):
arg = self.para
import lmp2py
cmd_list = lmp2py.lmp2list(self.lmptemplate)
for item in cmd_list:
if item[0:2] == ['region','BOX']:
item[3] = item[5] = item[7] = str(-arg['radius']-10)
item[4] = item[6] = item[8] = str(arg['radius']+10)
if item[0:2] == ['region','SPHERE']:
                item[6] = str(arg['radius'])  # between radius and radius_L to capture all adsorption structure
if item[0:2] == ['region','SPHERE_L']:
item[6] = str(arg['radius']+arg['sigma'])
if item[0:2] == ['pair_coeff','1']:
item[3] = str(arg['E_W'])
item[4] = str(arg['sigma'])
if item[0:2] == ['fix','EQUIL']:
item[5] = item[6] = str(arg['T'])
if item[0] == 'run':
item[1] = str(arg['equil'])
if item[0:2] == ['create_atoms','2']:
item[3] = str(arg['N_iso'])
cmd_list.append(['dump','1','all','custom',str(arg['freq']),'dump.ensembles','id','type','x','y','z'])
cmd_list.append(['run',str(arg['sample_num']*arg['freq'])])
lmp2py.str2file(lmp2py.list2str(cmd_list),self.method)
#Test:
#arg={'radius':7.0,'sigma':3.7,'E_W':1.0,'T':300.0,'mu':-6.0,'id':1,'sample_num':200,\
# 'equil':10000,'freq':200,'mesh':20,'r_ave':2.0,'sphere_center':[0.0,0.0,0.0]}
#Test1: hardwall
#dh = Density_HardWall(arg)
#dh()
#dh.cmd_mod()
#dh.ensembleAccum()
#dh.density_eval()
#Test2: 3 hardwall, plot
#mu_seq = [-8.0,-7.0,-6.0]
#data = []
#for i in range(len(mu_seq)):
# arg['mu'] = mu_seq[i]
# dh = Density_HardWall(arg)
# dh.cmd_mod()
# dh()
# dh.ensembleAccum()
# dh.density_eval()
# dh.read_coordfile()
# dh.eval_hist()
# data.append({'x':dh.data_x,'hist':dh.data_hist})
# os.chdir('..')
#
#
#layout = 311
#
#import matplotlib.pyplot as plt
#fig = plt.figure(figsize=(8,10))
#ax = []
#for i in range(3):
# ax.append(plt.subplot(layout+i))
# ax[i].plot(data[i]['x'],data[i]['hist'],'bo-')
#Test3: KeroPore prepare
#pore_path = '/home/sleepingz/2015Autumn/KeroPore/Case:dig_atoms/radius_7.0/atoms_out'
#arg['p'] = 1.0
#dk = Density_KeroPore(arg,pore_path)
#dk.cmd_mod()
#dk.ensembleAccum(mode = 1)
#dk.density_eval()
#dk.read_coordfile()
#dk.eval_hist()
#import matplotlib.pyplot as plt
#fig = plt.figure(figsize=(8,10))
#plt.plot(dk.data_x,dk.data_hist,'bo-')
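#Example sketch: minimal Density workflow using the current method names.
#The config values, radius and cut-off below are placeholders, not values from this project.
#config = {'engine': 'lmp_serial ', 'ads_script_path': '/path/to/ads_scripts'}
#dens = Density(config, adsfile='ads_lj.lammpstrj', mode='new')
#dens.densityAccum(atomID=8, frame=100, equil=30000) # writes ensembles.lammpstrj
#dens.coordNum(halflength=10.0, mesh=20, cut_off=2.0) # runs the LAMMPS coordination-number pass
#dens.readCoordfile() # reads dump.coordnum
#dens.evalHist(mesh=100) # histograms the sampled local counts
#dens.V_rho_Spectrum(plot='no') # V(rho_E) and M(rho_E) spectra, sets RhoFree/RhoAd
#dens.adsAnalysis() # adsorbed fractions RhoV_ads and V_ads
#dens.finalize() # writes V_Rho.info and moves densityTemp to 'VRho:...'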
|
gpl-3.0
|
AlexisEidelman/Til
|
til/pgm/depart_retirement.py
|
2
|
1083
|
# -*- coding: utf-8 -*-
import sys
from numpy import maximum, array, ones
from pandas import Series
from utils import output_til_to_liam
from til.pgm.run_pension import run_pension
def depart_retirement(context, yearleg, time_step='year', to_check=False, behavior='taux_plein', cProfile=False):
    ''' Returns a vector flagging the individuals who retire; with the 'taux_plein'
    behavior it returns each individual's full-rate retirement date cast to int.
    TODO: create the dedicated .py modules once the retirement behaviors get more complex.'''
if behavior == 'taux_plein':
dates_tauxplein = run_pension(context, yearleg,
time_step=time_step, to_check=to_check,
output='dates_taux_plein', cProfile=cProfile)
        date_tauxplein = maximum(maximum(dates_tauxplein['RSI'], dates_tauxplein['RG']),
                                 dates_tauxplein['FP'])  # element-wise max over the three regimes
dates = output_til_to_liam(output_til=date_tauxplein,
index_til=dates_tauxplein['index'],
context_id=context['id'])
return dates.astype(int)
|
gpl-3.0
|
samuel1208/scikit-learn
|
examples/calibration/plot_compare_calibration.py
|
241
|
5008
|
"""
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subseting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
typically return probabilities closer to 0 or 1.
* Support Vector Classification (SVC) shows an even more sigmoid curve than
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
|
bsd-3-clause
|
danielhkl/matplotlib2tikz
|
test/test_patches.py
|
1
|
2199
|
# -*- coding: utf-8 -*-
#
import helpers
def plot():
from matplotlib.patches import Circle, Ellipse, Polygon, Rectangle, Wedge
from matplotlib.collections import PatchCollection
from matplotlib import pyplot as plt
import numpy as np
import matplotlib as mpl
np.random.seed(123)
fig = plt.figure()
ax = fig.add_subplot(111)
N = 3
x = np.random.rand(N)
y = np.random.rand(N)
radii = 0.1 * np.random.rand(N)
patches = []
for x1, y1, r in zip(x, y, radii):
circle = Circle((x1, y1), r)
patches.append(circle)
rect = Rectangle(
xy=[0.0, 0.25],
width=1.0,
height=0.5,
angle=-45.0
)
patches.append(rect)
x = np.random.rand(N)
y = np.random.rand(N)
radii = 0.1*np.random.rand(N)
theta1 = 360.0*np.random.rand(N)
theta2 = 360.0*np.random.rand(N)
for x1, y1, r, t1, t2 in zip(x, y, radii, theta1, theta2):
wedge = Wedge((x1, y1), r, t1, t2)
patches.append(wedge)
# Some limiting conditions on Wedge
patches += [
Wedge((0.3, 0.7), .1, 0, 360), # Full circle
Wedge((0.7, 0.8), .2, 0, 360, width=0.05), # Full ring
Wedge((0.8, 0.3), .2, 0, 45), # Full sector
Wedge((0.8, 0.3), .2, 45, 90, width=0.10), # Ring sector
]
for _ in range(N):
polygon = Polygon(np.random.rand(N, 2), True)
patches.append(polygon)
colors = 100*np.random.rand(len(patches))
p = PatchCollection(patches,
cmap=mpl.cm.jet,
alpha=0.4
)
p.set_array(np.array(colors))
ax.add_collection(p)
ellipse = Ellipse(
xy=[1.0, 0.5],
width=1.0,
height=0.5,
angle=45.0,
alpha=0.4
)
ax.add_patch(ellipse)
circle = Circle(
xy=[0.0, 1.0],
radius=0.5,
color='r',
alpha=0.4
)
ax.add_patch(circle)
plt.colorbar(p)
return fig
def test():
phash = helpers.Phash(plot())
assert phash.phash == '7ff8494a87627116', phash.get_details()
return
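# Illustrative sketch (assumes direct execution outside the phash-based test
# harness): render the same figure to a PNG file; the output filename is a placeholder.
if __name__ == '__main__':
    plot().savefig('test_patches_demo.png', dpi=150)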
|
mit
|
18praveenb/toil-rnaseq-sc
|
src/toil_rnaseq_sc/pachterlab_post.py
|
1
|
3099
|
# This is a modified version of a source file from the repository "scRNA-Seq-tcc-prep" by the Pachter Lab which can be found here: https://github.com/pachterlab/scRNA-Seq-TCC-prep/blob/0469873bdadcc48e34782882dbd24c3939c0542a/source/prep_TCC_matrix.py
# The citation for the paper with which this repository is associated is Ntranos, V., Kamath, G. M., Zhang, J. M., Pachter, L. & Tse, D. N. Fast and accurate single-cell RNA-seq analysis by clustering of transcript-compatibility counts. Genome Biology 17, 112 (2016).
# The entire source of "scRNA-Seq-tcc prep" is also used in Dockerized form in this pipeline.
# The original "scRNA-Seq-TCC-prep" repository was released under GPLv3, as is this repository (and thus this source file). For more details, see the 'README.md' of this repository which contains the full text of the GPL.
import os
import sys, gc
import numpy as np
from scipy.sparse import coo_matrix
from sklearn.preprocessing import normalize
from sklearn.metrics.pairwise import pairwise_distances
from scipy.spatial.distance import *
from scipy.stats import entropy
import pickle
def prep_tcc_matrix(job, threads, tcc_output_dir, save_dir):
"""
For some reason, changing the number of threads to more than one results in a crash.
"""
print "Setting threads to 1... threads value was ignored."
threads = 1
# matrix.ec file
ecfile_dir = os.path.join(tcc_output_dir, "matrix.ec")
tsvfile_dir = os.path.join(tcc_output_dir, "matrix.tsv")
print "Loading TCCs.."
COOinput = np.loadtxt( tsvfile_dir, delimiter='\t' , dtype=float)
rows,cols,data = COOinput.T
nonzero_ec = np.unique(rows)
map_rows = { val:ind for ind,val in enumerate( nonzero_ec ) }
map_cols = { val:ind for ind,val in enumerate( np.unique(cols) ) }
TCCmatrix = coo_matrix( (data.astype(float),( [map_rows[r] for r in rows], [map_cols[c] for c in cols]) ) )
NUM_OF_CELLS = TCCmatrix.shape[1]
print "NUM_OF_CELLS =", NUM_OF_CELLS
T = TCCmatrix.tocsr()
T_norm = normalize(T, norm='l1', axis=0)
T_normT = T_norm.transpose()
del TCCmatrix;
_ = gc.collect()
# Pairwise_distances
def L1_distance(p,q):
return cityblock(p,q).sum()
# def jensen_shannon(p, q):
# m=0.5*p+0.5*q
# p = np.transpose(p[p > 0])
# q = np.transpose(q[q > 0])
# m = np.transpose(m[m > 0])
# return np.sqrt(entropy(m)-0.5*entropy(q)-0.5*entropy(p))
num_of_threads = threads
print "Calculating pairwise L1 distances... ( num_threads =",num_of_threads,")"
# D_js = pairwise_distances(T_normT,metric=jensen_shannon,n_jobs=num_of_threads)
D_l1 = pairwise_distances(T_normT,metric=L1_distance,n_jobs=num_of_threads)
print "writing data..."
# Save data
with open(os.path.join(save_dir, "TCC_matrix.dat"), 'wb') as f:
pickle.dump(T,f)
with open(os.path.join(save_dir, "pwise_dist_L1.dat"), 'wb') as f:
pickle.dump(D_l1,f)
with open(os.path.join(save_dir, "nonzero_ec.dat"), 'wb') as f:
pickle.dump(nonzero_ec,f)
print "DONE."
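# A minimal read-back sketch (not part of the original Toil pipeline): load the
# three files that prep_tcc_matrix pickles into save_dir.
def load_tcc_outputs(save_dir):
    """Return (TCC matrix, pairwise L1 distance matrix, nonzero equivalence classes)."""
    with open(os.path.join(save_dir, "TCC_matrix.dat"), 'rb') as f:
        tcc_matrix = pickle.load(f)
    with open(os.path.join(save_dir, "pwise_dist_L1.dat"), 'rb') as f:
        pwise_dist_l1 = pickle.load(f)
    with open(os.path.join(save_dir, "nonzero_ec.dat"), 'rb') as f:
        nonzero_ec = pickle.load(f)
    return tcc_matrix, pwise_dist_l1, nonzero_ec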
|
gpl-3.0
|
vshtanko/scikit-learn
|
benchmarks/bench_plot_fastkmeans.py
|
294
|
4676
|
from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
chunk = 100
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
data = nr.random_integers(-50, 50, (n_samples, n_features))
print('K-Means')
tstart = time()
kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.5f" % kmeans.inertia_)
print()
results['kmeans_speed'].append(delta)
results['kmeans_quality'].append(kmeans.inertia_)
print('Fast K-Means')
# let's prepare the data in small chunks
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=10,
batch_size=chunk)
tstart = time()
mbkmeans.fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %f" % mbkmeans.inertia_)
print()
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
def compute_bench_2(chunks):
results = defaultdict(lambda: [])
n_features = 50000
means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
[0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
X = np.empty((0, 2))
for i in range(8):
X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
max_it = len(chunks)
it = 0
for chunk in chunks:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
print('Fast K-Means')
tstart = time()
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=8,
batch_size=chunk)
mbkmeans.fit(X)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.3fs" % mbkmeans.inertia_)
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 150, 5).astype(np.int)
features_range = np.linspace(150, 50000, 5).astype(np.int)
chunks = np.linspace(500, 10000, 15).astype(np.int)
results = compute_bench(samples_range, features_range)
results_2 = compute_bench_2(chunks)
max_time = max([max(i) for i in [t for (label, t) in results.iteritems()
if "speed" in label]])
max_inertia = max([max(i) for i in [
t for (label, t) in results.iteritems()
if "speed" not in label]])
fig = plt.figure('scikit-learn K-Means benchmark results')
for c, (label, timings) in zip('brcy',
sorted(results.iteritems())):
if 'speed' in label:
ax = fig.add_subplot(2, 2, 1, projection='3d')
ax.set_zlim3d(0.0, max_time * 1.1)
else:
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.set_zlim3d(0.0, max_inertia * 1.1)
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
i = 0
for c, (label, timings) in zip('br',
sorted(results_2.iteritems())):
i += 1
ax = fig.add_subplot(2, 2, i + 2)
y = np.asarray(timings)
ax.plot(chunks, y, color=c, alpha=0.8)
ax.set_xlabel('Chunks')
ax.set_ylabel(label)
plt.show()
|
bsd-3-clause
|
quheng/scikit-learn
|
sklearn/neural_network/rbm.py
|
206
|
12292
|
"""Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <[email protected]>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hiddens. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
        Weight matrix, where n_features is the number of
visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self, "components_")
X = check_array(X, accept_sparse='csr', dtype=np.float)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
check_is_fitted(self, "components_")
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
def partial_fit(self, X, y=None):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='fortran')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self, "components_")
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='fortran')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
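# Minimal usage sketch (not part of scikit-learn itself): stream mini-batches of
# synthetic binary data through partial_fit, then project new samples.
if __name__ == "__main__":
    rng_demo = np.random.RandomState(0)
    rbm_demo = BernoulliRBM(n_components=16, learning_rate=0.05, batch_size=10)
    for _ in range(20):
        batch = (rng_demo.rand(10, 32) > 0.5).astype(np.float64)
        rbm_demo.partial_fit(batch)   # one SML/PCD update per mini-batch
    hidden = rbm_demo.transform((rng_demo.rand(5, 32) > 0.5).astype(np.float64))
    print(hidden.shape)               # (5, 16)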
|
bsd-3-clause
|
maheshakya/scikit-learn
|
examples/applications/plot_out_of_core_classification.py
|
255
|
13919
|
"""
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the features space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <[email protected]>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
    # Hack to detect whether we are being run by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
###############################################################################
class ReutersParser(html_parser.HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
html_parser.HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
###############################################################################
# Main
###############################################################################
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
non_negative=True)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop: iterate on mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
###############################################################################
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
#fig = plt.gcf()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
|
bsd-3-clause
|
loli/semisupervisedforests
|
sklearn/cluster/tests/test_bicluster.py
|
23
|
9469
|
"""Testing for Spectral Biclustering methods"""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.grid_search import ParameterGrid
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import _scale_normalize
from sklearn.cluster.bicluster import _bistochastic_normalize
from sklearn.cluster.bicluster import _log_normalize
from sklearn.metrics import consensus_score
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BaseEstimator, BiclusterMixin):
# Mock object for testing get_submatrix.
def __init__(self):
pass
def get_indices(self, i):
# Overridden to reproduce old get_submatrix test.
return (np.where([True, True, False, False, True])[0],
np.where([False, False, True, True])[0])
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
model = MockBiclustering()
for X in (data, csr_matrix(data), data.tolist()):
submatrix = model.get_submatrix(0, X)
if issparse(submatrix):
submatrix = submatrix.toarray()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.toarray()
assert_true(np.all(X != -1))
def _test_shape_indices(model):
"""Test get_shape and get_indices on fitted model."""
for i in range(model.n_clusters):
m, n = model.get_shape(i)
i_ind, j_ind = model.get_indices(i)
assert_equal(len(i_ind), m)
assert_equal(len(j_ind), n)
def test_spectral_coclustering():
"""Test Dhillon's Spectral CoClustering on a simple problem."""
param_grid = {'svd_method': ['randomized', 'arpack'],
'n_svd_vecs': [None, 20],
'mini_batch': [False, True],
'init': ['k-means++'],
'n_init': [10],
'n_jobs': [1]}
random_state = 0
S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
random_state=random_state)
S -= S.min() # needs to be nonnegative before making it sparse
S = np.where(S < 1, 0, S) # threshold some values
for mat in (S, csr_matrix(S)):
for kwargs in ParameterGrid(param_grid):
model = SpectralCoclustering(n_clusters=3,
random_state=random_state,
**kwargs)
model.fit(mat)
assert_equal(model.rows_.shape, (3, 30))
assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def test_spectral_biclustering():
"""Test Kluger methods on a checkerboard dataset."""
S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
random_state=0)
non_default_params = {'method': ['scale', 'log'],
'svd_method': ['arpack'],
'n_svd_vecs': [20],
'mini_batch': [True]}
for mat in (S, csr_matrix(S)):
for param_name, param_values in non_default_params.items():
for param_value in param_values:
model = SpectralBiclustering(
n_clusters=3,
n_init=3,
init='k-means++',
random_state=0,
)
model.set_params(**dict([(param_name, param_value)]))
if issparse(mat) and model.get_params().get('method') == 'log':
# cannot take log of sparse matrix
assert_raises(ValueError, model.fit, mat)
continue
else:
model.fit(mat)
assert_equal(model.rows_.shape, (9, 30))
assert_equal(model.columns_.shape, (9, 30))
assert_array_equal(model.rows_.sum(axis=0),
np.repeat(3, 30))
assert_array_equal(model.columns_.sum(axis=0),
np.repeat(3, 30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
decimal=1)
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
_do_scale_test(scaled)
assert_almost_equal(scaled.sum(axis=0).mean(),
scaled.sum(axis=1).mean(),
decimal=1)
def test_scale_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled, _, _ = _scale_normalize(mat)
_do_scale_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_bistochastic_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled = _bistochastic_normalize(mat)
_do_bistochastic_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_log_normalize():
# adding any constant to a log-scaled matrix should make it
# bistochastic
generator = np.random.RandomState(0)
mat = generator.rand(100, 100)
scaled = _log_normalize(mat) + 1
_do_bistochastic_test(scaled)
def test_fit_best_piecewise():
model = SpectralBiclustering(random_state=0)
vectors = np.array([[0, 0, 0, 1, 1, 1],
[2, 2, 2, 3, 3, 3],
[0, 1, 2, 3, 4, 5]])
best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
model = SpectralBiclustering(random_state=0)
data = np.array([[1, 1, 1],
[1, 1, 1],
[3, 6, 3],
[3, 6, 3]])
vectors = np.array([[1, 0],
[0, 1],
[0, 0]])
for mat in (data, csr_matrix(data)):
labels = model._project_and_cluster(data, vectors,
n_clusters=2)
assert_array_equal(labels, [0, 0, 1, 1])
def test_perfect_checkerboard():
raise SkipTest("This test is failing on the buildbot, but cannot"
" reproduce. Temporarily disabling it until it can be"
" reproduced and fixed.")
model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
def test_errors():
data = np.arange(25).reshape((5, 5))
model = SpectralBiclustering(n_clusters=(3, 3, 3))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters='abc')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters=(3, 'abc'))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(svd_method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_best=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=3, n_best=4)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering()
data = np.arange(27).reshape((3, 3, 3))
assert_raises(ValueError, model.fit, data)
|
bsd-3-clause
|
natanielruiz/android-yolo
|
jni-build/jni/include/tensorflow/examples/skflow/text_classification_character_cnn.py
|
5
|
3961
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is an example of using convolutional networks over characters on the
DBpedia dataset to predict a class from the description of an entity.
This model is similar to the one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat of an alternative to the Lua code here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import metrics
import pandas
import tensorflow as tf
from tensorflow.contrib import learn
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_bool('test_with_fake_data', False,
'Test the example code with fake data.')
MAX_DOCUMENT_LENGTH = 100
N_FILTERS = 10
FILTER_SHAPE1 = [20, 256]
FILTER_SHAPE2 = [20, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
def char_cnn_model(x, y):
"""Character level convolutional neural network model to predict classes."""
y = tf.one_hot(y, 15, 1, 0)
byte_list = tf.reshape(learn.ops.one_hot_matrix(x, 256),
[-1, MAX_DOCUMENT_LENGTH, 256, 1])
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = learn.ops.conv2d(byte_list, N_FILTERS,
FILTER_SHAPE1, padding='VALID')
# Add a RELU for non linearity.
conv1 = tf.nn.relu(conv1)
# Max pooling across output of Convolution+Relu.
pool1 = tf.nn.max_pool(conv1, ksize=[1, POOLING_WINDOW, 1, 1],
strides=[1, POOLING_STRIDE, 1, 1], padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = learn.ops.conv2d(pool1, N_FILTERS, FILTER_SHAPE2,
padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
prediction, loss = learn.models.logistic_regression(pool2, y)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(),
optimizer='Adam', learning_rate=0.01)
return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op
def main(unused_argv):
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data, size='large')
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = learn.preprocessing.ByteProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
# Build model
classifier = learn.Estimator(model_fn=char_cnn_model)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = classifier.predict(x_test)
score = metrics.accuracy_score(y_test, y_predicted['class'])
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
|
apache-2.0
|
roxyboy/scikit-learn
|
sklearn/tests/test_calibration.py
|
213
|
12219
|
# Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_greater, assert_almost_equal,
assert_greater_equal,
assert_array_equal,
assert_raises,
assert_warns_message)
from sklearn.datasets import make_classification, make_blobs
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.linear_model import Ridge
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.metrics import brier_score_loss, log_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
def test_calibration():
"""Test calibration objects with isotonic and sigmoid"""
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
pc_clf = CalibratedClassifierCV(clf, cv=y.size + 1)
assert_raises(ValueError, pc_clf.fit, X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [(X_train, X_test),
(sparse.csr_matrix(X_train),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv=2)
# Note that this fit overwrites the fit on the entire training
# set
pc_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_pc_clf = pc_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
pc_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
pc_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
pc_clf.fit(this_X_train, (y_train + 1) % 2,
sample_weight=sw_train)
prob_pos_pc_clf_relabeled = \
pc_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_pc_clf,
1 - prob_pos_pc_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss((y_test + 1) % 2,
prob_pos_pc_clf_relabeled))
# check that calibration can also deal with regressors that have
# a decision_function
clf_base_regressor = CalibratedClassifierCV(Ridge())
clf_base_regressor.fit(X_train, y_train)
clf_base_regressor.predict(X_test)
# Check failure cases:
# only "isotonic" and "sigmoid" should be accepted as methods
clf_invalid_method = CalibratedClassifierCV(clf, method="foo")
assert_raises(ValueError, clf_invalid_method.fit, X_train, y_train)
# base-estimators should provide either decision_function or
# predict_proba (most regressors, for instance, should fail)
clf_base_regressor = \
CalibratedClassifierCV(RandomForestRegressor(), method="sigmoid")
assert_raises(RuntimeError, clf_base_regressor.fit, X_train, y_train)
def test_sample_weight_warning():
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
for method in ['sigmoid', 'isotonic']:
base_estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(base_estimator, method=method)
# LinearSVC does not currently support sample weights but they
# can still be used for the calibration step (with a warning)
msg = "LinearSVC does not support sample_weight."
assert_warns_message(
UserWarning, msg,
calibrated_clf.fit, X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
# As the weights are used for the calibration, they should still yield
        # different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert_greater(diff, 0.1)
def test_calibration_multiclass():
"""Test calibration for multiclass """
# test multi-class setting with classifier that implements
# only decision function
clf = LinearSVC()
X, y_idx = make_blobs(n_samples=100, n_features=2, random_state=42,
centers=3, cluster_std=3.0)
# Use categorical labels to check that CalibratedClassifierCV supports
# them correctly
target_names = np.array(['a', 'b', 'c'])
y = target_names[y_idx]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=2)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
assert_array_almost_equal(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that log-loss of calibrated classifier is smaller than
# log-loss of naively turned OvR decision function to probabilities
# via softmax
def softmax(y_pred):
e = np.exp(-y_pred)
return e / e.sum(axis=1).reshape(-1, 1)
uncalibrated_log_loss = \
log_loss(y_test, softmax(clf.decision_function(X_test)))
calibrated_log_loss = log_loss(y_test, probas)
assert_greater_equal(uncalibrated_log_loss, calibrated_log_loss)
# Test that calibration of a multiclass classifier decreases log-loss
# for RandomForestClassifier
X, y = make_blobs(n_samples=100, n_features=2, random_state=42,
cluster_std=3.0)
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
loss = log_loss(y_test, clf_probs)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
cal_loss = log_loss(y_test, cal_clf_probs)
assert_greater(loss, cal_loss)
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = \
X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
sample_weight[n_samples:2 * n_samples]
X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
# Naive-Bayes
clf = MultinomialNB()
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [(X_calib, X_test),
(sparse.csr_matrix(X_calib),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
pc_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = pc_clf.predict_proba(this_X_test)
y_pred = pc_clf.predict(this_X_test)
prob_pos_pc_clf = y_prob[:, 1]
assert_array_equal(y_pred,
np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm,
_sigmoid_calibration(exF, exY), 3)
lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
assert_raises(ValueError, _SigmoidCalibration().fit,
np.vstack((exF, exF)), exY)
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
prob_true_unnormalized, prob_pred_unnormalized = \
calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
assert_equal(len(prob_true), len(prob_pred))
assert_equal(len(prob_true), 2)
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
# probabilities outside [0, 1] should not be accepted when normalize
# is set to False
assert_raises(ValueError, calibration_curve, [1.1], [-0.1],
normalize=False)
def test_calibration_nan_imputer():
"""Test that calibration can accept nan"""
X, y = make_classification(n_samples=10, n_features=2,
n_informative=2, n_redundant=0,
random_state=42)
X[0, 0] = np.nan
clf = Pipeline(
[('imputer', Imputer()),
('rf', RandomForestClassifier(n_estimators=1))])
clf_c = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_c.fit(X, y)
clf_c.predict(X)
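# Illustrative usage sketch (not part of the original test suite; variable
# names are hypothetical). The 'prefit' pattern exercised above would look
# like this in user code:
#
#   base = LinearSVC(random_state=42).fit(X_train, y_train)
#   calibrated = CalibratedClassifierCV(base, method='sigmoid', cv='prefit')
#   calibrated.fit(X_calib, y_calib)
#   proba = calibrated.predict_proba(X_test)  # calibrated class probabilities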
|
bsd-3-clause
|
ilyes14/scikit-learn
|
sklearn/tests/test_kernel_approximation.py
|
244
|
7588
|
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
    # abbreviations for an easier formula
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
    # test that SkewedChi2Sampler approximates the kernel on random data
# compute exact kernel
c = 0.03
    # abbreviations for an easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
    # this array has shape n_samples_x x n_samples_y x n_features
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
    assert_true(np.all(np.isfinite(X_transformed)))
def test_nystroem_poly_kernel_params():
    # Non-regression: Nystroem should pass other parameters besides gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
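# Illustrative usage sketch (not part of the original test suite; variable
# names are hypothetical). The approximators tested above are typically used
# to map data into an explicit feature space before fitting a linear model:
#
#   feature_map = Nystroem(kernel='rbf', gamma=10., n_components=100,
#                          random_state=42)
#   X_features = feature_map.fit_transform(X)
#   # np.dot(X_features, X_features.T) then approximates rbf_kernel(X, gamma=10.)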
|
bsd-3-clause
|
CELMA-project/dissertation
|
fig/figCreators/innerGhost.py
|
1
|
4602
|
#!/usr/bin/env python
"""
Script which plots the inner ghost setup.
"""
import matplotlib.pyplot as plt
import numpy as np
font = {"family":"serif", "serif": ["computer modern roman"]}
titleSize = 25
size = 30
plt.rc("font", size = size)
plt.rc("axes", titlesize = titleSize)
plt.rc("font", **font)
plt.rc("text", usetex=True)
def plotInnerGhost(nx = 4 ,\
dx = 1 ,\
nz = 8 ,\
outerBC = True ,\
annotate = True ,\
showPlot = False,\
extension ="pdf" ):
"""
    Plots the inner ghost setup
Parameters
----------
nx : int
Number of points in rho
dx : float
Grid spacing in rho
nz : int
Number of points in theta
outerBC: bool
Whether or not to plot the outer boundary condition
annotate: bool
Whether or not to annotate
showPlot: bool
Whether or not to show the plot
extension: str
Extension of the plot to save
Returns
-------
fileName : str
The name of the saved file
"""
lw = 2
fig = plt.figure(figsize=(12,12))
ax = fig.add_subplot(111)
ax.set_axis_off()
# Make grid
rho = np.linspace(dx/2, (nx-0.5)*dx, nx)
# Make the radii
for radius in rho:
curCircle = plt.Circle((0, 0), radius, color="k", lw=lw, fill=False)
ax.add_artist(curCircle)
eps = 0.5*dx
if outerBC:
        # Ghost point sits half a grid spacing (0.5*dx) outside the last inner point
xGhost = nx*dx
rho = np.append(rho, xGhost)
ghostCircle = plt.Circle((0, 0), xGhost, color="k", lw=lw, ls="--",\
fill=False)
ax.add_artist(ghostCircle)
ax.set_xlim([-(xGhost+eps), (xGhost+eps)])
ax.set_ylim([-(xGhost+eps), (xGhost+eps)])
else:
ax.set_xlim([-(rho[-1]+eps), (rho[-1]+eps)])
ax.set_ylim([-(rho[-1]+eps), (rho[-1]+eps)])
# Make the z lines
theta = np.linspace(0, 2*np.pi, nz+1)
# Exclude last point (only count this once)
theta = theta[:-1]
# Have the transformation
# x = rho*np.cos(theta)
# y = rho*np.sin(theta)
for curTheta in theta:
xStart = rho[0 ]*np.cos(curTheta)
xEnd = rho[-1]*np.cos(curTheta)
yStart = rho[0 ]*np.sin(curTheta)
yEnd = rho[-1]*np.sin(curTheta)
ax.plot((xStart, xEnd),(yStart, yEnd), lw=lw, color="k")
# Make the cross in the center
crossLength = dx/6
ax.plot((-crossLength, crossLength),(0,0) , lw=lw, color="k")
ax.plot((0,0), (-crossLength, crossLength), lw=lw, color="k")
# Common point markers
thetaInd = 1
color = "green"
lwM = lw*3.5
radius = dx*0.25
zorder = 10
# Set inner point markers
startTheta = theta[thetaInd]
nPoints = 3
for i in range(nPoints):
xStart = rho[i]*np.cos(startTheta)
yStart = rho[i]*np.sin(startTheta)
markerCircle = plt.Circle((xStart, yStart,), radius,\
color=color, lw=lwM, fill=False,\
zorder=zorder)
ax.add_artist(markerCircle)
# Set ghost-point markers
endTheta = theta[int(len(theta)/2)+thetaInd]
nPoints = 2
for i in range(nPoints):
xStart = rho[i]*np.cos(endTheta)
yStart = rho[i]*np.sin(endTheta)
ghostMarkerCircle = plt.Circle((xStart, yStart,), radius,\
color=color, lw=lwM, ls=":", fill=False,\
zorder=zorder)
ax.add_artist(ghostMarkerCircle)
# Annotate
if annotate:
# NOTE: Text on saved file may differ from plt.show()
size = 70
place = rho[-1]*0.90
ax.annotate(r"$\theta_i$",
xy=(place, place), xycoords='data',
xytext=(place, place), textcoords='data',
size=size, va="center", ha="center",
)
ax.annotate(r"$\theta_i+\pi$",
xy=(-place, -place), xycoords='data',
xytext=(-place, -place), textcoords='data',
size=size, va="center", ha="center",
)
ax.set_aspect("equal")
fig.tight_layout()
fileName = "../innerGhost.{}".format(extension)
fig.savefig(fileName, transparent=True)
if showPlot:
plt.show()
return fileName
if __name__ == "__main__":
from subprocess import Popen
fileName = plotInnerGhost()
# Crop with pdfCrop
Popen("pdfcrop {0} {0}".format(fileName), shell=True).wait()
|
gpl-3.0
|
yunfeilu/scikit-learn
|
examples/cluster/plot_kmeans_stability_low_dim_dense.py
|
338
|
4324
|
"""
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initialization strategies to make
the algorithm convergence robust, as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of squared
distances to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrates a single run of the ``MiniBatchKMeans``
estimator using ``init="random"`` and ``n_init=1``. This run leads to
bad convergence (a local optimum), with the estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of runs (with randomly generated datasets) for each strategy so as
# to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
|
bsd-3-clause
|
artmusic0/theano-learning.part03
|
Myfile_run-py_release_turble-training/cnn.py
|
3
|
9653
|
import os
import sys, getopt
import time
import numpy
import theano
import cPickle
import theano.tensor as T
from sklearn import preprocessing
from logistic_sgd import LogisticRegression
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv
def ReLU(x):
y = T.maximum(0.0, x)
return (y)
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None,
activation=T.tanh):
"""
        Typical hidden layer of an MLP: units are fully-connected and have a
        sigmoidal activation function. The weight matrix W is of shape (n_in, n_out)
        and the bias vector b is of shape (n_out,).
        NOTE: the nonlinearity used here is tanh.
        Hidden unit activation is given by: tanh(dot(input, W) + b)
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
        :type input: theano.tensor.dmatrix
        :param input: a symbolic tensor of shape (n_examples, n_in)
        :type n_in: int
        :param n_in: dimensionality of input
:type n_out: int
:param n_out: number of hidden units
:type activation: theano.Op or function
:param activation: Non linearity to be applied in the hidden
layer
"""
self.inp = input
# end-snippet-1
        # `W` is initialized with `W_values` which is uniformly sampled
        # from -sqrt(6./(n_in+n_out)) to sqrt(6./(n_in+n_out))
        # for the tanh activation function
        # the output of uniform is converted using asarray to dtype
        # theano.config.floatX so that the code is runnable on GPU
# Note : optimal initialization of weights is dependent on the
# activation function used (among other things).
# For example, results presented in [Xavier10] suggest that you
# should use 4 times larger initial weights for sigmoid
# compared to tanh
        # We have no info for other functions, so we use the same as
        # for tanh.
if W is None:
W_values = numpy.asarray(
rng.uniform(
low=-numpy.sqrt(6. / (n_in + n_out)),
high=numpy.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)
),
dtype=theano.config.floatX
)
if activation == theano.tensor.nnet.sigmoid:
W_values *= 4
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
lin_output = T.dot(input, self.W) + self.b
self.output = (
lin_output if activation is None
else activation(lin_output)
)
# parameters of the model
self.params = [self.W, self.b]
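# Illustrative usage sketch (hypothetical shapes, not taken from this file):
# a HiddenLayer mapping 800 inputs to 500 tanh units could be built as
#
#   rng = numpy.random.RandomState(1234)
#   x = T.matrix('x')
#   hidden = HiddenLayer(rng, input=x, n_in=800, n_out=500, activation=T.tanh)
#   # hidden.output is then T.tanh(T.dot(x, hidden.W) + hidden.b)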
class LeNetConvPoolLayer(object):
"""Pool Layer of a convolutional network """
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
"""
Allocate a LeNetConvPoolLayer with shared variable internal parameters.
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dtensor4
:param input: symbolic image tensor, of shape image_shape
:type filter_shape: tuple or list of length 4
:param filter_shape: (number of filters, num input feature maps,
filter height, filter width)
:type image_shape: tuple or list of length 4
:param image_shape: (batch size, num input feature maps,
image height, image width)
:type poolsize: tuple or list of length 2
:param poolsize: the downsampling (pooling) factor (#rows, #cols)
"""
assert image_shape[1] == filter_shape[1]
self.input = input
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = numpy.prod(filter_shape[1:])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
numpy.prod(poolsize))
# initialize weights with random weights
W_bound = numpy.sqrt(6. / (fan_in + fan_out))
self.W = theano.shared(
numpy.asarray(
rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
dtype=theano.config.floatX
),
borrow=True
)
# the bias is a 1D tensor -- one bias per output feature map
b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
self.b = theano.shared(value=b_values, borrow=True)
# convolve input feature maps with filters
conv_out = conv.conv2d(
input=input,
filters=self.W,
filter_shape=filter_shape,
image_shape=image_shape
)
# downsample each feature map individually, using maxpooling
pooled_out = downsample.max_pool_2d(
input=conv_out,
ds=poolsize,
ignore_border=True
)
# add the bias term. Since the bias is a vector (1D array), we first
# reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
# thus be broadcasted across mini-batches and feature map
# width & height
self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
# store parameters of this layer
self.params = [self.W, self.b]
class CNN(object):
def __getstate__(self):
weights = [p.get_value() for p in self.params]
#return (self.layer0.W, self.layer0.b, self.layer1.W, self.layer1.b, self.layer2.W,
# self.layer2.b, self.layer3.W, self.layer3.b)
return weights
def __setstate__(self, weights):
# (self.layer0.W, self.layer0.b, self.layer1.W, self.layer1.b, self.layer2.W, self.layer2.b, self.layer3.W, self.layer3.b) = state
i = iter(weights)
for p in self.params:
p.set_value(i.next())
def __init__(self, rng, input, nkerns, batch_size):
        # Reshape matrix of rasterized images of shape (batch_size, 512 * 288)
        # to a 4D tensor, compatible with our LeNetConvPoolLayer
        # (512, 288) is the size of the input images.
self.layer0_input = input.reshape((batch_size, 1, 512, 288))
        # Construct the first convolutional pooling layer:
        # filtering reduces the image size to (512-17+1, 288-9+1) = (496, 280)
        # maxpooling reduces this further to (496/2, 280/2) = (248, 140)
        # 4D output tensor is thus of shape (batch_size, nkerns[0], 248, 140)
self.layer0 = LeNetConvPoolLayer(
rng,
input=self.layer0_input, # 288 = 4*72 // 512 = 4*128
# 72 = 4*18 // 128 =4 * 32 // 2 9 16
image_shape=(batch_size, 1, 512, 288),
filter_shape=(nkerns[0], 1, 17, 9), # 280 496
poolsize=(2, 2)
)
        # Construct the second convolutional pooling layer:
        # filtering reduces the image size to (248-17+1, 140-9+1) = (232, 132)
        # maxpooling reduces this further to (232/2, 132/2) = (116, 66)
        # 4D output tensor is thus of shape (batch_size, nkerns[1], 116, 66)
self.layer1 = LeNetConvPoolLayer(
rng,
input=self.layer0.output,
image_shape=(batch_size, nkerns[0], 248, 140),
filter_shape=(nkerns[1], nkerns[0], 17, 9),# 132 232
poolsize=(2, 2)
)
        # Construct the third convolutional pooling layer:
        # filtering reduces the image size to (116-17+1, 66-9+1) = (100, 58)
        # maxpooling reduces this further to (100/2, 58/2) = (50, 29)
        # 4D output tensor is thus of shape (batch_size, nkerns[2], 50, 29)
self.layer2 = LeNetConvPoolLayer(
rng,
input=self.layer1.output,
image_shape=(batch_size, nkerns[1], 116, 66),
filter_shape=(nkerns[2], nkerns[1], 17, 9), # 58 100
poolsize=(2, 2)
)
        # the HiddenLayer being fully-connected, it operates on 2D matrices of
        # shape (batch_size, num_pixels) (i.e. matrix of rasterized images).
        # Flattening layer2's output gives a matrix of shape
        # (batch_size, nkerns[2] * 50 * 29).
self.layer3_input = self.layer2.output.flatten(2)
# construct a fully-connected sigmoidal layer
self.layer3 = HiddenLayer(
rng,
input=self.layer3_input,
n_in=nkerns[2] * 50 * 29,
n_out=500,
activation=T.tanh
)
# classify the values of the fully-connected sigmoidal layer
self.layer4 = LogisticRegression(input=self.layer3.output, n_in=500, n_out=100)
# the cost we minimize during training is the NLL of the model
# self.cost = self.layer3.negative_log_likelihood(y)
self.errors = self.layer4.errors
# create a list of all model parameters to be fit by gradient descent
self.params = self.layer4.params + self.layer3.params + self.layer2.params + self.layer1.params + self.layer0.params
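# Illustrative construction sketch (hypothetical values; the nkerns sizes and
# batch size below are assumptions, not taken from this repository):
#
#   rng = numpy.random.RandomState(23455)
#   x = T.matrix('x')  # rasterized 512x288 grayscale images
#   net = CNN(rng, input=x, nkerns=[10, 20, 40], batch_size=20)
#   # net.errors (bound to net.layer4.errors above) and net.params can then be
#   # used to build the training cost and the gradient-descent updates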
|
gpl-3.0
|
ronekko/deep_metric_learning
|
lib/datasets/data_provider.py
|
1
|
6900
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 10 00:15:50 2017
@author: sakurai
"""
import collections
import numpy as np
from sklearn.preprocessing import LabelEncoder
from fuel.streams import DataStream
from fuel.schemes import IterationScheme, BatchSizeScheme, SequentialScheme
from .cars196_dataset import Cars196Dataset
from .cub200_2011_dataset import Cub200_2011Dataset
from .online_products_dataset import OnlineProductsDataset
from .random_fixed_size_crop_mod import RandomFixedSizeCrop
def get_dataset_class(dataset_name):
"""
Args:
        dataset_name (str):
specify the dataset from 'cars196', 'cub200_2011', 'products'.
"""
if dataset_name == 'cars196':
dataset_class = Cars196Dataset
elif dataset_name == 'cub200_2011':
dataset_class = Cub200_2011Dataset
elif dataset_name == 'products':
dataset_class = OnlineProductsDataset
else:
raise ValueError(
"`dataset` must be 'cars196', 'cub200_2011' or 'products'.")
return dataset_class
def get_streams(batch_size=50, dataset='cars196', method='n_pairs_mc',
crop_size=224, load_in_memory=False):
'''
args:
batch_size (int):
number of examples per batch
dataset (str):
specify the dataset from 'cars196', 'cub200_2011', 'products'.
method (str or fuel.schemes.IterationScheme):
batch construction method. Specify 'n_pairs_mc', 'clustering', or
a subclass of IterationScheme that has constructor such as
`__init__(self, batch_size, dataset_train)` .
crop_size (int or tuple of ints):
height and width of the cropped image.
'''
dataset_class = get_dataset_class(dataset)
dataset_train = dataset_class(['train'], load_in_memory=load_in_memory)
dataset_test = dataset_class(['test'], load_in_memory=load_in_memory)
if not isinstance(crop_size, tuple):
crop_size = (crop_size, crop_size)
if method == 'n_pairs_mc':
labels = dataset_class(
['train'], sources=['targets'], load_in_memory=True).data_sources
scheme = NPairLossScheme(labels, batch_size)
elif method == 'clustering':
scheme = EpochwiseShuffledInfiniteScheme(
dataset_train.num_examples, batch_size)
elif issubclass(method, IterationScheme):
scheme = method(batch_size, dataset=dataset_train)
else:
raise ValueError("`method` must be 'n_pairs_mc' or 'clustering' "
"or subclass of IterationScheme.")
stream = DataStream(dataset_train, iteration_scheme=scheme)
stream_train = RandomFixedSizeCrop(stream, which_sources=('images',),
random_lr_flip=True,
window_shape=crop_size)
stream_train_eval = RandomFixedSizeCrop(DataStream(
dataset_train, iteration_scheme=SequentialScheme(
dataset_train.num_examples, batch_size)),
which_sources=('images',), center_crop=True, window_shape=crop_size)
stream_test = RandomFixedSizeCrop(DataStream(
dataset_test, iteration_scheme=SequentialScheme(
dataset_test.num_examples, batch_size)),
which_sources=('images',), center_crop=True, window_shape=crop_size)
return stream_train, stream_train_eval, stream_test
class NPairLossScheme(BatchSizeScheme):
def __init__(self, labels, batch_size):
self._labels = np.array(labels).flatten()
self._label_encoder = LabelEncoder().fit(self._labels)
self._classes = self._label_encoder.classes_
self.num_classes = len(self._classes)
        assert batch_size % 2 == 0, ("batch_size must be an even number.")
assert batch_size <= self.num_classes * 2, (
"batch_size must not exceed twice the number of classes"
"(i.e. set batch_size <= {}).".format(self.num_classes * 2))
self.batch_size = batch_size
self._class_to_indexes = []
for c in self._classes:
self._class_to_indexes.append(
np.argwhere(self._labels == c).ravel())
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
anchor_indexes, positive_indexes = self._generate_indexes()
indexes = anchor_indexes + positive_indexes
return indexes
def _generate_indexes(self):
random_classes = np.random.choice(
self.num_classes, self.batch_size // 2, False)
anchor_indexes = []
positive_indexes = []
for c in random_classes:
a, p = np.random.choice(self._class_to_indexes[c], 2, False)
anchor_indexes.append(a)
positive_indexes.append(p)
return anchor_indexes, positive_indexes
def get_request_iterator(self):
return self
class EpochwiseShuffledInfiniteScheme(BatchSizeScheme):
def __init__(self, indexes, batch_size):
if not isinstance(indexes, collections.Iterable):
indexes = range(indexes)
if batch_size > len(indexes):
raise ValueError('batch_size must not be larger than the indexes.')
if len(indexes) != len(np.unique(indexes)):
raise ValueError('Items in indexes must be unique.')
self._original_indexes = np.array(indexes, dtype=np.int).flatten()
self.batch_size = batch_size
self._shuffled_indexes = np.array([], dtype=np.int)
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
batch_size = self.batch_size
        # if the remaining indexes are fewer than batch_size, newly shuffled
        # indexes are appended to the remainder.
num_remains = len(self._shuffled_indexes)
if num_remains < batch_size:
num_overrun = batch_size - num_remains
new_shuffled_indexes = self._original_indexes.copy()
            # ensure the batch of indexes spanning the epoch boundary does not
            # contain duplicate indexes.
np.random.shuffle(new_shuffled_indexes)
overrun = new_shuffled_indexes[:num_overrun]
next_indexes = np.concatenate((self._shuffled_indexes, overrun))
while len(next_indexes) != len(np.unique(next_indexes)):
np.random.shuffle(new_shuffled_indexes)
overrun = new_shuffled_indexes[:num_overrun]
next_indexes = np.concatenate(
(self._shuffled_indexes, overrun))
self._shuffled_indexes = np.concatenate(
(self._shuffled_indexes, new_shuffled_indexes))
next_indexes = self._shuffled_indexes[:batch_size]
self._shuffled_indexes = self._shuffled_indexes[batch_size:]
return next_indexes.tolist()
def get_request_iterator(self):
return self
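# Illustrative usage sketch (hypothetical; assumes the fuel-based datasets
# above are installed and already downloaded):
#
#   streams = get_streams(batch_size=50, dataset='cars196',
#                         method='n_pairs_mc', crop_size=224)
#   stream_train, stream_train_eval, stream_test = streams
#   for images, targets in stream_train.get_epoch_iterator():
#       # the first half of the batch are anchors and the second half their
#       # positives, as arranged by NPairLossScheme
#       ...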
|
mit
|
terkkila/scikit-learn
|
sklearn/tests/test_learning_curve.py
|
225
|
10791
|
# Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
        # training score becomes worse (2 -> 1), test score becomes better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
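# Illustrative usage sketch (not part of the original test suite; mirrors the
# calls exercised above with a real estimator):
#
#   train_sizes, train_scores, test_scores = learning_curve(
#       PassiveAggressiveClassifier(n_iter=1, shuffle=False), X, y,
#       cv=3, train_sizes=np.linspace(0.1, 1.0, 5))
#   # plotting train_scores.mean(axis=1) and test_scores.mean(axis=1) against
#   # train_sizes gives the usual learning-curve diagnostic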
|
bsd-3-clause
|
tawsifkhan/scikit-learn
|
examples/linear_model/plot_sgd_comparison.py
|
167
|
1659
|
"""
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
|
bsd-3-clause
|
maheshakya/scikit-learn
|
examples/ensemble/plot_partial_dependence.py
|
249
|
4456
|
"""
========================
Partial Dependence Plots
========================
Partial dependence plots show the dependence between the target function [1]_
and a set of 'target' features, marginalizing over the
values of all other features (the complement features). Due to the limits
of human perception, the size of the target feature set must be small (usually
one or two); thus the target features are usually chosen from among the most
important features
(see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`).
This example shows how to obtain partial dependence plots from a
:class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California
housing dataset. The example is taken from [HTF2009]_.
The plot shows four one-way and one two-way partial dependence plots.
The target variables for the one-way PDP are:
median income (`MedInc`), avg. occupants per household (`AvgOccup`),
median house age (`HouseAge`), and avg. rooms per household (`AveRooms`).
We can clearly see that the median house price shows a linear relationship
with the median income (top left) and that the house price drops when the
avg. occupants per household increases (top middle).
The top right plot shows that the house age in a district does not have
a strong influence on the (median) house price; neither does the average
number of rooms per household.
The tick marks on the x-axis represent the deciles of the feature values
in the training data.
Partial dependence plots with two target features enable us to visualize
interactions among them. The two-way partial dependence plot shows the
dependence of median house price on joint values of house age and avg.
occupants per household. We can clearly see an interaction between the
two features:
For an avg. occupancy greater than two, the house price is nearly independent
of the house age, whereas for values less than two there is a strong dependence
on age.
.. [HTF2009] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning Ed. 2", Springer, 2009.
.. [1] For classification you can think of it as the regression score before
the link function.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.datasets.california_housing import fetch_california_housing
# fetch California housing dataset
cal_housing = fetch_california_housing()
# split 80/20 train-test
X_train, X_test, y_train, y_test = train_test_split(cal_housing.data,
cal_housing.target,
test_size=0.2,
random_state=1)
names = cal_housing.feature_names
print('_' * 80)
print("Training GBRT...")
clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,
learning_rate=0.1, loss='huber',
random_state=1)
clf.fit(X_train, y_train)
print("done.")
print('_' * 80)
print('Convenience plot with ``partial_dependence_plots``')
print()
features = [0, 5, 1, 2, (5, 1)]
fig, axs = plot_partial_dependence(clf, X_train, features, feature_names=names,
n_jobs=3, grid_resolution=50)
fig.suptitle('Partial dependence of house value on nonlocation features\n'
'for the California housing dataset')
plt.subplots_adjust(top=0.9) # tight_layout causes overlap with suptitle
print('_' * 80)
print('Custom 3d plot via ``partial_dependence``')
print()
fig = plt.figure()
target_feature = (1, 5)
pdp, (x_axis, y_axis) = partial_dependence(clf, target_feature,
X=X_train, grid_resolution=50)
XX, YY = np.meshgrid(x_axis, y_axis)
Z = pdp.T.reshape(XX.shape).T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu)
ax.set_xlabel(names[target_feature[0]])
ax.set_ylabel(names[target_feature[1]])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle('Partial dependence of house value on median age and '
'average occupancy')
plt.subplots_adjust(top=0.9)
plt.show()
|
bsd-3-clause
|
HKUST-SING/tensorflow
|
tensorflow/contrib/learn/python/learn/preprocessing/tests/categorical_test.py
|
137
|
2219
|
# encoding: utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Categorical tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.preprocessing import categorical
from tensorflow.python.platform import test
class CategoricalTest(test.TestCase):
"""Categorical tests."""
def testSingleCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=1)
x = cat_processor.fit_transform([["0"], [1], [float("nan")], ["C"], ["C"],
[1], ["0"], [np.nan], [3]])
self.assertAllEqual(list(x), [[2], [1], [0], [3], [3], [1], [2], [0], [0]])
def testSingleCategoricalProcessorPandasSingleDF(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
cat_processor = categorical.CategoricalProcessor()
data = pd.DataFrame({"Gender": ["Male", "Female", "Male"]})
x = list(cat_processor.fit_transform(data))
self.assertAllEqual(list(x), [[1], [2], [1]])
def testMultiCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(
min_frequency=0, share=False)
x = cat_processor.fit_transform([["0", "Male"], [1, "Female"],
["3", "Male"]])
self.assertAllEqual(list(x), [[1, 1], [2, 2], [3, 1]])
if __name__ == "__main__":
test.main()
|
apache-2.0
|
prheenan/Research
|
Perkins/AnalysisUtil/ForceExtensionAnalysis/FEC_Plot.py
|
1
|
9081
|
# force floating point division. Can still use integer with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
import sys
from Research.Perkins.AnalysisUtil.ForceExtensionAnalysis import FEC_Util
import GeneralUtil.python.PlotUtilities as PlotUtilities
from GeneralUtil.python.IgorUtil import SavitskyFilter
import copy
def_conversion_opts =dict(ConvertX = lambda x: x*1e9,
ConvertY = lambda y: y*1e12)
def _fec_base_plot(x,y,n_filter_points=None,label="",
style_data=dict(color='k',alpha=0.3),
style_filtered=None):
"""
base function; plots x and y (and their filtered versions)
Args:
x/y: the x and y to use for plotting
n_filter_points: how many points for the savitsky golay
style_<data/filtered>: plt.plot options for the raw and filtered data.
defaults to filtered just being alpha=1 (not transparent)
Returns:
x and y, filtered versions
"""
if (style_filtered is None):
style_filtered = dict(**style_data)
style_filtered['alpha'] = 1
style_filtered['label'] = label
if (n_filter_points is None):
n_filter_points = int(np.ceil(x.size * FEC_Util.default_filter_pct))
x_filtered = SavitskyFilter(x,nSmooth=n_filter_points)
y_filtered = SavitskyFilter(y,nSmooth=n_filter_points)
plt.plot(x,y,**style_data)
plt.plot(x_filtered,y_filtered,**style_filtered)
return x_filtered,y_filtered
def _ApproachRetractCurve(Appr,Retr,NFilterPoints=100,
x_func = lambda x: x.Separation,
y_func = lambda y: y.Force,
ApproachLabel="Approach",
RetractLabel="Retract"):
"""
Most of the brains for the approach/retract curve. does *not* show anything
Args:
        Appr/Retr: the approach and retract TimeSepForce objects to plot
NFilterPoints: how many points to filter down
ApproachLabel: label to put on the approach
RetractLabel: label to put on the retract
"""
# plot the separation and force, with their filtered counterparts
_fec_base_plot(x_func(Appr),y_func(Appr),n_filter_points=NFilterPoints,
style_data=dict(color='r',alpha=0.3),label=ApproachLabel)
_fec_base_plot(x_func(Retr),y_func(Retr),n_filter_points=NFilterPoints,
style_data=dict(color='b',alpha=0.3),label=RetractLabel)
def FEC_AlreadySplit(Appr,Retr,
XLabel = "Separation (nm)",
YLabel = "Force (pN)",
ConversionOpts=def_conversion_opts,
PlotLabelOpts=dict(),
PreProcess=False,
NFilterPoints=50,
LegendOpts=dict(loc='best'),
**kwargs):
"""
Args:
XLabel: label for x axis
YLabel: label for y axis
ConversionOpts: see FEC_Util.SplitAndProcess
PlotLabelOpts: see arguments after filtering of ApproachRetractCurve
PreProcess: if true, pre-processes the approach and retract separately
(ie: to zero and flip the y axis).
NFilterPoints: see FEC_Util.SplitAndProcess, for Savitsky-golay
        **kwargs: passed to FEC_Util.PreProcessApproachAndRetract
"""
ApprCopy = FEC_Util.UnitConvert(Appr,**ConversionOpts)
RetrCopy = FEC_Util.UnitConvert(Retr,**ConversionOpts)
if (PreProcess):
ApprCopy,RetrCopy = FEC_Util.PreProcessApproachAndRetract(ApprCopy,
RetrCopy,
**kwargs)
_ApproachRetractCurve(ApprCopy,RetrCopy,
NFilterPoints=NFilterPoints,**PlotLabelOpts)
PlotUtilities.lazyLabel(XLabel,YLabel,"")
PlotUtilities.legend(**LegendOpts)
def z_sensor_vs_time(time_sep_force,**kwargs):
"""
plots z sensor versus time. See force_versus_time
"""
plot_labels = dict(x_func=lambda x : x.Time,
y_func=lambda x : x.ZSnsr)
FEC(time_sep_force,
PlotLabelOpts=plot_labels,
XLabel="Time (s)",
YLabel="ZSnsr (nm)",**kwargs)
def force_versus_time(time_sep_force,**kwargs):
"""
Plots force versus time
Args:
**kwargs: see FEC
"""
plot_labels = dict(x_func=lambda x : x.Time,
y_func=lambda x: x.Force)
FEC(time_sep_force,
PlotLabelOpts=plot_labels,
XLabel="Time (s)",
YLabel="Force (pN)",**kwargs)
def FEC(TimeSepForceObj,NFilterPoints=50,
PreProcessDict=dict(),
**kwargs):
"""
Plots a force extension curve. Splits the curve into approach and
Retract and pre-processes by default
Args:
TimeSepForceObj: 'Raw' TimeSepForce Object
PreProcessDict: passed directly to FEC_Util.PreProcessFEC
**kwargs: passed directly to FEC_Plot.FEC_AlreadySplit
"""
Appr,Retr= FEC_Util.PreProcessFEC(TimeSepForceObj,
NFilterPoints=NFilterPoints,
**PreProcessDict)
# plot the approach and retract with the appropriate units
FEC_AlreadySplit(Appr,Retr,NFilterPoints=NFilterPoints,**kwargs)
def heat_map_fec(time_sep_force_objects,num_bins=(100,100),
separation_max = None,n_filter_func=None,use_colorbar=True,
ConversionOpts=def_conversion_opts,cmap='afmhot'):
"""
    Plots a two-dimensional histogram ("heat map") of many force-extension
    curves, optionally filtering each curve first
Args:
time_sep_force_objects: list of (zeroed, but SI) TimeSepForce Object
num_bins: tuple of <x,y> bins. Passed to hist2d
n_filter_func: if not none, histograms the savitsky-golay *filtered*
        version of the objects given, with n_filter_func being a function
taking in the TimeSepForce object and returning an integer number of
points
use_colorbar: if true, add a color bar
separation_max: if not None, only histogram up to and including this
separation. should be in units *after* conversion (default: nanometers)
ConversionOpts: passed to UnitConvert. Default converts x to nano<X>
and y to pico<Y>
"""
# convert everything...
objs = [FEC_Util.UnitConvert(r,**ConversionOpts)
for r in time_sep_force_objects]
if n_filter_func is not None:
objs = [FEC_Util.GetFilteredForce(o,n_filter_func(o))
for o in objs]
filtered_data = [(retr.Separation,retr.Force) for retr in objs]
separations = np.concatenate([r[0] for r in filtered_data])
forces = np.concatenate([r[1] for r in filtered_data])
if (separation_max is not None):
idx_use = np.where(separations < separation_max)
else:
# use everything
idx_use = slice(0,None,1)
separations = separations[idx_use]
forces = forces[idx_use]
# make a heat map, essentially
counts, xedges, yedges, Image = plt.hist2d(separations, forces,
bins=num_bins,cmap=cmap)
PlotUtilities.lazyLabel("Separation (nm)",
"Force (pN)",
"Force-Extension Heatmap")
if (use_colorbar):
cbar = plt.colorbar()
label = '# of points in (Force,Separation) Bin'
cbar.set_label(label,labelpad=10,rotation=270)
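# Editor's note: self-contained sketch (not part of the original module) of the
# binning step heat_map_fec performs -- concatenate many (separation, force)
# curves and feed them to hist2d. The synthetic curves below are hypothetical
# and only illustrate the data layout the histogram expects.
def _heat_map_binning_sketch(n_curves=20, n_points=500):
    rng = np.random.RandomState(0)
    curves = [(np.linspace(0, 100, n_points),                            # separation (nm)
               np.linspace(0, 50, n_points) + 5 * rng.randn(n_points))   # force (pN)
              for _ in range(n_curves)]
    separations = np.concatenate([c[0] for c in curves])
    forces = np.concatenate([c[1] for c in curves])
    counts, xedges, yedges, image = plt.hist2d(separations, forces,
                                               bins=(100, 100), cmap='afmhot')
    cbar = plt.colorbar()
    cbar.set_label('# of points in (Force,Separation) Bin')
    return counts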
def _n_rows_and_cols(processed,n_cols=3):
n_rows = int(np.ceil(len(processed)/n_cols))
return n_rows,n_cols
def gallery_fec(processed,xlim_nm,ylim_pN,NFilterPoints=100,n_cols=3,
x_label="Separation (nm)",y_label="Force (pN)",
approach_label="Approach",
retract_label="Retract"):
n_rows,n_cols = _n_rows_and_cols(processed,n_cols)
for i,r in enumerate(processed):
plt.subplot(n_rows,n_cols,(i+1))
appr,retr = r
is_labelled = i == 0
is_first = (i % n_cols == 0)
is_bottom = ((i + (n_cols)) >= len(processed))
XLabel = x_label if is_bottom else ""
YLabel = y_label if is_first else ""
ApproachLabel = approach_label if is_labelled else ""
RetractLabel = retract_label if is_labelled else ""
PlotLabelOpts = dict(ApproachLabel=ApproachLabel,
RetractLabel=RetractLabel)
LegendOpts = dict(loc='upper left',frameon=True)
FEC_AlreadySplit(appr,retr,XLabel=XLabel,YLabel=YLabel,
LegendOpts=LegendOpts,
PlotLabelOpts=PlotLabelOpts,
NFilterPoints=NFilterPoints)
plt.xlim(xlim_nm)
plt.ylim(ylim_pN)
ax = plt.gca()
if (not is_bottom):
ax.tick_params(labelbottom='off')
if (not is_first):
ax.tick_params(labelleft='off')
|
gpl-3.0
|
yutiansut/QUANTAXIS
|
QUANTAXIS/QAMarket/QATTSBroker.py
|
2
|
19455
|
# coding:utf-8
import os
import requests
import json
import urllib
import base64
import datetime
import configparser
import pandas as pd
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from QUANTAXIS.QAMarket.QAOrderHandler import QA_OrderHandler
from QUANTAXIS.QAFetch.QATdx import (
QA_fetch_get_future_day,
QA_fetch_get_future_min,
QA_fetch_get_index_day,
QA_fetch_get_index_min,
QA_fetch_get_stock_day,
QA_fetch_get_stock_min
)
from QUANTAXIS.QAMarket.common import (
cn_en_compare,
order_status_cn_en,
trade_towards_cn_en
)
from QUANTAXIS.QAMarket.QABroker import QA_Broker
from QUANTAXIS import QAFetch
from QUANTAXIS.QAUtil.QALogs import QA_util_log_info
from QUANTAXIS.QAEngine.QAEvent import QA_Event
from QUANTAXIS.QAUtil.QAParameter import ORDER_DIRECTION, MARKET_TYPE, ORDER_MODEL, TRADE_STATUS, FREQUENCE, BROKER_EVENT, BROKER_TYPE, MARKET_EVENT
from QUANTAXIS.QAUtil.QASetting import setting_path
class TTSConfig(configparser.ConfigParser):
__config_path = '{}{}{}'.format(setting_path, os.sep, 'config.ini')
__config_section = 'TTSConfig'
values = {
'trade_server_ip': '127.0.0.1',
'trade_server_port': '19820',
'tdx_server_ip': '60.12.142.37',
'tdx_server_port': '7708',
'tdx_version': '6.44',
'transport_enc_key': '',
'transport_enc_iv': '',
'user_yyb': 1,
'user_name': '',
'user_pass': '',
'user_tx_pass': ''
}
def __init__(self):
super().__init__()
if not os.path.exists(self.__config_path):
self.__generate_default()
else:
self.read(self.__config_path)
if not self.has_section(self.__config_section):
self.__generate_default()
else:
for key in self.values.keys():
key = str(key)
if self.has_option(self.__config_section, key):
self.values[key] = self.get(self.__config_section, key)
if self.values['user_name'] == '' or self.values['user_pass'] == '':
raise Exception(
                        'user_name and user_pass must not be empty; please configure them in %s' % self.__config_path
)
self.values['user_tx_pass'] = self.values['user_pass'] if self.values[
'user_tx_pass'] == '' else self.values['user_tx_pass']
def __generate_default(self):
f = open(self.__config_path, 'w')
self.add_section(self.__config_section)
for key, value in self.values.items():
self.set(self.__config_section, str(key), str(value))
self.write(f)
f.close()
class QA_TTSBroker(QA_Broker):
fetcher = {
(MARKET_TYPE.STOCK_CN,
FREQUENCE.DAY): QA_fetch_get_stock_day,
(MARKET_TYPE.STOCK_CN,
FREQUENCE.FIFTEEN_MIN): QA_fetch_get_stock_min,
(MARKET_TYPE.STOCK_CN,
FREQUENCE.ONE_MIN): QA_fetch_get_stock_min,
(MARKET_TYPE.STOCK_CN,
FREQUENCE.FIVE_MIN): QA_fetch_get_stock_min,
(MARKET_TYPE.STOCK_CN,
FREQUENCE.THIRTY_MIN): QA_fetch_get_stock_min,
(MARKET_TYPE.STOCK_CN,
FREQUENCE.SIXTY_MIN): QA_fetch_get_stock_min,
(MARKET_TYPE.INDEX_CN,
FREQUENCE.DAY): QA_fetch_get_index_day,
(MARKET_TYPE.INDEX_CN,
FREQUENCE.FIFTEEN_MIN): QA_fetch_get_index_min,
(MARKET_TYPE.INDEX_CN,
FREQUENCE.ONE_MIN): QA_fetch_get_index_min,
(MARKET_TYPE.INDEX_CN,
FREQUENCE.FIVE_MIN): QA_fetch_get_index_min,
(MARKET_TYPE.INDEX_CN,
FREQUENCE.THIRTY_MIN): QA_fetch_get_index_min,
(MARKET_TYPE.INDEX_CN,
FREQUENCE.SIXTY_MIN): QA_fetch_get_index_min,
(MARKET_TYPE.FUND_CN,
FREQUENCE.DAY): QA_fetch_get_index_day,
(MARKET_TYPE.FUND_CN,
FREQUENCE.FIFTEEN_MIN): QA_fetch_get_index_min,
(MARKET_TYPE.FUND_CN,
FREQUENCE.ONE_MIN): QA_fetch_get_index_min,
(MARKET_TYPE.FUND_CN,
FREQUENCE.FIVE_MIN): QA_fetch_get_index_min,
(MARKET_TYPE.FUND_CN,
FREQUENCE.THIRTY_MIN): QA_fetch_get_index_min,
(MARKET_TYPE.FUND_CN,
FREQUENCE.SIXTY_MIN): QA_fetch_get_index_min
}
def __init__(self, auto_logon=True):
super().__init__()
self.name = BROKER_TYPE.TTS
self.config = TTSConfig()
self.order_handler = QA_OrderHandler()
self._endpoint = 'http://%s:%s/api' % (
self.config.values['trade_server_ip'],
self.config.values['trade_server_port']
)
self._encoding = "utf-8"
if self.config.values['transport_enc_key'] == '' or self.config.values[
'transport_enc_iv'] == '':
self._transport_enc = False
self._transport_enc_key = None
self._transport_enc_iv = None
self._cipher = None
else:
self._transport_enc = True
self._transport_enc_key = bytes(
self.config.values['transport_enc_key'],
encoding=self._encoding
)
self._transport_enc_iv = bytes(
self.config.values['transport_enc_iv'],
encoding=self._encoding
)
self._cipher = Cipher(
algorithms.AES(self._transport_enc_key),
modes.CBC(self._transport_enc_iv),
backend=default_backend()
)
self._session = requests.Session()
self.client_id = 0
        self.gddm_sh = 0  # Shanghai shareholder account code
        self.gddm_sz = 0  # Shenzhen shareholder account code
if auto_logon is True:
self.logon()
def call(self, func, params=None):
json_obj = {"func": func}
if params is not None:
json_obj["params"] = params
if self._transport_enc:
data_to_send = self.encrypt(json_obj)
response = self._session.post(self._endpoint, data=data_to_send)
else:
response = self._session.post(self._endpoint, json=json_obj)
response.encoding = self._encoding
text = response.text
if self._transport_enc:
decoded_text = self.decrypt(text)
# print(decoded_text)
return json.loads(decoded_text)
else:
return json.loads(text)
def encrypt(self, source_obj):
encrypter = self._cipher.encryptor()
source = json.dumps(source_obj)
source = source.encode(self._encoding)
need_to_padding = 16 - (len(source) % 16)
if need_to_padding > 0:
source = source + b'\x00' * need_to_padding
enc_data = encrypter.update(source) + encrypter.finalize()
b64_enc_data = base64.encodebytes(enc_data)
return urllib.parse.quote(b64_enc_data)
def decrypt(self, source):
decrypter = self._cipher.decryptor()
source = urllib.parse.unquote(source)
source = base64.decodebytes(source.encode("utf-8"))
data_bytes = decrypter.update(source) + decrypter.finalize()
return data_bytes.rstrip(b"\x00").decode(self._encoding)
def data_to_df(self, result):
if 'data' in result:
data = result['data']
df = pd.DataFrame(data=data)
df.rename(
columns=lambda x: cn_en_compare[x] if x in cn_en_compare else x,
inplace=True
)
if hasattr(df, 'towards'):
df.towards = df.towards.apply(
lambda x: trade_towards_cn_en[x]
if x in trade_towards_cn_en else x
)
if hasattr(df, 'status'):
df.status = df.status.apply(
lambda x: order_status_cn_en[x]
if x in order_status_cn_en else x
)
if hasattr(df, 'order_time'):
df.order_time = df.order_time.apply(
lambda x: '{} {}'.format(
datetime.date.today().strftime('%Y-%m-%d'),
datetime.datetime.strptime(x,
'%H%M%S').
strftime('%H:%M:%S')
)
)
if hasattr(df, 'trade_time'):
df.trade_time = df.trade_time.apply(
lambda x: '{} {}'.format(
datetime.date.today().strftime('%Y-%m-%d'),
datetime.datetime.strptime(x,
'%H%M%S').
strftime('%H:%M:%S')
)
)
if hasattr(df, 'realorder_id'):
df.realorder_id = df.realorder_id.apply(str)
if hasattr(df, 'amount'):
df.amount = df.amount.apply(pd.to_numeric)
if hasattr(df, 'price'):
df.price = df.price.apply(pd.to_numeric)
if hasattr(df, 'money'):
df.money = df.money.apply(pd.to_numeric)
if hasattr(df, 'trade_amount'):
df.trade_amount = df.trade_amount.apply(pd.to_numeric)
if hasattr(df, 'trade_price'):
df.trade_price = df.trade_price.apply(pd.to_numeric)
if hasattr(df, 'trade_money'):
df.trade_money = df.trade_money.apply(pd.to_numeric)
if hasattr(df, 'order_price'):
df.order_price = df.order_price.apply(pd.to_numeric)
if hasattr(df, 'order_amount'):
df.order_amount = df.order_amount.apply(pd.to_numeric)
if hasattr(df, 'order_money'):
df.order_money = df.order_money.apply(pd.to_numeric)
if hasattr(df, 'cancel_amount'):
df.cancel_amount = df.cancel_amount.apply(pd.to_numeric)
return df
else:
return pd.DataFrame()
#------ functions
def ping(self):
return self.call("ping", {})
def logon(self):
data = self.call(
"logon",
{
"ip": self.config.values['tdx_server_ip'],
"port": int(self.config.values['tdx_server_port']),
"version": self.config.values['tdx_version'],
"yyb_id": int(self.config.values['user_yyb']),
"account_no": self.config.values['user_name'],
"trade_account": self.config.values['user_name'],
"jy_password": self.config.values['user_pass'],
"tx_password": self.config.values['user_tx_pass']
}
)
if data['success']:
self.client_id = data["data"]["client_id"]
self.gddm_sh = self.query_data(5)['data'][0]['股东代码']
self.gddm_sz = self.query_data(5)['data'][1]['股东代码']
            print('Shanghai shareholder code: %s, Shenzhen shareholder code: %s' % (self.gddm_sh, self.gddm_sz))
return data
def logoff(self):
return self.call("logoff", {"client_id": self.client_id})
def query_data(self, category):
return self.call(
"query_data",
{
"client_id": self.client_id,
"category": category
}
)
def send_order(
self,
code,
price,
amount,
towards,
order_model,
market=None
):
"""下单
Arguments:
code {[type]} -- [description]
price {[type]} -- [description]
amount {[type]} -- [description]
towards {[type]} -- [description]
order_model {[type]} -- [description]
            market: exchange, SZ = Shenzhen Stock Exchange, SH = Shanghai Stock Exchange
Returns:
[type] -- [description]
"""
towards = 0 if towards == ORDER_DIRECTION.BUY else 1
if order_model == ORDER_MODEL.MARKET:
order_model = 4
elif order_model == ORDER_MODEL.LIMIT:
order_model = 0
if market is None:
market = QAFetch.base.get_stock_market(code)
if not isinstance(market, str):
            raise Exception('%s is invalid, please check the code and market arguments' % market)
market = market.lower()
if market not in ['sh', 'sz']:
            raise Exception('%s is not supported, please check the code and market arguments' % market)
return self.data_to_df(
self.call(
"send_order",
{
'client_id': self.client_id,
'category': towards,
'price_type': order_model,
'gddm': self.gddm_sh if market == 'sh' else self.gddm_sz,
'zqdm': code,
'price': price,
'quantity': amount
}
)
)
def cancel_order(self, exchange_id, order_id):
"""
Arguments:
            exchange_id {[type]} -- exchange: 0 = Shenzhen, 1 = Shanghai (occasionally 2 is also Shenzhen)
order_id {[type]} -- [description]
Returns:
[type] -- [description]
"""
return self.call(
"cancel_order",
{
'client_id': self.client_id,
'exchange_id': exchange_id,
'hth': order_id
}
)
def get_quote(self, code):
return self.call(
"get_quote",
{
'client_id': self.client_id,
'code': code,
}
)
def repay(self, amount):
return self.call(
"repay",
{
'client_id': self.client_id,
'amount': amount
}
)
def receive_order(self, event):
res = self.send_order(
code=event.order.code,
price=event.order.price,
amount=event.order.amount,
towards=event.order.towards,
order_model=event.order.order_model
)
try:
event.order.queued(res.realorder_id[0])
print('success receive order {}'.format(event.order.realorder_id))
except Exception as e:
print(res.realorder_id[0])
print(event.order)
print(e)
event.order.failed()
print(
'FAILED FOR CREATE ORDER {} {}'.format(
event.order.account_cookie,
event.order.status
)
)
return event.order
def run(self, event):
# if event.event_type is MARKET_EVENT.QUERY_DATA:
# self.order_handler.run(event)
# try:
# data = self.fetcher[(event.market_type, event.frequence)](
# code=event.code, start=event.start, end=event.end).values[0]
# if 'vol' in data.keys() and 'volume' not in data.keys():
# data['volume'] = data['vol']
# elif 'vol' not in data.keys() and 'volume' in data.keys():
# data['vol'] = data['volume']
# return data
# except Exception as e:
# QA_util_log_info('MARKET_ENGING ERROR: {}'.format(e))
# return None
# elif event.event_type is BROKER_EVENT.RECEIVE_ORDER:
# self.order_handler.run(event)
# elif event.event_type is BROKER_EVENT.TRADE:
# event = self.order_handler.run(event)
# event.message = 'trade'
# if event.callback:
# event.callback(event)
# el
if event.event_type is MARKET_EVENT.QUERY_ORDER:
self.order_handler.run(event)
elif event.event_type is BROKER_EVENT.SETTLE:
self.order_handler.run(event)
if event.callback:
event.callback('settle')
def get_market(self, order):
try:
data = self.fetcher[(order.market_type,
order.frequence)](
code=order.code,
start=order.datetime,
end=order.datetime
).values[0]
if 'vol' in data.keys() and 'volume' not in data.keys():
data['volume'] = data['vol']
elif 'vol' not in data.keys() and 'volume' in data.keys():
data['vol'] = data['volume']
return data
except Exception as e:
QA_util_log_info('MARKET_ENGING ERROR: {}'.format(e))
return None
def query_orders(self, account_cookie, status='filled'):
df = self.data_to_df(self.query_data(3 if status == 'filled' else 2))
df['account_cookie'] = account_cookie
if status == 'filled':
df = df[self.dealstatus_headers] if len(df) > 0 else pd.DataFrame(
columns=self.dealstatus_headers
)
else:
df['cancel_amount'] = 0
df = df[self.orderstatus_headers] if len(df) > 0 else pd.DataFrame(
columns=self.orderstatus_headers
)
return df.set_index(['account_cookie', 'realorder_id']).sort_index()
def query_positions(self, account_cookie):
data = {
'cash_available': 0.00,
'hold_available': {},
}
try:
result = self.query_data(0)
if 'data' in result and len(result['data']) > 0:
                # use subtraction so that intraday cash-management products do not inflate the available cash figure
data['cash_available'] = round(
float(result['data'][0]['总资产']) - float(
result['data'][0]['最新市值']
) - float(result['data'][0]['冻结资金']),
2
)
result = self.data_to_df(self.query_data(1))
if len(result) > 0:
result.index = result.code
if hasattr(result, 'amount'):
data['hold_available'] = result.amount
return data
        except Exception as e:
            print(e)
return data
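# Editor's note: the function below is an illustrative, self-contained sketch
# (not part of the original file) of the transport encoding QA_TTSBroker.call
# applies when encryption is enabled: zero-pad the JSON payload to the 16-byte
# AES block size, AES-CBC encrypt, base64-encode, then URL-quote (and the
# reverse on the way back). The key/iv values here are hypothetical placeholders.
def _transport_enc_roundtrip_sketch():
    from urllib.parse import quote, unquote
    key, iv = b'0' * 16, b'1' * 16                 # hypothetical 16-byte key/iv
    cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=default_backend())
    plain = json.dumps({"func": "ping"}).encode("utf-8")
    padded = plain + b"\x00" * (-len(plain) % 16)  # zero padding, as in encrypt()
    enc = cipher.encryptor()
    wire = quote(base64.encodebytes(enc.update(padded) + enc.finalize()))
    dec = cipher.decryptor()
    raw = base64.decodebytes(unquote(wire).encode("utf-8"))
    return (dec.update(raw) + dec.finalize()).rstrip(b"\x00") == plain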
if __name__ == "__main__":
import os
import QUANTAXIS as QA
print(
        'Before running, please start the tdxtradeserver exe first; the directory is the one you specified with get_tts, usually C:\\tdxTradeServer'
)
api = QA_TTSBroker(auto_logon=False)
print("---Ping---")
result = api.ping()
print(result)
print("---登入---")
result = api.logon()
if result["success"]:
for i in (0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 13, 14, 15):
print("---查询信息 cate=%d--" % i)
print(api.data_to_df(api.query_data(i)))
    print('============================== Order placement demo ========================')
    print('About to demonstrate a limit order: buy 000001, 100 shares at price 9.8')
    if str(input('I understand, place the order: press y to continue, n to quit'))[0] == 'y':
print(
api.send_order(
code='000001',
price=9.8,
amount=100,
towards=QA.ORDER_DIRECTION.BUY,
order_model=QA.ORDER_MODEL.LIMIT
)
)
print("---登出---")
print(api.logoff())
|
mit
|
Windy-Ground/scikit-learn
|
sklearn/metrics/metrics.py
|
233
|
1262
|
import warnings
warnings.warn("sklearn.metrics.metrics is deprecated and will be removed in "
"0.18. Please import from sklearn.metrics",
DeprecationWarning)
from .ranking import auc
from .ranking import average_precision_score
from .ranking import label_ranking_average_precision_score
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
|
bsd-3-clause
|
BiaDarkia/scikit-learn
|
sklearn/datasets/tests/test_svmlight_format.py
|
21
|
17406
|
from __future__ import division
from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import scipy.sparse as sp
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_in
from sklearn.utils.fixes import sp_version
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_almost_equal(X1.data, X2.data)
assert_array_almost_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_almost_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
    # test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_almost_equal(X.toarray(), Xgz.toarray())
assert_array_almost_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_almost_equal(X.toarray(), Xbz.toarray())
assert_array_almost_equal(y, ybz)
def test_load_invalid_file():
assert_raises(ValueError, load_svmlight_file, invalidfile)
def test_load_invalid_order_file():
assert_raises(ValueError, load_svmlight_file, invalidfile2)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
assert_raises(ValueError, load_svmlight_file, f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
def test_load_invalid_file2():
assert_raises(ValueError, load_svmlight_files,
[datafile, invalidfile, datafile])
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
assert_raises(TypeError, load_svmlight_file, .42)
def test_invalid_filename():
assert_raises(IOError, load_svmlight_file, "trou pic nic douille")
def test_dump():
X_sparse, y_dense = load_svmlight_file(datafile)
X_dense = X_sparse.toarray()
y_sparse = sp.csr_matrix(y_dense)
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
X_sliced = X_sparse[np.arange(X_sparse.shape[0])]
y_sliced = y_sparse[np.arange(y_sparse.shape[0])]
for X in (X_sparse, X_dense, X_sliced):
for y in (y_sparse, y_dense, y_sliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
if (sp.issparse(y) and y.shape[0] == 1):
# make sure y's shape is: (n_samples, n_labels)
# when it is sparse
y = y.T
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
X2_dense = X2.toarray()
if dtype == np.float32:
# allow a rounding error at the last decimal place
assert_array_almost_equal(
X_dense.astype(dtype), X2_dense, 4)
assert_array_almost_equal(
y_dense.astype(dtype), y2, 4)
else:
# allow a rounding error at the last decimal place
assert_array_almost_equal(
X_dense.astype(dtype), X2_dense, 15)
assert_array_almost_equal(
y_dense.astype(dtype), y2, 15)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y_dense = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
y_sparse = sp.csr_matrix(y_dense)
for y in [y_dense, y_sparse]:
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_almost_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_almost_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_almost_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
def test_load_with_long_qid():
# load svmfile with longint qid attribute
data = b("""
1 qid:0 0:1 1:2 2:3
0 qid:72048431380967004 0:1440446648 1:72048431380967004 2:236784985
0 qid:-9223372036854775807 0:1440446648 1:72048431380967004 2:236784985
3 qid:9223372036854775807 0:1440446648 1:72048431380967004 2:236784985""")
X, y, qid = load_svmlight_file(BytesIO(data), query_id=True)
true_X = [[1, 2, 3],
[1440446648, 72048431380967004, 236784985],
[1440446648, 72048431380967004, 236784985],
[1440446648, 72048431380967004, 236784985]]
true_y = [1, 0, 0, 3]
trueQID = [0, 72048431380967004, -9223372036854775807, 9223372036854775807]
assert_array_equal(y, true_y)
assert_array_equal(X.toarray(), true_X)
assert_array_equal(qid, trueQID)
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=qid, zero_based=True)
f.seek(0)
X, y, qid = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_equal(y, true_y)
assert_array_equal(X.toarray(), true_X)
assert_array_equal(qid, trueQID)
f.seek(0)
X, y = load_svmlight_file(f, query_id=False, zero_based=True)
assert_array_equal(y, true_y)
assert_array_equal(X.toarray(), true_X)
def test_load_zeros():
f = BytesIO()
true_X = sp.csr_matrix(np.zeros(shape=(3, 4)))
true_y = np.array([0, 1, 0])
dump_svmlight_file(true_X, true_y, f)
for zero_based in ['auto', True, False]:
f.seek(0)
X, y = load_svmlight_file(f, n_features=4, zero_based=zero_based)
assert_array_almost_equal(y, true_y)
assert_array_almost_equal(X.toarray(), true_X.toarray())
def test_load_with_offsets():
def check_load_with_offsets(sparsity, n_samples, n_features):
rng = np.random.RandomState(0)
X = rng.uniform(low=0.0, high=1.0, size=(n_samples, n_features))
if sparsity:
X[X < sparsity] = 0.0
X = sp.csr_matrix(X)
y = rng.randint(low=0, high=2, size=n_samples)
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
size = len(f.getvalue())
# put some marks that are likely to happen anywhere in a row
mark_0 = 0
mark_1 = size // 3
length_0 = mark_1 - mark_0
mark_2 = 4 * size // 5
length_1 = mark_2 - mark_1
# load the original sparse matrix into 3 independent CSR matrices
X_0, y_0 = load_svmlight_file(f, n_features=n_features,
offset=mark_0, length=length_0)
X_1, y_1 = load_svmlight_file(f, n_features=n_features,
offset=mark_1, length=length_1)
X_2, y_2 = load_svmlight_file(f, n_features=n_features,
offset=mark_2)
y_concat = np.concatenate([y_0, y_1, y_2])
X_concat = sp.vstack([X_0, X_1, X_2])
assert_array_almost_equal(y, y_concat)
assert_array_almost_equal(X.toarray(), X_concat.toarray())
# Generate a uniformly random sparse matrix
for sparsity in [0, 0.1, .5, 0.99, 1]:
for n_samples in [13, 101]:
for n_features in [2, 7, 41]:
yield check_load_with_offsets, sparsity, n_samples, n_features
def test_load_offset_exhaustive_splits():
rng = np.random.RandomState(0)
X = np.array([
[0, 0, 0, 0, 0, 0],
[1, 2, 3, 4, 0, 6],
[1, 2, 3, 4, 0, 6],
[0, 0, 0, 0, 0, 0],
[1, 0, 3, 0, 0, 0],
[0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0],
])
X = sp.csr_matrix(X)
n_samples, n_features = X.shape
y = rng.randint(low=0, high=2, size=n_samples)
query_id = np.arange(n_samples) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id)
f.seek(0)
size = len(f.getvalue())
# load the same data in 2 parts with all the possible byte offsets to
    # locate the split, so as to test particular boundary cases
for mark in range(size):
if sp_version < (0, 14) and (mark == 0 or mark > size - 100):
# old scipy does not support sparse matrices with 0 rows.
continue
f.seek(0)
X_0, y_0, q_0 = load_svmlight_file(f, n_features=n_features,
query_id=True, offset=0,
length=mark)
X_1, y_1, q_1 = load_svmlight_file(f, n_features=n_features,
query_id=True, offset=mark,
length=-1)
q_concat = np.concatenate([q_0, q_1])
y_concat = np.concatenate([y_0, y_1])
X_concat = sp.vstack([X_0, X_1])
assert_array_almost_equal(y, y_concat)
assert_array_equal(query_id, q_concat)
assert_array_almost_equal(X.toarray(), X_concat.toarray())
def test_load_with_offsets_error():
assert_raises_regex(ValueError, "n_features is required",
load_svmlight_file, datafile, offset=3, length=3)
|
bsd-3-clause
|
yzkang/QH_FInSight
|
code/RESULT_ENSEMBLE_gbdtgbdt_A_and_lgbgbdt_B.py
|
1
|
1398
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# date: 2017
# author: Kyz
# desc: ensemble of B_lgbgbdt and A_gbdtgbdt, online score 0.606803
import pandas as pd
import matplotlib.pyplot as plt
if __name__ == '__main__':
line = range(13463)
data5885 = pd.read_csv(u'../result/GBDT线上0.5885用了LGB特征选择/B_test_fslgb400_predict_without_cv_fillna_10_rd_0_GBDT_N_400_features_109.csv'
, index_col='no')
data584806 = pd.read_csv(u'../result/GBDT线上0.584806的结果GBDT特征选择/B_test_final_predict_fillna_10_rd_0_GBDT_N_400_features_142.csv',
index_col='no')
data_use_A_GBDT_offline_5805 = pd.read_csv('../result/B_test_2fs_using_A_GBDT_without_cv_fillna_1_N_141_features_192_offline_0.580506564827.csv',
index_col='no')
data_save1 = data584806
w1 = 0.245
w2 = 0.755
avg1 = data_use_A_GBDT_offline_5805['pred'] * w1 + data5885['pred'] * w2
data_save1['pred'] = avg1
temp = []
for i in range(13463):
if avg1.values[i] >= 0.085:
temp.append(avg1.values[i])
print len(temp)
data_save1.to_csv('../result/weighted_avg_of_' + str(w1) + '_Agbdtbe_' + str(w2) + '_m5885_' + str(len(temp))+'.csv')
# print avg1
# plt.figure(3)
# plt.scatter(x=line, y=avg1)
# # plt.show()
|
mpl-2.0
|
shotaroikeda/MoodTrakr
|
convert.py
|
1
|
5064
|
import pandas as pd
import numpy as np
import collections
import copy
import re
import ansiterm as Color
from multiprocessing import Process
import os
import gc
#######################
# COMMONLY USED REGEX #
#######################
user_regex = re.compile('@\w+')
url_regex = re.compile('(https?:\/\/(?:www\.|(?!www))[^\s\.]+\.[^\s]{2,}|www\.[^\s]+\.[^\s]{2,})')
palette = [Color.red, Color.green, Color.yellow, Color.light_purple, Color.purple, Color.cyan, Color.light_gray, Color.black]
def process_df(global_df, fname, start, end, div, color):
# Function that gets run per thread
print(color("PID-%d: Processing %d to %d with %d" % (os.getpid(), start, end, div)))
# Loop data sets to create smaller datasets
directory = 'twitter_sentiment_data/'
part = (start // div) + 1
print(color('PID-%d: Starting with part %d') % (os.getpid(), part))
while end > start:
print(color('-- PID-%d: Garbage Collection --') % (os.getpid()))
gc.collect() # Garbage collect before continuing
print(color('PID-%d: Converting data set items %d~%d' %
(os.getpid(), start, start+div)))
df = convert_data(global_df[start:start+div], color)
f = directory + fname + '.%04d.%04d.csv' % (part, len(df))
print(color('PID-%d: Saving dataset %s') % (os.getpid(), f))
df.to_csv(path_or_buf=f, encoding='utf-8')
# Post
part+=1
start+=div
def formatted_tweet(tweet):
return url_regex.sub('URL', user_regex.sub('USER', tweet)).split()
def convert_data(df, color):
lower = np.vectorize(lambda x: x.lower()) # Vectorized function to capitalize string
print(color("PID-%d: Searching for all words in database" % (os.getpid())))
words = set([word for tweet in lower(df['text'])
for word in formatted_tweet(tweet)]) # Obtain all words
# Construct headers to be used, along with a map of the word to the index
print(color("PID-%d: Creating new headers and processing structure" % (os.getpid())))
new_headers = list(words)
new_headers.append('POLARITY')
mapper = {k: n for n, k in enumerate(new_headers)} # Map word to index
print(color("PID-%d: Detected %d headers" % (os.getpid(), len(new_headers))))
# Preallocate memory for the amount of data required
print(color("-- PID-%d: Preallocating memory for dataframe --" % (os.getpid())))
new_dataframe = pd.DataFrame(index=np.arange(0, len(df)), columns=new_headers)
print(color("PID-%d: Processing data..." % (os.getpid())))
for n, data in enumerate(zip(lower(df['text']), df['polarity'])):
tweet, polarity = data
if n % 100 == 0:
print(color("PID-%d: %10.2f%% done." % (os.getpid(), (n / len(df['text'])) * 100)))
# Generate zeros
new_frame = np.zeros(len(new_headers))
new_frame[mapper['POLARITY']] = polarity
for word in formatted_tweet(tweet):
new_frame[mapper[word]] += 1
# Done with counting words
new_dataframe.iloc[n] = new_frame # add to dataframe
return new_dataframe
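# Editor's note: tiny, self-contained sketch (not part of the original script)
# of the bag-of-words construction convert_data performs: normalize tweets with
# formatted_tweet, build a word -> column mapper, then count occurrences per
# row. The example tweets below are made up.
def _bag_of_words_sketch():
    tweets = ["@alice check http://example.com now", "now now then"]
    tokens = [formatted_tweet(t.lower()) for t in tweets]  # USER/URL substitution
    vocab = sorted(set(word for toks in tokens for word in toks))
    mapper = {word: i for i, word in enumerate(vocab)}     # word -> column index
    rows = np.zeros((len(tweets), len(vocab)))
    for r, toks in enumerate(tokens):
        for word in toks:
            rows[r, mapper[word]] += 1                     # count occurrences
    return vocab, rows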
def main():
# Preprocessing data
training_dir = "twitter_sentiment_data/training.1600000.processed.noemoticon.csv"
testing_dir = "twitter_sentiment_data/testdata.manual.2009.06.14.csv"
row_headers = ["polarity", "id", "date", "query", "user", "text"]
# Data to convert here
print("Preparing existing data")
training_data = pd.pandas.read_csv(training_dir,
names=row_headers,
usecols=[0, 4, 5], encoding='ISO-8859-1')
print("Shuffling data")
# Shuffle the training data since all the values are aligned
training_data = training_data.iloc[np.random.permutation(len(training_data))]
training_data = training_data.reset_index(drop=True)
test_data = pd.pandas.read_csv(testing_dir,
names=row_headers,
usecols=[0, 4, 5], encoding='ISO-8859-1')
# Convert data
print("Converting training data")
processes = []
start = 0
end = len(training_data)
div = 1000
for i in range(4):
proc = end // 4
if i == 3:
args = (training_data, 'training_data', start, end, div, palette[i])
process = Process(target=process_df, args=args)
processes.append(process)
else:
args = (training_data, 'training_data', start, start+proc, div, palette[i])
process = Process(target=process_df, args=args)
processes.append(process)
processes[i].start()
start += proc
print("Converting testing data")
# Training data is already taken care of
    new_testing = convert_data(test_data, palette[0])  # convert_data also needs a display color
# Save to disk
new_testing.to_csv(path_or_buf='twitter_sentiment_data/testing_data.csv', encoding='utf-8')
for p in processes:
p.join()
print("Finished Converting Test Data")
if __name__ == '__main__':
main()
|
epl-1.0
|
dssg/wikienergy
|
disaggregator/build/pandas/pandas/tseries/tests/test_resample.py
|
1
|
58842
|
# pylint: disable=E1101
from datetime import datetime, timedelta
from functools import partial
from pandas.compat import range, lrange, zip, product
import numpy as np
from pandas import (Series, TimeSeries, DataFrame, Panel, Index,
isnull, notnull, Timestamp)
from pandas.tseries.index import date_range
from pandas.tseries.offsets import Minute, BDay
from pandas.tseries.period import period_range, PeriodIndex, Period
from pandas.tseries.resample import DatetimeIndex, TimeGrouper
from pandas.tseries.frequencies import MONTHS, DAYS
import pandas.tseries.offsets as offsets
import pandas as pd
import nose
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal)
import pandas.util.testing as tm
bday = BDay()
class TestResample(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(np.random.rand(len(dti)), dti)
def test_custom_grouper(self):
dti = DatetimeIndex(freq='Min', start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10))
s = Series(np.array([1] * len(dti)), index=dti, dtype='int64')
b = TimeGrouper(Minute(5))
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
b = TimeGrouper(Minute(5), closed='right', label='right')
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
self.assertEqual(g.ngroups, 2593)
self.assertTrue(notnull(g.mean()).all())
# construct expected val
arr = [1] + [5] * 2592
idx = dti[0:-1:5]
idx = idx.append(dti[-1:])
expect = Series(arr, index=idx)
        # GH2763 - return input dtype if we can
result = g.agg(np.sum)
assert_series_equal(result, expect)
df = DataFrame(np.random.rand(len(dti), 10), index=dti, dtype='float64')
r = df.groupby(b).agg(np.sum)
self.assertEqual(len(r.columns), 10)
self.assertEqual(len(r.index), 2593)
def test_resample_basic(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min',
name='index')
s = Series(np.random.randn(14), index=rng)
result = s.resample('5min', how='mean', closed='right', label='right')
expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=date_range('1/1/2000', periods=4, freq='5min'))
assert_series_equal(result, expected)
self.assertEqual(result.index.name, 'index')
result = s.resample('5min', how='mean', closed='left', label='right')
expected = Series([s[:5].mean(), s[5:10].mean(), s[10:].mean()],
index=date_range('1/1/2000 00:05', periods=3,
freq='5min'))
assert_series_equal(result, expected)
s = self.series
result = s.resample('5Min', how='last')
grouper = TimeGrouper(Minute(5), closed='left', label='left')
expect = s.groupby(grouper).agg(lambda x: x[-1])
assert_series_equal(result, expect)
def test_resample_how(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00',
freq='min', name='index')
s = Series(np.random.randn(14), index=rng)
grouplist = np.ones_like(s)
grouplist[0] = 0
grouplist[1:6] = 1
grouplist[6:11] = 2
grouplist[11:] = 3
args = ['sum', 'mean', 'std', 'sem', 'max', 'min',
'median', 'first', 'last', 'ohlc']
def _ohlc(group):
if isnull(group).all():
return np.repeat(np.nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
inds = date_range('1/1/2000', periods=4, freq='5min')
for arg in args:
if arg == 'ohlc':
func = _ohlc
else:
func = arg
try:
result = s.resample('5min', how=arg,
closed='right', label='right')
expected = s.groupby(grouplist).agg(func)
self.assertEqual(result.index.name, 'index')
if arg == 'ohlc':
expected = DataFrame(expected.values.tolist())
expected.columns = ['open', 'high', 'low', 'close']
expected.index = Index(inds, name='index')
assert_frame_equal(result, expected)
else:
expected.index = inds
assert_series_equal(result, expected)
except BaseException as exc:
exc.args += ('how=%s' % arg,)
raise
def test_resample_how_callables(self):
# GH 7929
data = np.arange(5, dtype=np.int64)
ind = pd.DatetimeIndex(start='2014-01-01', periods=len(data), freq='d')
df = pd.DataFrame({"A": data, "B": data}, index=ind)
def fn(x, a=1):
return str(type(x))
class fn_class:
def __call__(self, x):
return str(type(x))
df_standard = df.resample("M", how=fn)
df_lambda = df.resample("M", how=lambda x: str(type(x)))
df_partial = df.resample("M", how=partial(fn))
df_partial2 = df.resample("M", how=partial(fn, a=2))
df_class = df.resample("M", how=fn_class())
assert_frame_equal(df_standard, df_lambda)
assert_frame_equal(df_standard, df_partial)
assert_frame_equal(df_standard, df_partial2)
assert_frame_equal(df_standard, df_class)
def test_resample_with_timedeltas(self):
expected = DataFrame({'A' : np.arange(1480)})
expected = expected.groupby(expected.index // 30).sum()
expected.index = pd.timedelta_range('0 days',freq='30T',periods=50)
df = DataFrame({'A' : np.arange(1480)},index=pd.to_timedelta(np.arange(1480),unit='T'))
result = df.resample('30T',how='sum')
assert_frame_equal(result, expected)
def test_resample_rounding(self):
# GH 8371
# odd results when rounding is needed
data = """date,time,value
11-08-2014,00:00:01.093,1
11-08-2014,00:00:02.159,1
11-08-2014,00:00:02.667,1
11-08-2014,00:00:03.175,1
11-08-2014,00:00:07.058,1
11-08-2014,00:00:07.362,1
11-08-2014,00:00:08.324,1
11-08-2014,00:00:08.830,1
11-08-2014,00:00:08.982,1
11-08-2014,00:00:09.815,1
11-08-2014,00:00:10.540,1
11-08-2014,00:00:11.061,1
11-08-2014,00:00:11.617,1
11-08-2014,00:00:13.607,1
11-08-2014,00:00:14.535,1
11-08-2014,00:00:15.525,1
11-08-2014,00:00:17.960,1
11-08-2014,00:00:20.674,1
11-08-2014,00:00:21.191,1"""
from pandas.compat import StringIO
df = pd.read_csv(StringIO(data), parse_dates={'timestamp': ['date', 'time']}, index_col='timestamp')
df.index.name = None
result = df.resample('6s', how='sum')
expected = DataFrame({'value' : [4,9,4,2]},index=date_range('2014-11-08',freq='6s',periods=4))
assert_frame_equal(result,expected)
result = df.resample('7s', how='sum')
expected = DataFrame({'value' : [4,10,4,1]},index=date_range('2014-11-08',freq='7s',periods=4))
assert_frame_equal(result,expected)
result = df.resample('11s', how='sum')
expected = DataFrame({'value' : [11,8]},index=date_range('2014-11-08',freq='11s',periods=2))
assert_frame_equal(result,expected)
result = df.resample('13s', how='sum')
expected = DataFrame({'value' : [13,6]},index=date_range('2014-11-08',freq='13s',periods=2))
assert_frame_equal(result,expected)
result = df.resample('17s', how='sum')
expected = DataFrame({'value' : [16,3]},index=date_range('2014-11-08',freq='17s',periods=2))
assert_frame_equal(result,expected)
def test_resample_basic_from_daily(self):
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D', name='index')
s = Series(np.random.rand(len(dti)), dti)
# to weekly
result = s.resample('w-sun', how='last')
self.assertEqual(len(result), 3)
self.assertTrue((result.index.dayofweek == [6, 6, 6]).all())
self.assertEqual(result.irow(0), s['1/2/2005'])
self.assertEqual(result.irow(1), s['1/9/2005'])
self.assertEqual(result.irow(2), s.irow(-1))
result = s.resample('W-MON', how='last')
self.assertEqual(len(result), 2)
self.assertTrue((result.index.dayofweek == [0, 0]).all())
self.assertEqual(result.irow(0), s['1/3/2005'])
self.assertEqual(result.irow(1), s['1/10/2005'])
result = s.resample('W-TUE', how='last')
self.assertEqual(len(result), 2)
self.assertTrue((result.index.dayofweek == [1, 1]).all())
self.assertEqual(result.irow(0), s['1/4/2005'])
self.assertEqual(result.irow(1), s['1/10/2005'])
result = s.resample('W-WED', how='last')
self.assertEqual(len(result), 2)
self.assertTrue((result.index.dayofweek == [2, 2]).all())
self.assertEqual(result.irow(0), s['1/5/2005'])
self.assertEqual(result.irow(1), s['1/10/2005'])
result = s.resample('W-THU', how='last')
self.assertEqual(len(result), 2)
self.assertTrue((result.index.dayofweek == [3, 3]).all())
self.assertEqual(result.irow(0), s['1/6/2005'])
self.assertEqual(result.irow(1), s['1/10/2005'])
result = s.resample('W-FRI', how='last')
self.assertEqual(len(result), 2)
self.assertTrue((result.index.dayofweek == [4, 4]).all())
self.assertEqual(result.irow(0), s['1/7/2005'])
self.assertEqual(result.irow(1), s['1/10/2005'])
# to biz day
result = s.resample('B', how='last')
self.assertEqual(len(result), 7)
self.assertTrue((result.index.dayofweek == [4, 0, 1, 2, 3, 4, 0]).all())
self.assertEqual(result.irow(0), s['1/2/2005'])
self.assertEqual(result.irow(1), s['1/3/2005'])
self.assertEqual(result.irow(5), s['1/9/2005'])
self.assertEqual(result.index.name, 'index')
def test_resample_upsampling_picked_but_not_correct(self):
# Test for issue #3020
dates = date_range('01-Jan-2014','05-Jan-2014', freq='D')
series = Series(1, index=dates)
result = series.resample('D')
self.assertEqual(result.index[0], dates[0])
# GH 5955
        # incorrectly deciding to upsample when the axis frequency matches the resample frequency
import datetime
s = Series(np.arange(1.,6),index=[datetime.datetime(1975, 1, i, 12, 0) for i in range(1, 6)])
expected = Series(np.arange(1.,6),index=date_range('19750101',periods=5,freq='D'))
result = s.resample('D',how='count')
assert_series_equal(result,Series(1,index=expected.index))
result1 = s.resample('D',how='sum')
result2 = s.resample('D',how='mean')
result3 = s.resample('D')
assert_series_equal(result1,expected)
assert_series_equal(result2,expected)
assert_series_equal(result3,expected)
def test_resample_frame_basic(self):
df = tm.makeTimeDataFrame()
b = TimeGrouper('M')
g = df.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
result = df.resample('A')
assert_series_equal(result['A'], df['A'].resample('A'))
result = df.resample('M')
assert_series_equal(result['A'], df['A'].resample('M'))
df.resample('M', kind='period')
df.resample('W-WED', kind='period')
def test_resample_loffset(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min')
s = Series(np.random.randn(14), index=rng)
result = s.resample('5min', how='mean', closed='right', label='right',
loffset=timedelta(minutes=1))
idx = date_range('1/1/2000', periods=4, freq='5min')
expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=idx + timedelta(minutes=1))
assert_series_equal(result, expected)
expected = s.resample(
'5min', how='mean', closed='right', label='right',
loffset='1min')
assert_series_equal(result, expected)
expected = s.resample(
'5min', how='mean', closed='right', label='right',
loffset=Minute(1))
assert_series_equal(result, expected)
self.assertEqual(result.index.freq, Minute(5))
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D')
ser = Series(np.random.rand(len(dti)), dti)
# to weekly
result = ser.resample('w-sun', how='last')
expected = ser.resample('w-sun', how='last', loffset=-bday)
self.assertEqual(result.index[0] - bday, expected.index[0])
def test_resample_upsample(self):
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D', name='index')
s = Series(np.random.rand(len(dti)), dti)
# to minutely, by padding
result = s.resample('Min', fill_method='pad')
self.assertEqual(len(result), 12961)
self.assertEqual(result[0], s[0])
self.assertEqual(result[-1], s[-1])
self.assertEqual(result.index.name, 'index')
def test_upsample_with_limit(self):
rng = date_range('1/1/2000', periods=3, freq='5t')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('t', fill_method='ffill', limit=2)
expected = ts.reindex(result.index, method='ffill', limit=2)
assert_series_equal(result, expected)
def test_resample_ohlc(self):
s = self.series
grouper = TimeGrouper(Minute(5))
expect = s.groupby(grouper).agg(lambda x: x[-1])
result = s.resample('5Min', how='ohlc')
self.assertEqual(len(result), len(expect))
self.assertEqual(len(result.columns), 4)
xs = result.irow(-2)
self.assertEqual(xs['open'], s[-6])
self.assertEqual(xs['high'], s[-6:-1].max())
self.assertEqual(xs['low'], s[-6:-1].min())
self.assertEqual(xs['close'], s[-2])
xs = result.irow(0)
self.assertEqual(xs['open'], s[0])
self.assertEqual(xs['high'], s[:5].max())
self.assertEqual(xs['low'], s[:5].min())
self.assertEqual(xs['close'], s[4])
def test_resample_ohlc_dataframe(self):
df = (pd.DataFrame({'PRICE': {Timestamp('2011-01-06 10:59:05', tz=None): 24990,
Timestamp('2011-01-06 12:43:33', tz=None): 25499,
Timestamp('2011-01-06 12:54:09', tz=None): 25499},
'VOLUME': {Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
Timestamp('2011-01-06 12:54:09', tz=None): 100000000}})
).reindex_axis(['VOLUME', 'PRICE'], axis=1)
res = df.resample('H', how='ohlc')
exp = pd.concat([df['VOLUME'].resample('H', how='ohlc'),
df['PRICE'].resample('H', how='ohlc')],
axis=1,
keys=['VOLUME', 'PRICE'])
assert_frame_equal(exp, res)
df.columns = [['a', 'b'], ['c', 'd']]
res = df.resample('H', how='ohlc')
exp.columns = pd.MultiIndex.from_tuples([('a', 'c', 'open'), ('a', 'c', 'high'),
('a', 'c', 'low'), ('a', 'c', 'close'), ('b', 'd', 'open'),
('b', 'd', 'high'), ('b', 'd', 'low'), ('b', 'd', 'close')])
assert_frame_equal(exp, res)
# dupe columns fail atm
# df.columns = ['PRICE', 'PRICE']
def test_resample_dup_index(self):
# GH 4812
# dup columns with resample raising
df = DataFrame(np.random.randn(4,12),index=[2000,2000,2000,2000],columns=[ Period(year=2000,month=i+1,freq='M') for i in range(12) ])
df.iloc[3,:] = np.nan
result = df.resample('Q',axis=1)
expected = df.groupby(lambda x: int((x.month-1)/3),axis=1).mean()
expected.columns = [ Period(year=2000,quarter=i+1,freq='Q') for i in range(4) ]
assert_frame_equal(result, expected)
def test_resample_reresample(self):
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D')
s = Series(np.random.rand(len(dti)), dti)
bs = s.resample('B', closed='right', label='right')
result = bs.resample('8H')
self.assertEqual(len(result), 22)
tm.assert_isinstance(result.index.freq, offsets.DateOffset)
self.assertEqual(result.index.freq, offsets.Hour(8))
def test_resample_timestamp_to_period(self):
ts = _simple_ts('1/1/1990', '1/1/2000')
result = ts.resample('A-DEC', kind='period')
expected = ts.resample('A-DEC')
expected.index = period_range('1990', '2000', freq='a-dec')
assert_series_equal(result, expected)
result = ts.resample('A-JUN', kind='period')
expected = ts.resample('A-JUN')
expected.index = period_range('1990', '2000', freq='a-jun')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period')
expected = ts.resample('M')
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period')
expected = ts.resample('M')
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
def test_ohlc_5min(self):
def _ohlc(group):
if isnull(group).all():
return np.repeat(np.nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
rng = date_range('1/1/2000 00:00:00', '1/1/2000 5:59:50',
freq='10s')
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('5min', how='ohlc', closed='right',
label='right')
self.assertTrue((resampled.ix['1/1/2000 00:00'] == ts[0]).all())
exp = _ohlc(ts[1:31])
self.assertTrue((resampled.ix['1/1/2000 00:05'] == exp).all())
exp = _ohlc(ts['1/1/2000 5:55:01':])
self.assertTrue((resampled.ix['1/1/2000 6:00:00'] == exp).all())
def test_downsample_non_unique(self):
rng = date_range('1/1/2000', '2/29/2000')
rng2 = rng.repeat(5).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
result = ts.resample('M', how='mean')
expected = ts.groupby(lambda x: x.month).mean()
self.assertEqual(len(result), 2)
assert_almost_equal(result[0], expected[1])
assert_almost_equal(result[1], expected[2])
def test_asfreq_non_unique(self):
# GH #1077
rng = date_range('1/1/2000', '2/29/2000')
rng2 = rng.repeat(2).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
self.assertRaises(Exception, ts.asfreq, 'B')
def test_resample_axis1(self):
rng = date_range('1/1/2000', '2/29/2000')
df = DataFrame(np.random.randn(3, len(rng)), columns=rng,
index=['a', 'b', 'c'])
result = df.resample('M', axis=1)
expected = df.T.resample('M').T
tm.assert_frame_equal(result, expected)
def test_resample_panel(self):
rng = date_range('1/1/2000', '6/30/2000')
n = len(rng)
panel = Panel(np.random.randn(3, n, 5),
items=['one', 'two', 'three'],
major_axis=rng,
minor_axis=['a', 'b', 'c', 'd', 'e'])
result = panel.resample('M', axis=1)
def p_apply(panel, f):
result = {}
for item in panel.items:
result[item] = f(panel[item])
return Panel(result, items=panel.items)
expected = p_apply(panel, lambda x: x.resample('M'))
tm.assert_panel_equal(result, expected)
panel2 = panel.swapaxes(1, 2)
result = panel2.resample('M', axis=2)
expected = p_apply(panel2, lambda x: x.resample('M', axis=1))
tm.assert_panel_equal(result, expected)
def test_resample_panel_numpy(self):
rng = date_range('1/1/2000', '6/30/2000')
n = len(rng)
panel = Panel(np.random.randn(3, n, 5),
items=['one', 'two', 'three'],
major_axis=rng,
minor_axis=['a', 'b', 'c', 'd', 'e'])
result = panel.resample('M', how=lambda x: x.mean(1), axis=1)
expected = panel.resample('M', how='mean', axis=1)
tm.assert_panel_equal(result, expected)
panel = panel.swapaxes(1, 2)
result = panel.resample('M', how=lambda x: x.mean(2), axis=2)
expected = panel.resample('M', how='mean', axis=2)
tm.assert_panel_equal(result, expected)
def test_resample_anchored_ticks(self):
# If a fixed delta (5 minute, 4 hour) evenly divides a day, we should
# "anchor" the origin at midnight so we get regular intervals rather
# than starting from the first timestamp which might start in the middle
# of a desired interval
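# Illustrative sketch (not part of the original test; it uses the same legacy
# resample keywords as the rest of this file): with data starting at 04:00:00,
# '4h' bins are anchored to midnight (00:00, 04:00, 08:00, ...), so dropping
# the first couple of observations must not shift the bin edges:
#   demo = Series(np.arange(10.),
#                 index=date_range('1/1/2000 04:00:00', periods=10, freq='h'))
#   demo.resample('4h', how='sum', closed='left', label='left')
#   # -> labels at 04:00, 08:00, 12:00, i.e. anchored to midnight rather than
#   #    measured from the first timestamp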
rng = date_range('1/1/2000 04:00:00', periods=86400, freq='s')
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
freqs = ['t', '5t', '15t', '30t', '4h', '12h']
for freq in freqs:
result = ts[2:].resample(freq, closed='left', label='left')
expected = ts.resample(freq, closed='left', label='left')
assert_series_equal(result, expected)
def test_resample_single_group(self):
mysum = lambda x: x.sum()
rng = date_range('2000-1-1', '2000-2-10', freq='D')
ts = Series(np.random.randn(len(rng)), index=rng)
assert_series_equal(ts.resample('M', how='sum'),
ts.resample('M', how=mysum))
rng = date_range('2000-1-1', '2000-1-10', freq='D')
ts = Series(np.random.randn(len(rng)), index=rng)
assert_series_equal(ts.resample('M', how='sum'),
ts.resample('M', how=mysum))
# GH 3849
s = Series([30.1, 31.6], index=[Timestamp('20070915 15:30:00'),
Timestamp('20070915 15:40:00')])
expected = Series([0.75], index=[Timestamp('20070915')])
result = s.resample('D', how=lambda x: np.std(x))
assert_series_equal(result, expected)
def test_resample_base(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 02:00', freq='s')
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('5min', base=2)
exp_rng = date_range('12/31/1999 23:57:00', '1/1/2000 01:57',
freq='5min')
self.assertTrue(resampled.index.equals(exp_rng))
def test_resample_daily_anchored(self):
rng = date_range('1/1/2000 0:00:00', periods=10000, freq='T')
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
result = ts[2:].resample('D', closed='left', label='left')
expected = ts.resample('D', closed='left', label='left')
assert_series_equal(result, expected)
def test_resample_to_period_monthly_buglet(self):
# GH #1259
rng = date_range('1/1/2000', '12/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('M', kind='period')
exp_index = period_range('Jan-2000', 'Dec-2000', freq='M')
self.assertTrue(result.index.equals(exp_index))
def test_resample_empty(self):
ts = _simple_ts('1/1/2000', '2/1/2000')[:0]
result = ts.resample('A')
self.assertEqual(len(result), 0)
self.assertEqual(result.index.freqstr, 'A-DEC')
result = ts.resample('A', kind='period')
self.assertEqual(len(result), 0)
self.assertEqual(result.index.freqstr, 'A-DEC')
xp = DataFrame()
rs = xp.resample('A')
assert_frame_equal(xp, rs)
def test_weekly_resample_buglet(self):
# #1327
rng = date_range('1/1/2000', freq='B', periods=20)
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('W')
expected = ts.resample('W-SUN')
assert_series_equal(resampled, expected)
def test_monthly_resample_error(self):
# #1451
dates = date_range('4/16/2012 20:00', periods=5000, freq='h')
ts = Series(np.random.randn(len(dates)), index=dates)
# it works!
result = ts.resample('M')
def test_resample_anchored_intraday(self):
# #1471, #1458
rng = date_range('1/1/2012', '4/1/2012', freq='100min')
df = DataFrame(rng.month, index=rng)
result = df.resample('M')
expected = df.resample('M', kind='period').to_timestamp(how='end')
tm.assert_frame_equal(result, expected)
result = df.resample('M', closed='left')
exp = df.tshift(1, freq='D').resample('M', kind='period')
exp = exp.to_timestamp(how='end')
tm.assert_frame_equal(result, exp)
rng = date_range('1/1/2012', '4/1/2012', freq='100min')
df = DataFrame(rng.month, index=rng)
result = df.resample('Q')
expected = df.resample('Q', kind='period').to_timestamp(how='end')
tm.assert_frame_equal(result, expected)
result = df.resample('Q', closed='left')
expected = df.tshift(1, freq='D').resample('Q', kind='period',
closed='left')
expected = expected.to_timestamp(how='end')
tm.assert_frame_equal(result, expected)
ts = _simple_ts('2012-04-29 23:00', '2012-04-30 5:00', freq='h')
resampled = ts.resample('M')
self.assertEqual(len(resampled), 1)
def test_resample_anchored_monthstart(self):
ts = _simple_ts('1/1/2000', '12/31/2002')
freqs = ['MS', 'BMS', 'QS-MAR', 'AS-DEC', 'AS-JUN']
for freq in freqs:
result = ts.resample(freq, how='mean')
def test_resample_anchored_multiday(self):
# When resampling a range spanning multiple days, ensure that the
# start date gets used to determine the offset. Fixes issue where
# a one day period is not a multiple of the frequency.
#
# See: https://github.com/pydata/pandas/issues/8683
s = pd.Series(np.random.randn(5),
index=pd.date_range('2014-10-14 23:06:23.206',
periods=3, freq='400L')
| pd.date_range('2014-10-15 23:00:00',
periods=2, freq='2200L'))
# Ensure left closing works
result = s.resample('2200L', 'mean')
self.assertEqual(result.index[-1],
pd.Timestamp('2014-10-15 23:00:02.000'))
# Ensure right closing works
result = s.resample('2200L', 'mean', label='right')
self.assertEqual(result.index[-1],
pd.Timestamp('2014-10-15 23:00:04.200'))
def test_corner_cases(self):
# miscellaneous test coverage
rng = date_range('1/1/2000', periods=12, freq='t')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('5t', closed='right', label='left')
ex_index = date_range('1999-12-31 23:55', periods=4, freq='5t')
self.assertTrue(result.index.equals(ex_index))
len0pts = _simple_pts('2007-01', '2010-05', freq='M')[:0]
# it works
result = len0pts.resample('A-DEC')
self.assertEqual(len(result), 0)
# resample to periods
ts = _simple_ts('2000-04-28', '2000-04-30 11:00', freq='h')
result = ts.resample('M', kind='period')
self.assertEqual(len(result), 1)
self.assertEqual(result.index[0], Period('2000-04', freq='M'))
def test_anchored_lowercase_buglet(self):
dates = date_range('4/16/2012 20:00', periods=50000, freq='s')
ts = Series(np.random.randn(len(dates)), index=dates)
# it works!
ts.resample('d')
def test_upsample_apply_functions(self):
# #1596
rng = pd.date_range('2012-06-12', periods=4, freq='h')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('20min', how=['mean', 'sum'])
tm.assert_isinstance(result, DataFrame)
def test_resample_not_monotonic(self):
rng = pd.date_range('2012-06-12', periods=200, freq='h')
ts = Series(np.random.randn(len(rng)), index=rng)
ts = ts.take(np.random.permutation(len(ts)))
result = ts.resample('D', how='sum')
exp = ts.sort_index().resample('D', how='sum')
assert_series_equal(result, exp)
def test_resample_median_bug_1688(self):
for dtype in ['int64','int32','float64','float32']:
df = DataFrame([1, 2], index=[datetime(2012, 1, 1, 0, 0, 0),
datetime(2012, 1, 1, 0, 5, 0)],
dtype = dtype)
result = df.resample("T", how=lambda x: x.mean())
exp = df.asfreq('T')
tm.assert_frame_equal(result, exp)
result = df.resample("T", how="median")
exp = df.asfreq('T')
tm.assert_frame_equal(result, exp)
def test_how_lambda_functions(self):
ts = _simple_ts('1/1/2000', '4/1/2000')
result = ts.resample('M', how=lambda x: x.mean())
exp = ts.resample('M', how='mean')
tm.assert_series_equal(result, exp)
self.assertRaises(Exception, ts.resample, 'M',
how=[lambda x: x.mean(), lambda x: x.std(ddof=1)])
result = ts.resample('M', how={'foo': lambda x: x.mean(),
'bar': lambda x: x.std(ddof=1)})
foo_exp = ts.resample('M', how='mean')
bar_exp = ts.resample('M', how='std')
tm.assert_series_equal(result['foo'], foo_exp)
tm.assert_series_equal(result['bar'], bar_exp)
def test_resample_unequal_times(self):
# #1772
start = datetime(1999, 3, 1, 5)
# end hour is less than start
end = datetime(2012, 7, 31, 4)
bad_ind = date_range(start, end, freq="30min")
df = DataFrame({'close': 1}, index=bad_ind)
# it works!
df.resample('AS', 'sum')
def test_resample_consistency(self):
# GH 6418
# resample with bfill / limit / reindex consistency
i30 = pd.date_range('2002-02-02', periods=4, freq='30T')
s = pd.Series(np.arange(4.), index=i30)
s[2] = np.NaN
# Upsample by factor 3 with reindex() and resample() methods:
i10 = pd.date_range(i30[0], i30[-1], freq='10T')
s10 = s.reindex(index=i10, method='bfill')
s10_2 = s.reindex(index=i10, method='bfill', limit=2)
rl = s.reindex_like(s10, method='bfill', limit=2)
r10_2 = s.resample('10Min', fill_method='bfill', limit=2)
r10 = s.resample('10Min', fill_method='bfill')
# s10_2, r10, r10_2, rl should all be equal
assert_series_equal(s10_2, r10)
assert_series_equal(s10_2, r10_2)
assert_series_equal(s10_2, rl)
def test_resample_timegrouper(self):
# GH 7227
dates1 = [datetime(2014, 10, 1), datetime(2014, 9, 3),
datetime(2014, 11, 5), datetime(2014, 9, 5),
datetime(2014, 10, 8), datetime(2014, 7, 15)]
dates2 = dates1[:2] + [pd.NaT] + dates1[2:4] + [pd.NaT] + dates1[4:]
dates3 = [pd.NaT] + dates1 + [pd.NaT]
for dates in [dates1, dates2, dates3]:
df = DataFrame(dict(A=dates, B=np.arange(len(dates))))
result = df.set_index('A').resample('M', how='count')
exp_idx = pd.DatetimeIndex(['2014-07-31', '2014-08-31', '2014-09-30',
'2014-10-31', '2014-11-30'], freq='M', name='A')
expected = DataFrame({'B': [1, 0, 2, 2, 1]}, index=exp_idx)
assert_frame_equal(result, expected)
result = df.groupby(pd.Grouper(freq='M', key='A')).count()
assert_frame_equal(result, expected)
df = DataFrame(dict(A=dates, B=np.arange(len(dates)), C=np.arange(len(dates))))
result = df.set_index('A').resample('M', how='count')
expected = DataFrame({'B': [1, 0, 2, 2, 1], 'C': [1, 0, 2, 2, 1]},
index=exp_idx, columns=['B', 'C'])
assert_frame_equal(result, expected)
result = df.groupby(pd.Grouper(freq='M', key='A')).count()
assert_frame_equal(result, expected)
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
def _simple_pts(start, end, freq='D'):
rng = period_range(start, end, freq=freq)
return TimeSeries(np.random.randn(len(rng)), index=rng)
class TestResamplePeriodIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_annual_upsample_D_s_f(self):
self._check_annual_upsample_cases('D', 'start', 'ffill')
def test_annual_upsample_D_e_f(self):
self._check_annual_upsample_cases('D', 'end', 'ffill')
def test_annual_upsample_D_s_b(self):
self._check_annual_upsample_cases('D', 'start', 'bfill')
def test_annual_upsample_D_e_b(self):
self._check_annual_upsample_cases('D', 'end', 'bfill')
def test_annual_upsample_B_s_f(self):
self._check_annual_upsample_cases('B', 'start', 'ffill')
def test_annual_upsample_B_e_f(self):
self._check_annual_upsample_cases('B', 'end', 'ffill')
def test_annual_upsample_B_s_b(self):
self._check_annual_upsample_cases('B', 'start', 'bfill')
def test_annual_upsample_B_e_b(self):
self._check_annual_upsample_cases('B', 'end', 'bfill')
def test_annual_upsample_M_s_f(self):
self._check_annual_upsample_cases('M', 'start', 'ffill')
def test_annual_upsample_M_e_f(self):
self._check_annual_upsample_cases('M', 'end', 'ffill')
def test_annual_upsample_M_s_b(self):
self._check_annual_upsample_cases('M', 'start', 'bfill')
def test_annual_upsample_M_e_b(self):
self._check_annual_upsample_cases('M', 'end', 'bfill')
def _check_annual_upsample_cases(self, targ, conv, meth, end='12/31/1991'):
for month in MONTHS:
ts = _simple_pts('1/1/1990', end, freq='A-%s' % month)
result = ts.resample(targ, fill_method=meth,
convention=conv)
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, meth).to_period()
assert_series_equal(result, expected)
def test_basic_downsample(self):
ts = _simple_pts('1/1/1990', '6/30/1995', freq='M')
result = ts.resample('a-dec')
expected = ts.groupby(ts.index.year).mean()
expected.index = period_range('1/1/1990', '6/30/1995',
freq='a-dec')
assert_series_equal(result, expected)
# this is ok
assert_series_equal(ts.resample('a-dec'), result)
assert_series_equal(ts.resample('a'), result)
def test_not_subperiod(self):
# These are incompatible period rules for resampling
ts = _simple_pts('1/1/1990', '6/30/1995', freq='w-wed')
self.assertRaises(ValueError, ts.resample, 'a-dec')
self.assertRaises(ValueError, ts.resample, 'q-mar')
self.assertRaises(ValueError, ts.resample, 'M')
self.assertRaises(ValueError, ts.resample, 'w-thu')
def test_basic_upsample(self):
ts = _simple_pts('1/1/1990', '6/30/1995', freq='M')
result = ts.resample('a-dec')
resampled = result.resample('D', fill_method='ffill', convention='end')
expected = result.to_timestamp('D', how='end')
expected = expected.asfreq('D', 'ffill').to_period()
assert_series_equal(resampled, expected)
def test_upsample_with_limit(self):
rng = period_range('1/1/2000', periods=5, freq='A')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('M', fill_method='ffill', limit=2,
convention='end')
expected = ts.asfreq('M').reindex(result.index, method='ffill',
limit=2)
assert_series_equal(result, expected)
def test_annual_upsample(self):
ts = _simple_pts('1/1/1990', '12/31/1995', freq='A-DEC')
df = DataFrame({'a': ts})
rdf = df.resample('D', fill_method='ffill')
exp = df['a'].resample('D', fill_method='ffill')
assert_series_equal(rdf['a'], exp)
rng = period_range('2000', '2003', freq='A-DEC')
ts = Series([1, 2, 3, 4], index=rng)
result = ts.resample('M', fill_method='ffill')
ex_index = period_range('2000-01', '2003-12', freq='M')
expected = ts.asfreq('M', how='start').reindex(ex_index,
method='ffill')
assert_series_equal(result, expected)
def test_quarterly_upsample(self):
targets = ['D', 'B', 'M']
for month in MONTHS:
ts = _simple_pts('1/1/1990', '12/31/1995', freq='Q-%s' % month)
for targ, conv in product(targets, ['start', 'end']):
result = ts.resample(targ, fill_method='ffill',
convention=conv)
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, 'ffill').to_period()
assert_series_equal(result, expected)
def test_monthly_upsample(self):
targets = ['D', 'B']
ts = _simple_pts('1/1/1990', '12/31/1995', freq='M')
for targ, conv in product(targets, ['start', 'end']):
result = ts.resample(targ, fill_method='ffill',
convention=conv)
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, 'ffill').to_period()
assert_series_equal(result, expected)
def test_fill_method_and_how_upsample(self):
# GH2073
s = Series(np.arange(9,dtype='int64'),
index=date_range('2010-01-01', periods=9, freq='Q'))
last = s.resample('M', fill_method='ffill')
both = s.resample('M', how='last', fill_method='ffill').astype('int64')
assert_series_equal(last, both)
def test_weekly_upsample(self):
targets = ['D', 'B']
for day in DAYS:
ts = _simple_pts('1/1/1990', '12/31/1995', freq='W-%s' % day)
for targ, conv in product(targets, ['start', 'end']):
result = ts.resample(targ, fill_method='ffill',
convention=conv)
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, 'ffill').to_period()
assert_series_equal(result, expected)
def test_resample_to_timestamps(self):
ts = _simple_pts('1/1/1990', '12/31/1995', freq='M')
result = ts.resample('A-DEC', kind='timestamp')
expected = ts.to_timestamp(how='end').resample('A-DEC')
assert_series_equal(result, expected)
def test_resample_to_quarterly(self):
for month in MONTHS:
ts = _simple_pts('1990', '1992', freq='A-%s' % month)
quar_ts = ts.resample('Q-%s' % month, fill_method='ffill')
stamps = ts.to_timestamp('D', how='start')
qdates = period_range(ts.index[0].asfreq('D', 'start'),
ts.index[-1].asfreq('D', 'end'),
freq='Q-%s' % month)
expected = stamps.reindex(qdates.to_timestamp('D', 's'),
method='ffill')
expected.index = qdates
assert_series_equal(quar_ts, expected)
# conforms, but different month
ts = _simple_pts('1990', '1992', freq='A-JUN')
for how in ['start', 'end']:
result = ts.resample('Q-MAR', convention=how, fill_method='ffill')
expected = ts.asfreq('Q-MAR', how=how)
expected = expected.reindex(result.index, method='ffill')
# .to_timestamp('D')
# expected = expected.resample('Q-MAR', fill_method='ffill')
assert_series_equal(result, expected)
def test_resample_fill_missing(self):
rng = PeriodIndex([2000, 2005, 2007, 2009], freq='A')
s = TimeSeries(np.random.randn(4), index=rng)
stamps = s.to_timestamp()
filled = s.resample('A')
expected = stamps.resample('A').to_period('A')
assert_series_equal(filled, expected)
filled = s.resample('A', fill_method='ffill')
expected = stamps.resample('A', fill_method='ffill').to_period('A')
assert_series_equal(filled, expected)
def test_cant_fill_missing_dups(self):
rng = PeriodIndex([2000, 2005, 2005, 2007, 2007], freq='A')
s = TimeSeries(np.random.randn(5), index=rng)
self.assertRaises(Exception, s.resample, 'A')
def test_resample_5minute(self):
rng = period_range('1/1/2000', '1/5/2000', freq='T')
ts = TimeSeries(np.random.randn(len(rng)), index=rng)
result = ts.resample('5min')
expected = ts.to_timestamp().resample('5min')
assert_series_equal(result, expected)
def test_upsample_daily_business_daily(self):
ts = _simple_pts('1/1/2000', '2/1/2000', freq='B')
result = ts.resample('D')
expected = ts.asfreq('D').reindex(period_range('1/3/2000', '2/1/2000'))
assert_series_equal(result, expected)
ts = _simple_pts('1/1/2000', '2/1/2000')
result = ts.resample('H', convention='s')
exp_rng = period_range('1/1/2000', '2/1/2000 23:00', freq='H')
expected = ts.asfreq('H', how='s').reindex(exp_rng)
assert_series_equal(result, expected)
def test_resample_empty(self):
ts = _simple_pts('1/1/2000', '2/1/2000')[:0]
result = ts.resample('A')
self.assertEqual(len(result), 0)
def test_resample_irregular_sparse(self):
dr = date_range(start='1/1/2012', freq='5min', periods=1000)
s = Series(np.array(100), index=dr)
# subset the data.
subset = s[:'2012-01-04 06:55']
result = subset.resample('10min', how=len)
expected = s.resample('10min', how=len).ix[result.index]
assert_series_equal(result, expected)
def test_resample_weekly_all_na(self):
rng = date_range('1/1/2000', periods=10, freq='W-WED')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('W-THU')
self.assertTrue(result.isnull().all())
result = ts.resample('W-THU', fill_method='ffill')[:-1]
expected = ts.asfreq('W-THU', method='ffill')
assert_series_equal(result, expected)
def test_resample_tz_localized(self):
dr = date_range(start='2012-4-13', end='2012-5-1')
ts = Series(lrange(len(dr)), dr)
ts_utc = ts.tz_localize('UTC')
ts_local = ts_utc.tz_convert('America/Los_Angeles')
result = ts_local.resample('W')
ts_local_naive = ts_local.copy()
ts_local_naive.index = [x.replace(tzinfo=None)
for x in ts_local_naive.index.to_pydatetime()]
exp = ts_local_naive.resample('W').tz_localize('America/Los_Angeles')
assert_series_equal(result, exp)
# it works
result = ts_local.resample('D')
# #2245
idx = date_range('2001-09-20 15:59', '2001-09-20 16:00', freq='T',
tz='Australia/Sydney')
s = Series([1, 2], index=idx)
result = s.resample('D', closed='right', label='right')
ex_index = date_range('2001-09-21', periods=1, freq='D',
tz='Australia/Sydney')
expected = Series([1.5], index=ex_index)
assert_series_equal(result, expected)
# for good measure
result = s.resample('D', kind='period')
ex_index = period_range('2001-09-20', periods=1, freq='D')
expected = Series([1.5], index=ex_index)
assert_series_equal(result, expected)
# GH 6397
# comparing an offset that doesn't propagate tz's
rng = date_range('1/1/2011', periods=20000, freq='H')
rng = rng.tz_localize('EST')
ts = DataFrame(index=rng)
ts['first']=np.random.randn(len(rng))
ts['second']=np.cumsum(np.random.randn(len(rng)))
expected = DataFrame({ 'first' : ts.resample('A',how=np.sum)['first'],
'second' : ts.resample('A',how=np.mean)['second'] },columns=['first','second'])
result = ts.resample('A', how={'first':np.sum, 'second':np.mean}).reindex(columns=['first','second'])
assert_frame_equal(result,expected)
def test_closed_left_corner(self):
# #1465
s = Series(np.random.randn(21),
index=date_range(start='1/1/2012 9:30',
freq='1min', periods=21))
s[0] = np.nan
result = s.resample('10min', how='mean', closed='left', label='right')
exp = s[1:].resample('10min', how='mean', closed='left', label='right')
assert_series_equal(result, exp)
result = s.resample('10min', how='mean', closed='left', label='left')
exp = s[1:].resample('10min', how='mean', closed='left', label='left')
ex_index = date_range(start='1/1/2012 9:30', freq='10min', periods=3)
self.assertTrue(result.index.equals(ex_index))
assert_series_equal(result, exp)
def test_quarterly_resampling(self):
rng = period_range('2000Q1', periods=10, freq='Q-DEC')
ts = Series(np.arange(10), index=rng)
result = ts.resample('A')
exp = ts.to_timestamp().resample('A').to_period()
assert_series_equal(result, exp)
def test_resample_weekly_bug_1726(self):
# 8/6/12 is a Monday
ind = DatetimeIndex(start="8/6/2012", end="8/26/2012", freq="D")
n = len(ind)
data = [[x] * 5 for x in range(n)]
df = DataFrame(data, columns=['open', 'high', 'low', 'close', 'vol'],
index=ind)
# it works!
df.resample('W-MON', how='first', closed='left', label='left')
def test_resample_bms_2752(self):
# GH2753
foo = pd.Series(index=pd.bdate_range('20000101','20000201'))
res1 = foo.resample("BMS")
res2 = foo.resample("BMS").resample("B")
self.assertEqual(res1.index[0], Timestamp('20000103'))
self.assertEqual(res1.index[0], res2.index[0])
# def test_monthly_convention_span(self):
# rng = period_range('2000-01', periods=3, freq='M')
# ts = Series(np.arange(3), index=rng)
# # hacky way to get same thing
# exp_index = period_range('2000-01-01', '2000-03-31', freq='D')
# expected = ts.asfreq('D', how='end').reindex(exp_index)
# expected = expected.fillna(method='bfill')
# result = ts.resample('D', convention='span')
# assert_series_equal(result, expected)
def test_default_right_closed_label(self):
end_freq = ['D', 'Q', 'M', 'D']
end_types = ['M', 'A', 'Q', 'W']
for from_freq, to_freq in zip(end_freq, end_types):
idx = DatetimeIndex(start='8/15/2012', periods=100,
freq=from_freq)
df = DataFrame(np.random.randn(len(idx), 2), idx)
resampled = df.resample(to_freq)
assert_frame_equal(resampled, df.resample(to_freq, closed='right',
label='right'))
def test_default_left_closed_label(self):
others = ['MS', 'AS', 'QS', 'D', 'H']
others_freq = ['D', 'Q', 'M', 'H', 'T']
for from_freq, to_freq in zip(others_freq, others):
idx = DatetimeIndex(start='8/15/2012', periods=100,
freq=from_freq)
df = DataFrame(np.random.randn(len(idx), 2), idx)
resampled = df.resample(to_freq)
assert_frame_equal(resampled, df.resample(to_freq, closed='left',
label='left'))
def test_all_values_single_bin(self):
# 2070
index = period_range(start="2012-01-01", end="2012-12-31", freq="M")
s = Series(np.random.randn(len(index)), index=index)
result = s.resample("A", how='mean')
tm.assert_almost_equal(result[0], s.mean())
def test_evenly_divisible_with_no_extra_bins(self):
# 4076
# when the frequency is evenly divisible, extra bins were sometimes created
df = DataFrame(np.random.randn(9, 3), index=date_range('2000-1-1', periods=9))
result = df.resample('5D')
expected = pd.concat([df.iloc[0:5].mean(),df.iloc[5:].mean()],axis=1).T
expected.index = [Timestamp('2000-1-1'),Timestamp('2000-1-6')]
assert_frame_equal(result,expected)
index = date_range(start='2001-5-4', periods=28)
df = DataFrame(
[{'REST_KEY': 1, 'DLY_TRN_QT': 80, 'DLY_SLS_AMT': 90,
'COOP_DLY_TRN_QT': 30, 'COOP_DLY_SLS_AMT': 20}] * 28 +
[{'REST_KEY': 2, 'DLY_TRN_QT': 70, 'DLY_SLS_AMT': 10,
'COOP_DLY_TRN_QT': 50, 'COOP_DLY_SLS_AMT': 20}] * 28,
index=index.append(index)).sort()
index = date_range('2001-5-4',periods=4,freq='7D')
expected = DataFrame(
[{'REST_KEY': 14, 'DLY_TRN_QT': 14, 'DLY_SLS_AMT': 14,
'COOP_DLY_TRN_QT': 14, 'COOP_DLY_SLS_AMT': 14}] * 4,
index=index)
result = df.resample('7D', how='count')
assert_frame_equal(result,expected)
expected = DataFrame(
[{'REST_KEY': 21, 'DLY_TRN_QT': 1050, 'DLY_SLS_AMT': 700,
'COOP_DLY_TRN_QT': 560, 'COOP_DLY_SLS_AMT': 280}] * 4,
index=index)
result = df.resample('7D', how='sum')
assert_frame_equal(result,expected)
class TestTimeGrouper(tm.TestCase):
def setUp(self):
self.ts = Series(np.random.randn(1000),
index=date_range('1/1/2000', periods=1000))
def test_apply(self):
grouper = TimeGrouper('A', label='right', closed='right')
grouped = self.ts.groupby(grouper)
f = lambda x: x.order()[-3:]
applied = grouped.apply(f)
expected = self.ts.groupby(lambda x: x.year).apply(f)
applied.index = applied.index.droplevel(0)
expected.index = expected.index.droplevel(0)
assert_series_equal(applied, expected)
def test_count(self):
self.ts[::3] = np.nan
grouper = TimeGrouper('A', label='right', closed='right')
result = self.ts.resample('A', how='count')
expected = self.ts.groupby(lambda x: x.year).count()
expected.index = result.index
assert_series_equal(result, expected)
def test_numpy_reduction(self):
result = self.ts.resample('A', how='prod', closed='right')
expected = self.ts.groupby(lambda x: x.year).agg(np.prod)
expected.index = result.index
assert_series_equal(result, expected)
def test_apply_iteration(self):
# #2300
N = 1000
ind = pd.date_range(start="2000-01-01", freq="D", periods=N)
df = DataFrame({'open': 1, 'close': 2}, index=ind)
tg = TimeGrouper('M')
_, grouper, _ = tg._get_grouper(df)
# Errors
grouped = df.groupby(grouper, group_keys=False)
f = lambda df: df['close'] / df['open']
# it works!
result = grouped.apply(f)
self.assertTrue(result.index.equals(df.index))
def test_panel_aggregation(self):
ind = pd.date_range('1/1/2000', periods=100)
data = np.random.randn(2, len(ind), 4)
wp = pd.Panel(data, items=['Item1', 'Item2'], major_axis=ind,
minor_axis=['A', 'B', 'C', 'D'])
tg = TimeGrouper('M', axis=1)
_, grouper, _ = tg._get_grouper(wp)
bingrouped = wp.groupby(grouper)
binagg = bingrouped.mean()
def f(x):
assert(isinstance(x, Panel))
return x.mean(1)
result = bingrouped.agg(f)
tm.assert_panel_equal(result, binagg)
def test_fails_on_no_datetime_index(self):
index_names = ('Int64Index', 'PeriodIndex', 'Index', 'Float64Index',
'MultiIndex')
index_funcs = (tm.makeIntIndex, tm.makePeriodIndex,
tm.makeUnicodeIndex, tm.makeFloatIndex,
lambda m: tm.makeCustomIndex(m, 2))
n = 2
for name, func in zip(index_names, index_funcs):
index = func(n)
df = DataFrame({'a': np.random.randn(n)}, index=index)
with tm.assertRaisesRegexp(TypeError,
"axis must be a DatetimeIndex, "
"but got an instance of %r" % name):
df.groupby(TimeGrouper('D'))
def test_aggregate_normal(self):
# check TimeGrouper's aggregation is identical as normal groupby
n = 20
data = np.random.randn(n, 4)
normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
normal_df['key'] = [1, 2, 3, 4, 5] * 4
dt_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
dt_df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2), datetime(2013, 1, 3),
datetime(2013, 1, 4), datetime(2013, 1, 5)] * 4
normal_grouped = normal_df.groupby('key')
dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D'))
for func in ['min', 'max', 'prod', 'var', 'std', 'mean']:
expected = getattr(normal_grouped, func)()
dt_result = getattr(dt_grouped, func)()
expected.index = date_range(start='2013-01-01', freq='D', periods=5, name='key')
assert_frame_equal(expected, dt_result)
for func in ['count', 'sum']:
expected = getattr(normal_grouped, func)()
expected.index = date_range(start='2013-01-01', freq='D', periods=5, name='key')
dt_result = getattr(dt_grouped, func)()
assert_frame_equal(expected, dt_result)
# GH 7453
for func in ['size']:
expected = getattr(normal_grouped, func)()
expected.index = date_range(start='2013-01-01', freq='D', periods=5, name='key')
dt_result = getattr(dt_grouped, func)()
assert_series_equal(expected, dt_result)
"""
for func in ['first', 'last']:
expected = getattr(normal_grouped, func)()
expected.index = date_range(start='2013-01-01', freq='D', periods=5, name='key')
dt_result = getattr(dt_grouped, func)()
assert_frame_equal(expected, dt_result)
for func in ['nth']:
expected = getattr(normal_grouped, func)(3)
expected.index = date_range(start='2013-01-01', freq='D', periods=5, name='key')
dt_result = getattr(dt_grouped, func)(3)
assert_frame_equal(expected, dt_result)
"""
# when TimeGrouper is used, 'first', 'last' and 'nth' don't work yet
def test_aggregate_with_nat(self):
# check TimeGrouper's aggregation is identical as normal groupby
n = 20
data = np.random.randn(n, 4)
normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
normal_df['key'] = [1, 2, np.nan, 4, 5] * 4
dt_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
dt_df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT,
datetime(2013, 1, 4), datetime(2013, 1, 5)] * 4
normal_grouped = normal_df.groupby('key')
dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D'))
for func in ['min', 'max', 'prod']:
normal_result = getattr(normal_grouped, func)()
dt_result = getattr(dt_grouped, func)()
pad = DataFrame([[np.nan, np.nan, np.nan, np.nan]],
index=[3], columns=['A', 'B', 'C', 'D'])
expected = normal_result.append(pad)
expected = expected.sort_index()
expected.index = date_range(start='2013-01-01', freq='D', periods=5, name='key')
assert_frame_equal(expected, dt_result)
for func in ['count', 'sum']:
normal_result = getattr(normal_grouped, func)()
pad = DataFrame([[0, 0, 0, 0]], index=[3], columns=['A', 'B', 'C', 'D'])
expected = normal_result.append(pad)
expected = expected.sort_index()
expected.index = date_range(start='2013-01-01', freq='D', periods=5, name='key')
dt_result = getattr(dt_grouped, func)()
assert_frame_equal(expected, dt_result)
for func in ['size']:
normal_result = getattr(normal_grouped, func)()
pad = Series([0], index=[3])
expected = normal_result.append(pad)
expected = expected.sort_index()
expected.index = date_range(start='2013-01-01', freq='D', periods=5, name='key')
dt_result = getattr(dt_grouped, func)()
assert_series_equal(expected, dt_result)
# if NaT is included, 'var', 'std', 'mean', 'first', 'last' and 'nth' don't work yet
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
mit
|
lqdc/okstereotype
|
okstereotype/xkcd_graphs/generate_xkcd.py
|
1
|
2236
|
#!/usr/bin/env python
'''
@file plot_stuff.py
@date Wed 23 Jan 2013 02:14:07 PM EST
@author Roman Sinayev
@email [email protected]
@detail
'''
from xkcd_generator import XKCDify
import numpy as np
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.dates import DateFormatter
# import pylab
def find_nearest_idx(array,value):
return np.abs(array-value).argmin()
def index_to_xloc(index):
return (index+3)*1.4/29
def plot_essay_len(l):
np.random.seed(12)
l = l/1000.
y = np.array([42042, 37808, 34124, 29091, 23452, 18980, 15201, 11876, 9578, 7645, 5976, 4715, 3735, 2817, 2169, 1703, 1431, 1142, 825, 670, 570, 439, 350, 334, 254, 234])/100000.
x = np.arange(150,1450,50)/1000.
l_idx = find_nearest_idx(x,l)
x_loc = index_to_xloc(l_idx) #x end of the line
y_loc = y[l_idx] #y end of the line
x_start = 1.0 #location of x start of the line
y_start = 0.32 #location of y start of the line
dist_from_line = 0.05
dx = x_loc - x_start
dy = y_loc - y_start
d_mag = np.sqrt(dx * dx + dy * dy)
new_x = x_loc - dx/ d_mag * dist_from_line #new location offset by distance from line
new_y = y_loc - dy/ d_mag * dist_from_line
# ax = pylab.axes()
fig=Figure(figsize=(6.7,5.0),dpi=96)
ax=fig.add_subplot(1,1,1)
ax.plot(x, y, 'b', lw=1, label='word count', c='#0B6EC3')
ax.set_title('Essay Word Count')
ax.set_xlabel('# of words')
ax.set_ylabel('# of users')
ax.text(0.9, .35, "You are here")
ax.plot([x_start, new_x], [y_start, new_y], '-k', lw=0.85)
ax.set_xlim(0, 1.5)
ax.set_ylim(0, 0.43)
for i in [.150, .500,1.000]:
ax.text(index_to_xloc(find_nearest_idx(x,i))-.03, -0.03, "%.0f" % (i*1000))
#XKCDify the axes -- this operates in-place
XKCDify(ax, xaxis_loc=0.0, yaxis_loc=0.0,
xaxis_arrow='+-', yaxis_arrow='+-',
expand_axes=True)
# ax.add_patch(pylab.Circle((index_to_xloc(l_idx),y[l_idx]),radius=0.02, alpha=1.0, antialiased=True, fc="#BD362F", ec="none"))
# pylab.show()
canvas = FigureCanvas(fig)
return fig
if __name__ == '__main__':
plot_essay_len(800)
|
bsd-3-clause
|
penguintantin/Flir_lepton
|
DispLepton3rd.py
|
1
|
4170
|
#!/usr/bin/python
import os
import sys
import matplotlib.pyplot as plt
import math
import time
import glob
import numpy as np
from matplotlib.widgets import Button
from matplotlib import cm
from mpsse import *
import lepton3rd
Read=True
maxval=0
ImgID=0
class Index:
ind = 0
def __init__(self, lep):
self.lepton=lep
def reset(self, event):
print("Reset")
def save(self, event):
print("save")
saveimage(self.lepton)
def stop(self, event):
global Read
print("stop")
Read=False
def start(self, event):
global Read
print("start")
Read=True
def next(self, event):
self.ind += 1
i = self.ind % len(freqs)
ydata = np.sin(2*np.pi*freqs[i]*t)
l.set_ydata(ydata)
plt.draw()
def prev(self, event):
self.ind -= 1
i = self.ind % len(freqs)
ydata = np.sin(2*np.pi*freqs[i]*t)
l.set_ydata(ydata)
plt.draw()
def main():
lep=lepton3rd.Lepton()
#Set Tlm
lepton3rd.TlmSetEna(lep)
lepton3rd.GetLeptonImg(lep)
fpa_temp=lep.tlm[0][48]*256+lep.tlm[0][49]
diff = lep.maxval-lep.minval
fig, ax = plt.subplots()
ax.set_title('Image')
#cax = ax.imshow(data, interpolation='nearest', cmap=cm.coolwarm, picker=True)
#cax = ax.imshow(data, interpolation='nearest', cmap=cm.coolwarm, picker=5)
cax = ax.imshow(lep.data, interpolation='nearest')
cbar = fig.colorbar(cax, ticks=[0, diff/2, diff])
cbar.ax.set_yticklabels([lep.minval, lep.minval+diff/2, lep.maxval]) # vertically oriented colorbar
#fig.canvas.mpl_connect('pick_event', onpick)
#'''
callback = Index(lep)
axstart = plt.axes([0.3, 0.05, 0.1, 0.075])
axstop = plt.axes([0.41, 0.05, 0.1, 0.075])
axsave = plt.axes([0.52, 0.05, 0.1, 0.075])
axreset = plt.axes([0.63, 0.05, 0.1, 0.075])
bsave = Button(axsave, 'Save')
bsave.on_clicked(callback.save)
breset = Button(axreset, 'Reset')
breset.on_clicked(callback.reset)
bstart = Button(axstart, 'Start')
bstart.on_clicked(callback.start)
bstop = Button(axstop, 'Stop')
bstop.on_clicked(callback.stop)
#'''
plt.ion()
plt.show()
while 1:
maxval=0
minval=0xffff
if Read:
lepton3rd.GetLeptonImg(lep)
print "Redraw",lep.maxval,lep.minval
#print "FPA temp=",TlmData[0][48]*256+TlmData[0][49]
fpa_temp=lep.tlm[0][48]*256+lep.tlm[0][49]
diff = lep.maxval-lep.minval
#ax.cla()
#ax.imshow(data)
#ax.imshow(data, interpolation='nearest', cmap=cm.coolwarm, picker=5)
cax = ax.imshow(lep.data, interpolation='nearest')
#cbar = fig.colorbar(cax, ticks=[0, diff/2, diff])
#cbar.ax.set_yticklabels([minval, minval+diff/2, maxval])
#cax.set_data(data) #OK .Faster?
#ax.set_title('Image')
#plt.draw() #OK
fig.canvas.blit() #OK
#time.sleep(0.5) ##NG!!!
#plt.pause(0.05) #OK
#http://stackoverflow.com/questions/3441874/matplotlib-animation-either-freezes-after-a-few-frames-or-just-doesnt-work
#fig.canvas.blit() # or draw()
fig.canvas.start_event_loop(0.0001) #OK
#def onpick(event):
# if event.xdata != None and event.ydata != None:
# print(event.xdata, event.ydata)
#def onpick_ng(event):
# thisline = event.artist
# xdata, ydata = thisline.get_data()
# ind = event.ind
# print('on pick line:', zip(xdata[ind], ydata[ind]))
#def onpick(event):
#ind = event.ind #from matplotlib.lines import Line2D
#print('onpick3 scatter:', ind, np.take(x, ind), np.take(y, ind))
# print ('val:', event.xdata,event.ydata)
def saveimage(lep):
global ImgID
maxval=0
minval=0xffff
fpa_temp=lep.tlm[0][48]*256+lep.tlm[0][49]
img_file="./img2/img_" + str(ImgID) + ".pgm"
diff = lep.maxval-lep.minval
pgm=open(img_file,'w')
data="P2\n160 120\n" + str(int(lep.maxval))+" "+str(int(lep.minval))+" "+str(int(fpa_temp)) + "\n"
for i in range(lepton3rd.ImageY):
for j in range(lepton3rd.ImageX):
#data+=str(lep.image[i][j]-lep.minval)+" "
data+=str(int(lep.image[i][j]))+" "
data+="\n"
data+="\n\n"
pgm.write(data)
pgm.close()
ImgID+=1
def open_file(dirname, filename):
file_name = os.path.join(dirname, filename)
try:
f = open(file_name,'r')
except IOError, (errno, strerror):
print "Unable to open the file: \"" + file_name + "\"\n"
return []
else:
ret = f.read()
return ret.split("\n")
if __name__ == "__main__":
main()
|
mit
|
smharper/openmc
|
openmc/tallies.py
|
2
|
123554
|
from collections.abc import Iterable, MutableSequence
import copy
from functools import partial, reduce
from itertools import product
from numbers import Integral, Real
import operator
from pathlib import Path
from xml.etree import ElementTree as ET
import h5py
import numpy as np
import pandas as pd
import scipy.sparse as sps
import openmc
import openmc.checkvalue as cv
from ._xml import clean_indentation, reorder_attributes
from .mixin import IDManagerMixin
# The tally arithmetic product types. The tensor product performs the full
# cross product of the data in two tallies with respect to a specified axis
# (filters, nuclides, or scores). The entrywise product performs the arithmetic
# operation entrywise across the entries in two tallies with respect to a
# specified axis.
_PRODUCT_TYPES = ['tensor', 'entrywise']
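# Illustrative sketch (not part of the original module; the tally objects are
# hypothetical): a 'tensor' product of a tally with scores ['flux'] and one
# with scores ['fission', 'absorption'] crosses every combination along that
# axis, giving 1 * 2 = 2 score bins, whereas an 'entrywise' product pairs bins
# one-to-one and keeps the original number of bins along the chosen axis.
#   combined = flux_tally * fission_tally  # tally arithmetic yields a derived Tally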
# The following indicate acceptable types when setting Tally.scores,
# Tally.nuclides, and Tally.filters
_SCORE_CLASSES = (str, openmc.CrossScore, openmc.AggregateScore)
_NUCLIDE_CLASSES = (str, openmc.CrossNuclide, openmc.AggregateNuclide)
_FILTER_CLASSES = (openmc.Filter, openmc.CrossFilter, openmc.AggregateFilter)
# Valid types of estimators
ESTIMATOR_TYPES = ['tracklength', 'collision', 'analog']
class Tally(IDManagerMixin):
"""A tally defined by a set of scores that are accumulated for a list of
nuclides given a set of filters.
Parameters
----------
tally_id : int, optional
Unique identifier for the tally. If none is specified, an identifier
will automatically be assigned
name : str, optional
Name of the tally. If not specified, the name is the empty string.
Attributes
----------
id : int
Unique identifier for the tally
name : str
Name of the tally
filters : list of openmc.Filter
List of specified filters for the tally
nuclides : list of openmc.Nuclide
List of nuclides to score results for
scores : list of str
List of defined scores, e.g. 'flux', 'fission', etc.
estimator : {'analog', 'tracklength', 'collision'}
Type of estimator for the tally
triggers : list of openmc.Trigger
List of tally triggers
num_scores : int
Total number of scores
num_filter_bins : int
Total number of filter bins accounting for all filters
num_bins : int
Total number of bins for the tally
shape : 3-tuple of int
The shape of the tally data array ordered as the number of filter bins,
nuclide bins and score bins
filter_strides : list of int
Stride in memory for each filter
num_realizations : int
Total number of realizations
with_summary : bool
Whether or not a Summary has been linked
sum : numpy.ndarray
An array containing the sum of each independent realization for each bin
sum_sq : numpy.ndarray
An array containing the sum of each independent realization squared for
each bin
mean : numpy.ndarray
An array containing the sample mean for each bin
std_dev : numpy.ndarray
An array containing the sample standard deviation for each bin
derived : bool
Whether or not the tally is derived from one or more other tallies
sparse : bool
Whether or not the tally uses SciPy's LIL sparse matrix format for
compressed data storage
derivative : openmc.TallyDerivative
A material perturbation derivative to apply to all scores in the tally.
"""
next_id = 1
used_ids = set()
def __init__(self, tally_id=None, name=''):
# Initialize Tally class attributes
self.id = tally_id
self.name = name
self._filters = cv.CheckedList(_FILTER_CLASSES, 'tally filters')
self._nuclides = cv.CheckedList(_NUCLIDE_CLASSES, 'tally nuclides')
self._scores = cv.CheckedList(_SCORE_CLASSES, 'tally scores')
self._estimator = None
self._triggers = cv.CheckedList(openmc.Trigger, 'tally triggers')
self._derivative = None
self._num_realizations = 0
self._with_summary = False
self._sum = None
self._sum_sq = None
self._mean = None
self._std_dev = None
self._with_batch_statistics = False
self._derived = False
self._sparse = False
self._sp_filename = None
self._results_read = False
def __repr__(self):
parts = ['Tally']
parts.append('{: <15}=\t{}'.format('ID', self.id))
parts.append('{: <15}=\t{}'.format('Name', self.name))
if self.derivative is not None:
parts.append('{: <15}=\t{}'.format('Derivative ID', self.derivative.id))
filters = ', '.join(type(f).__name__ for f in self.filters)
parts.append('{: <15}=\t{}'.format('Filters', filters))
nuclides = ' '.join(str(nuclide) for nuclide in self.nuclides)
parts.append('{: <15}=\t{}'.format('Nuclides', nuclides))
parts.append('{: <15}=\t{}'.format('Scores', self.scores))
parts.append('{: <15}=\t{}'.format('Estimator', self.estimator))
return '\n\t'.join(parts)
@property
def name(self):
return self._name
@property
def filters(self):
return self._filters
@property
def nuclides(self):
return self._nuclides
@property
def num_nuclides(self):
return len(self._nuclides)
@property
def scores(self):
return self._scores
@property
def num_scores(self):
return len(self._scores)
@property
def num_filters(self):
return len(self.filters)
@property
def num_filter_bins(self):
return reduce(operator.mul, (f.num_bins for f in self.filters), 1)
@property
def num_bins(self):
return self.num_filter_bins * self.num_nuclides * self.num_scores
@property
def shape(self):
return (self.num_filter_bins, self.num_nuclides, self.num_scores)
@property
def estimator(self):
return self._estimator
@property
def triggers(self):
return self._triggers
@property
def num_realizations(self):
return self._num_realizations
@property
def with_summary(self):
return self._with_summary
def _read_results(self):
if self._results_read:
return
# Open the HDF5 statepoint file
with h5py.File(self._sp_filename, 'r') as f:
# Extract Tally data from the file
data = f['tallies/tally {}/results'.format(self.id)]
sum_ = data[:, :, 0]
sum_sq = data[:, :, 1]
# Reshape the results arrays
sum_ = np.reshape(sum_, self.shape)
sum_sq = np.reshape(sum_sq, self.shape)
# Set the data for this Tally
self._sum = sum_
self._sum_sq = sum_sq
# Convert NumPy arrays to SciPy sparse LIL matrices
if self.sparse:
self._sum = sps.lil_matrix(self._sum.flatten(), self._sum.shape)
self._sum_sq = sps.lil_matrix(self._sum_sq.flatten(), self._sum_sq.shape)
# Indicate that Tally results have been read
self._results_read = True
@property
def sum(self):
if not self._sp_filename or self.derived:
return None
# Make sure results have been read
self._read_results()
if self.sparse:
return np.reshape(self._sum.toarray(), self.shape)
else:
return self._sum
@property
def sum_sq(self):
if not self._sp_filename or self.derived:
return None
# Make sure results have been read
self._read_results()
if self.sparse:
return np.reshape(self._sum_sq.toarray(), self.shape)
else:
return self._sum_sq
@property
def mean(self):
if self._mean is None:
if not self._sp_filename:
return None
self._mean = self.sum / self.num_realizations
# Convert NumPy array to SciPy sparse LIL matrix
if self.sparse:
self._mean = sps.lil_matrix(self._mean.flatten(),
self._mean.shape)
if self.sparse:
return np.reshape(self._mean.toarray(), self.shape)
else:
return self._mean
@property
def std_dev(self):
if self._std_dev is None:
if not self._sp_filename:
return None
n = self.num_realizations
nonzero = np.abs(self.mean) > 0
self._std_dev = np.zeros_like(self.mean)
self._std_dev[nonzero] = np.sqrt((self.sum_sq[nonzero]/n -
self.mean[nonzero]**2)/(n - 1))
# Convert NumPy array to SciPy sparse LIL matrix
if self.sparse:
self._std_dev = sps.lil_matrix(self._std_dev.flatten(),
self._std_dev.shape)
self.with_batch_statistics = True
if self.sparse:
return np.reshape(self._std_dev.toarray(), self.shape)
else:
return self._std_dev
@property
def with_batch_statistics(self):
return self._with_batch_statistics
@property
def derived(self):
return self._derived
@property
def derivative(self):
return self._derivative
@property
def sparse(self):
return self._sparse
@estimator.setter
def estimator(self, estimator):
cv.check_value('estimator', estimator, ESTIMATOR_TYPES)
self._estimator = estimator
@triggers.setter
def triggers(self, triggers):
cv.check_type('tally triggers', triggers, MutableSequence)
self._triggers = cv.CheckedList(openmc.Trigger, 'tally triggers',
triggers)
@name.setter
def name(self, name):
cv.check_type('tally name', name, str, none_ok=True)
self._name = name
@derivative.setter
def derivative(self, deriv):
cv.check_type('tally derivative', deriv, openmc.TallyDerivative,
none_ok=True)
self._derivative = deriv
@filters.setter
def filters(self, filters):
cv.check_type('tally filters', filters, MutableSequence)
# If the filter is already in the Tally, raise an error
visited_filters = set()
for f in filters:
if f in visited_filters:
msg = 'Unable to add a duplicate filter "{}" to Tally ID="{}" ' \
'since duplicate filters are not supported in the OpenMC ' \
'Python API'.format(f, self.id)
raise ValueError(msg)
visited_filters.add(f)
self._filters = cv.CheckedList(_FILTER_CLASSES, 'tally filters', filters)
@nuclides.setter
def nuclides(self, nuclides):
cv.check_type('tally nuclides', nuclides, MutableSequence)
# If the nuclide is already in the Tally, raise an error
visited_nuclides = set()
for nuc in nuclides:
if nuc in visited_nuclides:
msg = 'Unable to add a duplicate nuclide "{}" to Tally ID="{}" ' \
'since duplicate nuclides are not supported in the OpenMC ' \
'Python API'.format(nuc, self.id)
raise ValueError(msg)
visited_nuclides.add(nuc)
self._nuclides = cv.CheckedList(_NUCLIDE_CLASSES, 'tally nuclides',
nuclides)
@scores.setter
def scores(self, scores):
cv.check_type('tally scores', scores, MutableSequence)
visited_scores = set()
for i, score in enumerate(scores):
# If the score is already in the Tally, raise an error
if score in visited_scores:
msg = 'Unable to add a duplicate score "{}" to Tally ID="{}" ' \
'since duplicate scores are not supported in the OpenMC ' \
'Python API'.format(score, self.id)
raise ValueError(msg)
visited_scores.add(score)
# If score is a string, strip whitespace
if isinstance(score, str):
# Check to see if scores are deprecated before storing
for deprecated in ['scatter-', 'nu-scatter-', 'scatter-p',
'nu-scatter-p', 'scatter-y', 'nu-scatter-y',
'flux-y', 'total-y']:
if score.strip().startswith(deprecated):
msg = score.strip() + ' is no longer supported.'
raise ValueError(msg)
scores[i] = score.strip()
self._scores = cv.CheckedList(_SCORE_CLASSES, 'tally scores', scores)
@num_realizations.setter
def num_realizations(self, num_realizations):
cv.check_type('number of realizations', num_realizations, Integral)
cv.check_greater_than('number of realizations', num_realizations, 0, True)
self._num_realizations = num_realizations
@with_summary.setter
def with_summary(self, with_summary):
cv.check_type('with_summary', with_summary, bool)
self._with_summary = with_summary
@with_batch_statistics.setter
def with_batch_statistics(self, with_batch_statistics):
cv.check_type('with_batch_statistics', with_batch_statistics, bool)
self._with_batch_statistics = with_batch_statistics
@sum.setter
def sum(self, sum):
cv.check_type('sum', sum, Iterable)
self._sum = sum
@sum_sq.setter
def sum_sq(self, sum_sq):
cv.check_type('sum_sq', sum_sq, Iterable)
self._sum_sq = sum_sq
@sparse.setter
def sparse(self, sparse):
"""Convert tally data from NumPy arrays to SciPy list of lists (LIL)
sparse matrices, and vice versa.
This property may be used to reduce the amount of data in memory during
tally data processing. The tally data will be stored as SciPy LIL
matrices internally within the Tally object. All tally data access
properties and methods will return data as a dense NumPy array.
"""
cv.check_type('sparse', sparse, bool)
# Convert NumPy arrays to SciPy sparse LIL matrices
if sparse and not self.sparse:
if self._sum is not None:
self._sum = sps.lil_matrix(self._sum.flatten(), self._sum.shape)
if self._sum_sq is not None:
self._sum_sq = sps.lil_matrix(self._sum_sq.flatten(),
self._sum_sq.shape)
if self._mean is not None:
self._mean = sps.lil_matrix(self._mean.flatten(),
self._mean.shape)
if self._std_dev is not None:
self._std_dev = sps.lil_matrix(self._std_dev.flatten(),
self._std_dev.shape)
self._sparse = True
# Convert SciPy sparse LIL matrices to NumPy arrays
elif not sparse and self.sparse:
if self._sum is not None:
self._sum = np.reshape(self._sum.toarray(), self.shape)
if self._sum_sq is not None:
self._sum_sq = np.reshape(self._sum_sq.toarray(), self.shape)
if self._mean is not None:
self._mean = np.reshape(self._mean.toarray(), self.shape)
if self._std_dev is not None:
self._std_dev = np.reshape(self._std_dev.toarray(), self.shape)
self._sparse = False
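# Illustrative usage of the sparse property (a sketch; assumes a Tally whose
# results have been loaded from a statepoint):
#   tally.sparse = True    # sum/sum_sq/mean/std_dev held as SciPy LIL matrices
#   mean = tally.mean      # accessors still return dense NumPy arrays
#   tally.sparse = False   # convert internal storage back to dense arrays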
def remove_score(self, score):
"""Remove a score from the tally
Parameters
----------
score : str
Score to remove
"""
if score not in self.scores:
msg = 'Unable to remove score "{}" from Tally ID="{}" since ' \
'the Tally does not contain this score'.format(score, self.id)
raise ValueError(msg)
self._scores.remove(score)
def remove_filter(self, old_filter):
"""Remove a filter from the tally
Parameters
----------
old_filter : openmc.Filter
Filter to remove
"""
if old_filter not in self.filters:
msg = 'Unable to remove filter "{}" from Tally ID="{}" since the ' \
'Tally does not contain this filter'.format(old_filter, self.id)
raise ValueError(msg)
self._filters.remove(old_filter)
def remove_nuclide(self, nuclide):
"""Remove a nuclide from the tally
Parameters
----------
nuclide : openmc.Nuclide
Nuclide to remove
"""
if nuclide not in self.nuclides:
msg = 'Unable to remove nuclide "{}" from Tally ID="{}" since the ' \
'Tally does not contain this nuclide'.format(nuclide, self.id)
raise ValueError(msg)
self._nuclides.remove(nuclide)
def _can_merge_filters(self, other):
"""Determine if another tally's filters can be merged with this one's
The types of filters between the two tallies must match identically.
The bins in all of the filters must match identically, or be mergeable
in only one filter. This is a helper method for the can_merge(...)
and merge(...) methods.
Parameters
----------
other : openmc.Tally
Tally to check for mergeable filters
"""
# Two tallies must have the same number of filters
if len(self.filters) != len(other.filters):
return False
# Return False if only one tally has a delayed group filter
tally1_dg = self.contains_filter(openmc.DelayedGroupFilter)
tally2_dg = other.contains_filter(openmc.DelayedGroupFilter)
if tally1_dg != tally2_dg:
return False
# Look to see if all filters are the same, or one or more can be merged
for filter1 in self.filters:
mergeable = False
for filter2 in other.filters:
if filter1 == filter2 or filter1.can_merge(filter2):
mergeable = True
break
# If no mergeable filter was found, the tallies are not mergeable
if not mergeable:
return False
# Tally filters are mergeable if all conditional checks passed
return True
def _can_merge_nuclides(self, other):
"""Determine if another tally's nuclides can be merged with this one's
The nuclides between the two tallies must be mutually exclusive or
identically matching. This is a helper method for the can_merge(...)
and merge(...) methods.
Parameters
----------
other : openmc.Tally
Tally to check for mergeable nuclides
"""
no_nuclides_match = True
all_nuclides_match = True
# Search for each of this tally's nuclides in the other tally
for nuclide in self.nuclides:
if nuclide not in other.nuclides:
all_nuclides_match = False
else:
no_nuclides_match = False
# Search for each of the other tally's nuclides in this tally
for nuclide in other.nuclides:
if nuclide not in self.nuclides:
all_nuclides_match = False
else:
no_nuclides_match = False
# Either all nuclides should match, or none should
return no_nuclides_match or all_nuclides_match
def _can_merge_scores(self, other):
"""Determine if another tally's scores can be merged with this one's
The scores between the two tallies must be mutually exclusive or
identically matching. This is a helper method for the can_merge(...)
and merge(...) methods.
Parameters
----------
other : openmc.Tally
Tally to check for mergeable scores
"""
no_scores_match = True
all_scores_match = True
# Search for each of this tally's scores in the other tally
for score in self.scores:
if score in other.scores:
no_scores_match = False
# Search for each of the other tally's scores in this tally
for score in other.scores:
if score not in self.scores:
all_scores_match = False
else:
no_scores_match = False
if score == 'current' and score not in self.scores:
return False
# Nuclides cannot be specified on 'flux' scores
if 'flux' in self.scores or 'flux' in other.scores:
if self.nuclides != other.nuclides:
return False
# Either all scores should match, or none should
return no_scores_match or all_scores_match
def can_merge(self, other):
"""Determine if another tally can be merged with this one
If results have been loaded from a statepoint, then tallies are only
mergeable along one and only one of filter bins, nuclides or scores.
Parameters
----------
other : openmc.Tally
Tally to check for merging
"""
if not isinstance(other, Tally):
return False
# Must have same estimator
if self.estimator != other.estimator:
return False
equal_filters = sorted(self.filters) == sorted(other.filters)
equal_nuclides = sorted(self.nuclides) == sorted(other.nuclides)
equal_scores = sorted(self.scores) == sorted(other.scores)
equality = [equal_filters, equal_nuclides, equal_scores]
# If all filters, nuclides and scores match then tallies are mergeable
if all(equality):
return True
# Variables to indicate filter bins, nuclides, and scores that can be merged
can_merge_filters = self._can_merge_filters(other)
can_merge_nuclides = self._can_merge_nuclides(other)
can_merge_scores = self._can_merge_scores(other)
mergeability = [can_merge_filters, can_merge_nuclides, can_merge_scores]
if not all(mergeability):
return False
# If the tally results have been read from the statepoint, at least two
# of filters, nuclides and scores must match
else:
return not self._results_read or sum(equality) >= 2
def merge(self, other):
"""Merge another tally with this one
If results have been loaded from a statepoint, then tallies are only
mergeable along one and only one of filter bins, nuclides or scores.
Parameters
----------
other : openmc.Tally
Tally to merge with this one
Returns
-------
merged_tally : openmc.Tally
Merged tallies
"""
if not self.can_merge(other):
msg = 'Unable to merge tally ID="{}" with "{}"'.format(
other.id, self.id)
raise ValueError(msg)
# Create deep copy of tally to return as merged tally
merged_tally = copy.deepcopy(self)
# Differentiate Tally with a new auto-generated Tally ID
merged_tally.id = None
# Create deep copy of other tally to use for array concatenation
other_copy = copy.deepcopy(other)
# Identify if filters, nuclides and scores are mergeable and/or equal
merge_filters = self._can_merge_filters(other)
merge_nuclides = self._can_merge_nuclides(other)
merge_scores = self._can_merge_scores(other)
equal_filters = sorted(self.filters) == sorted(other.filters)
equal_nuclides = sorted(self.nuclides) == sorted(other.nuclides)
equal_scores = sorted(self.scores) == sorted(other.scores)
# If two tallies can be merged along a filter's bins
if merge_filters and not equal_filters:
# Search for mergeable filters
for i, filter1 in enumerate(self.filters):
for filter2 in other.filters:
if filter1 != filter2 and filter1.can_merge(filter2):
other_copy._swap_filters(other_copy.filters[i], filter2)
merged_tally.filters[i] = filter1.merge(filter2)
join_right = filter1 < filter2
merge_axis = i
break
# If two tallies can be merged along nuclide bins
if merge_nuclides and not equal_nuclides:
merge_axis = self.num_filters
join_right = True
# Add unique nuclides from other tally to merged tally
for nuclide in other.nuclides:
if nuclide not in merged_tally.nuclides:
merged_tally.nuclides.append(nuclide)
# If two tallies can be merged along score bins
if merge_scores and not equal_scores:
merge_axis = self.num_filters + 1
join_right = True
# Add unique scores from other tally to merged tally
for score in other.scores:
if score not in merged_tally.scores:
merged_tally.scores.append(score)
# Add triggers from other tally to merged tally
for trigger in other.triggers:
merged_tally.triggers.append(trigger)
# If results have not been read, then return tally for input generation
if self._results_read is None:
return merged_tally
# Otherwise, this is a derived tally which needs merged results arrays
else:
self._derived = True
# Concatenate sum arrays if present in both tallies
if self.sum is not None and other_copy.sum is not None:
self_sum = self.get_reshaped_data(value='sum')
other_sum = other_copy.get_reshaped_data(value='sum')
if join_right:
merged_sum = np.concatenate((self_sum, other_sum),
axis=merge_axis)
else:
merged_sum = np.concatenate((other_sum, self_sum),
axis=merge_axis)
merged_tally._sum = np.reshape(merged_sum, merged_tally.shape)
# Concatenate sum_sq arrays if present in both tallies
if self.sum_sq is not None and other.sum_sq is not None:
self_sum_sq = self.get_reshaped_data(value='sum_sq')
other_sum_sq = other_copy.get_reshaped_data(value='sum_sq')
if join_right:
merged_sum_sq = np.concatenate((self_sum_sq, other_sum_sq),
axis=merge_axis)
else:
merged_sum_sq = np.concatenate((other_sum_sq, self_sum_sq),
axis=merge_axis)
merged_tally._sum_sq = np.reshape(merged_sum_sq, merged_tally.shape)
# Concatenate mean arrays if present in both tallies
if self.mean is not None and other.mean is not None:
self_mean = self.get_reshaped_data(value='mean')
other_mean = other_copy.get_reshaped_data(value='mean')
if join_right:
merged_mean = np.concatenate((self_mean, other_mean),
axis=merge_axis)
else:
merged_mean = np.concatenate((other_mean, self_mean),
axis=merge_axis)
merged_tally._mean = np.reshape(merged_mean, merged_tally.shape)
# Concatenate std. dev. arrays if present in both tallies
if self.std_dev is not None and other.std_dev is not None:
self_std_dev = self.get_reshaped_data(value='std_dev')
other_std_dev = other_copy.get_reshaped_data(value='std_dev')
if join_right:
merged_std_dev = np.concatenate((self_std_dev, other_std_dev),
axis=merge_axis)
else:
merged_std_dev = np.concatenate((other_std_dev, self_std_dev),
axis=merge_axis)
merged_tally._std_dev = np.reshape(merged_std_dev, merged_tally.shape)
# Sparsify merged tally if both tallies are sparse
merged_tally.sparse = self.sparse and other.sparse
return merged_tally
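    # Example usage (illustrative sketch, not part of the original source;
    # the tally names are assumptions):
    #
    #     flux_tally = sp.get_tally(name='flux')
    #     abs_tally = sp.get_tally(name='absorption')
    #     if flux_tally.can_merge(abs_tally):
    #         combined = flux_tally.merge(abs_tally)   # derived tally with both scores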
def to_xml_element(self):
"""Return XML representation of the tally
Returns
-------
element : xml.etree.ElementTree.Element
XML element containing tally data
"""
element = ET.Element("tally")
# Tally ID
element.set("id", str(self.id))
# Optional Tally name
if self.name != '':
element.set("name", self.name)
# Optional Tally filters
if len(self.filters) > 0:
subelement = ET.SubElement(element, "filters")
subelement.text = ' '.join(str(f.id) for f in self.filters)
# Optional Nuclides
if self.nuclides:
subelement = ET.SubElement(element, "nuclides")
subelement.text = ' '.join(str(n) for n in self.nuclides)
# Scores
if len(self.scores) == 0:
msg = 'Unable to get XML for Tally ID="{}" since it does not ' \
'contain any scores'.format(self.id)
raise ValueError(msg)
else:
            subelement = ET.SubElement(element, "scores")
            subelement.text = ' '.join(str(score) for score in self.scores)
# Tally estimator type
if self.estimator is not None:
subelement = ET.SubElement(element, "estimator")
subelement.text = self.estimator
# Optional Triggers
for trigger in self.triggers:
trigger.get_trigger_xml(element)
# Optional derivatives
if self.derivative is not None:
subelement = ET.SubElement(element, "derivative")
subelement.text = str(self.derivative.id)
return element
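    # Example usage (illustrative sketch, not part of the original source;
    # tallies are normally written out via an openmc.Tallies collection):
    #
    #     import xml.etree.ElementTree as ET
    #     print(ET.tostring(tally.to_xml_element()))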
def contains_filter(self, filter_type):
"""Looks for a filter in the tally that matches a specified type
Parameters
----------
filter_type : openmc.FilterMeta
Type of the filter, e.g. MeshFilter
Returns
-------
filter_found : bool
True if the tally contains a filter of the requested type;
otherwise false
"""
for test_filter in self.filters:
if type(test_filter) is filter_type:
return True
return False
def find_filter(self, filter_type):
"""Return a filter in the tally that matches a specified type
Parameters
----------
filter_type : openmc.FilterMeta
Type of the filter, e.g. MeshFilter
Returns
-------
filter_found : openmc.Filter
Filter from this tally with matching type, or None if no matching
Filter is found
Raises
------
ValueError
If no matching Filter is found
"""
# Look through all of this Tally's Filters for the type requested
for test_filter in self.filters:
if type(test_filter) is filter_type:
return test_filter
# Also check to see if the desired filter is wrapped up in an
# aggregate
elif isinstance(test_filter, openmc.AggregateFilter):
if isinstance(test_filter.aggregate_filter, filter_type):
return test_filter
# If we did not find the Filter, throw an Exception
msg = 'Unable to find filter type "{}" in Tally ID="{}"'.format(
filter_type, self.id)
raise ValueError(msg)
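    # Example usage (illustrative sketch, not part of the original source):
    #
    #     if tally.contains_filter(openmc.EnergyFilter):
    #         energy_filter = tally.find_filter(openmc.EnergyFilter)
    #         print(energy_filter.bins)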
def get_nuclide_index(self, nuclide):
"""Returns the index in the Tally's results array for a Nuclide bin
Parameters
----------
nuclide : str
The name of the Nuclide (e.g., 'H1', 'U238')
Returns
-------
nuclide_index : int
The index in the Tally data array for this nuclide.
Raises
------
KeyError
When the argument passed to the 'nuclide' parameter cannot be found
in the Tally.
"""
# Look for the user-requested nuclide in all of the Tally's Nuclides
for i, test_nuclide in enumerate(self.nuclides):
# If the Summary was linked, then values are Nuclide objects
if isinstance(test_nuclide, openmc.Nuclide):
if test_nuclide.name == nuclide:
return i
# If the Summary has not been linked, then values are ZAIDs
else:
if test_nuclide == nuclide:
return i
msg = ('Unable to get the nuclide index for Tally since "{}" '
'is not one of the nuclides'.format(nuclide))
raise KeyError(msg)
def get_score_index(self, score):
"""Returns the index in the Tally's results array for a score bin
Parameters
----------
score : str
The score string (e.g., 'absorption', 'nu-fission')
Returns
-------
score_index : int
The index in the Tally data array for this score.
Raises
------
ValueError
When the argument passed to the 'score' parameter cannot be found in
the Tally.
"""
try:
score_index = self.scores.index(score)
except ValueError:
msg = 'Unable to get the score index for Tally since "{}" ' \
'is not one of the scores'.format(score)
raise ValueError(msg)
return score_index
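    # Example usage (illustrative sketch, not part of the original source;
    # the nuclide and score names are assumptions):
    #
    #     i_nuc = tally.get_nuclide_index('U235')
    #     i_score = tally.get_score_index('absorption')
    #     value = tally.mean[0, i_nuc, i_score]    # first filter bin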
def get_filter_indices(self, filters=[], filter_bins=[]):
"""Get indices into the filter axis of this tally's data arrays.
This is a helper method for the Tally.get_values(...) method to
extract tally data. This method returns the indices into the filter
axis of the tally's data array (axis=0) for particular combinations
of filters and their corresponding bins.
Parameters
----------
filters : Iterable of openmc.FilterMeta
An iterable of filter types
(e.g., [MeshFilter, EnergyFilter]; default is [])
filter_bins : Iterable of tuple
A list of tuples of filter bins corresponding to the filter_types
parameter (e.g., [(1,), ((0., 0.625e-6),)]; default is []). Each
tuple contains bins for the corresponding filter type in the filters
parameter. Each bin is an integer ID for Material-, Surface-,
Cell-, Cellborn-, and Universe- Filters. Each bin is an integer
for the cell instance ID for DistribcellFilters. Each bin is a
2-tuple of floats for Energy- and Energyout- Filters corresponding
to the energy boundaries of the bin of interest. The bin is an
(x,y,z) 3-tuple for MeshFilters corresponding to the mesh cell
of interest. The order of the bins in the list must correspond to
the filter_types parameter.
Returns
-------
numpy.ndarray
A NumPy array of the filter indices
"""
cv.check_type('filters', filters, Iterable, openmc.FilterMeta)
cv.check_type('filter_bins', filter_bins, Iterable, tuple)
# If user did not specify any specific Filters, use them all
if not filters:
return np.arange(self.num_filter_bins)
# Initialize empty list of indices for each bin in each Filter
filter_indices = []
# Loop over all of the Tally's Filters
for i, self_filter in enumerate(self.filters):
# If a user-requested Filter, get the user-requested bins
for j, test_filter in enumerate(filters):
if type(self_filter) is test_filter:
bins = filter_bins[j]
break
else:
# If not a user-requested Filter, get all bins
if isinstance(self_filter, openmc.DistribcellFilter):
# Create list of cell instance IDs for distribcell Filters
bins = list(range(self_filter.num_bins))
elif isinstance(self_filter, openmc.EnergyFunctionFilter):
# EnergyFunctionFilters don't have bins so just add a None
bins = [None]
else:
# Create list of IDs for bins for all other filter types
bins = self_filter.bins
# Add indices for each bin in this Filter to the list
indices = np.array([self_filter.get_bin_index(b) for b in bins])
filter_indices.append(indices)
# Account for stride in each of the previous filters
for indices in filter_indices[:i]:
indices *= self_filter.num_bins
# Apply outer product sum between all filter bin indices
return list(map(sum, product(*filter_indices)))
def get_nuclide_indices(self, nuclides):
"""Get indices into the nuclide axis of this tally's data arrays.
This is a helper method for the Tally.get_values(...) method to
extract tally data. This method returns the indices into the nuclide
axis of the tally's data array (axis=1) for one or more nuclides.
Parameters
----------
nuclides : list of str
A list of nuclide name strings
(e.g., ['U235', 'U238']; default is [])
Returns
-------
numpy.ndarray
A NumPy array of the nuclide indices
"""
cv.check_iterable_type('nuclides', nuclides, str)
# If user did not specify any specific Nuclides, use them all
if not nuclides:
return np.arange(self.num_nuclides)
# Determine the score indices from any of the requested scores
nuclide_indices = np.zeros_like(nuclides, dtype=int)
for i, nuclide in enumerate(nuclides):
nuclide_indices[i] = self.get_nuclide_index(nuclide)
return nuclide_indices
def get_score_indices(self, scores):
"""Get indices into the score axis of this tally's data arrays.
This is a helper method for the Tally.get_values(...) method to
extract tally data. This method returns the indices into the score
axis of the tally's data array (axis=2) for one or more scores.
Parameters
----------
scores : list of str or openmc.CrossScore
A list of one or more score strings
(e.g., ['absorption', 'nu-fission']; default is [])
Returns
-------
numpy.ndarray
A NumPy array of the score indices
"""
for score in scores:
if not isinstance(score, (str, openmc.CrossScore)):
msg = 'Unable to get score indices for score "{}" in Tally ' \
'ID="{}" since it is not a string or CrossScore'\
.format(score, self.id)
raise ValueError(msg)
# Determine the score indices from any of the requested scores
if scores:
score_indices = np.zeros(len(scores), dtype=int)
for i, score in enumerate(scores):
score_indices[i] = self.get_score_index(score)
# If user did not specify any specific scores, use them all
else:
score_indices = np.arange(self.num_scores)
return score_indices
def get_values(self, scores=[], filters=[], filter_bins=[],
nuclides=[], value='mean'):
"""Returns one or more tallied values given a list of scores, filters,
filter bins and nuclides.
This method constructs a 3D NumPy array for the requested Tally data
indexed by filter bin, nuclide bin, and score index. The method will
order the data in the array as specified in the parameter lists.
Parameters
----------
scores : list of str
A list of one or more score strings
(e.g., ['absorption', 'nu-fission']; default is [])
filters : Iterable of openmc.FilterMeta
An iterable of filter types
(e.g., [MeshFilter, EnergyFilter]; default is [])
filter_bins : list of Iterables
A list of tuples of filter bins corresponding to the filter_types
parameter (e.g., [(1,), ((0., 0.625e-6),)]; default is []). Each
tuple contains bins for the corresponding filter type in the filters
            parameter. Each bin is the integer ID for 'material', 'surface',
'cell', 'cellborn', and 'universe' Filters. Each bin is an integer
for the cell instance ID for 'distribcell' Filters. Each bin is a
2-tuple of floats for 'energy' and 'energyout' filters corresponding
to the energy boundaries of the bin of interest. The bin is an
(x,y,z) 3-tuple for 'mesh' filters corresponding to the mesh cell
of interest. The order of the bins in the list must correspond to
the filter_types parameter.
nuclides : list of str
A list of nuclide name strings
(e.g., ['U235', 'U238']; default is [])
value : str
A string for the type of value to return - 'mean' (default),
'std_dev', 'rel_err', 'sum', or 'sum_sq' are accepted
Returns
-------
float or numpy.ndarray
A scalar or NumPy array of the Tally data indexed in the order
each filter, nuclide and score is listed in the parameters.
Raises
------
ValueError
When this method is called before the Tally is populated with data,
or the input parameters do not correspond to the Tally's attributes,
e.g., if the score(s) do not match those in the Tally.
"""
# Ensure that the tally has data
if (value == 'mean' and self.mean is None) or \
(value == 'std_dev' and self.std_dev is None) or \
(value == 'rel_err' and self.mean is None) or \
(value == 'sum' and self.sum is None) or \
(value == 'sum_sq' and self.sum_sq is None):
msg = 'The Tally ID="{}" has no data to return'.format(self.id)
raise ValueError(msg)
# Get filter, nuclide and score indices
filter_indices = self.get_filter_indices(filters, filter_bins)
nuclide_indices = self.get_nuclide_indices(nuclides)
score_indices = self.get_score_indices(scores)
# Construct outer product of all three index types with each other
indices = np.ix_(filter_indices, nuclide_indices, score_indices)
# Return the desired result from Tally
if value == 'mean':
data = self.mean[indices]
elif value == 'std_dev':
data = self.std_dev[indices]
elif value == 'rel_err':
data = self.std_dev[indices] / self.mean[indices]
elif value == 'sum':
data = self.sum[indices]
elif value == 'sum_sq':
data = self.sum_sq[indices]
else:
msg = 'Unable to return results from Tally ID="{}" since the ' \
                  'requested value "{}" is not \'mean\', \'std_dev\', ' \
'\'rel_err\', \'sum\', or \'sum_sq\''.format(self.id, value)
raise LookupError(msg)
return data
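    # Example usage (illustrative sketch, not part of the original source;
    # the score and energy bin boundaries are assumptions):
    #
    #     values = tally.get_values(scores=['absorption'],
    #                               filters=[openmc.EnergyFilter],
    #                               filter_bins=[((0., 0.625),)],
    #                               value='mean')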
def get_pandas_dataframe(self, filters=True, nuclides=True, scores=True,
derivative=True, paths=True, float_format='{:.2e}'):
"""Build a Pandas DataFrame for the Tally data.
This method constructs a Pandas DataFrame object for the Tally data
with columns annotated by filter, nuclide and score bin information.
This capability has been tested for Pandas >=0.13.1. However, it is
recommended to use v0.16 or newer versions of Pandas since this method
uses the Multi-index Pandas feature.
Parameters
----------
filters : bool
Include columns with filter bin information (default is True).
nuclides : bool
Include columns with nuclide bin information (default is True).
scores : bool
Include columns with score bin information (default is True).
derivative : bool
Include columns with differential tally info (default is True).
paths : bool, optional
Construct columns for distribcell tally filters (default is True).
The geometric information in the Summary object is embedded into a
Multi-index column with a geometric "path" to each distribcell
instance.
float_format : str
All floats in the DataFrame will be formatted using the given
format string before printing.
Returns
-------
pandas.DataFrame
A Pandas DataFrame with each column annotated by filter, nuclide and
score bin information (if these parameters are True), and the mean
and standard deviation of the Tally's data.
Raises
------
KeyError
When this method is called before the Tally is populated with data
"""
# Ensure that the tally has data
if self.mean is None or self.std_dev is None:
msg = 'The Tally ID="{}" has no data to return'.format(self.id)
raise KeyError(msg)
# Initialize a pandas dataframe for the tally data
df = pd.DataFrame()
# Find the total length of the tally data array
data_size = self.mean.size
# Build DataFrame columns for filters if user requested them
if filters:
# Append each Filter's DataFrame to the overall DataFrame
for f, stride in zip(self.filters, self.filter_strides):
filter_df = f.get_pandas_dataframe(
data_size, stride, paths=paths)
df = pd.concat([df, filter_df], axis=1)
# Include DataFrame column for nuclides if user requested it
if nuclides:
nuclides = []
column_name = 'nuclide'
for nuclide in self.nuclides:
if isinstance(nuclide, openmc.Nuclide):
nuclides.append(nuclide.name)
elif isinstance(nuclide, openmc.AggregateNuclide):
nuclides.append(nuclide.name)
column_name = '{}(nuclide)'.format(nuclide.aggregate_op)
else:
nuclides.append(nuclide)
# Tile the nuclide bins into a DataFrame column
nuclides = np.repeat(nuclides, len(self.scores))
tile_factor = data_size / len(nuclides)
df[column_name] = np.tile(nuclides, int(tile_factor))
# Include column for scores if user requested it
if scores:
scores = []
column_name = 'score'
for score in self.scores:
if isinstance(score, (str, openmc.CrossScore)):
scores.append(str(score))
elif isinstance(score, openmc.AggregateScore):
scores.append(score.name)
column_name = '{}(score)'.format(score.aggregate_op)
tile_factor = data_size / len(self.scores)
df[column_name] = np.tile(scores, int(tile_factor))
# Include columns for derivatives if user requested it
if derivative and (self.derivative is not None):
df['d_variable'] = self.derivative.variable
if self.derivative.material is not None:
df['d_material'] = self.derivative.material
if self.derivative.nuclide is not None:
df['d_nuclide'] = self.derivative.nuclide
# Append columns with mean, std. dev. for each tally bin
df['mean'] = self.mean.ravel()
df['std. dev.'] = self.std_dev.ravel()
df = df.dropna(axis=1)
# Expand the columns into Pandas MultiIndices for readability
if pd.__version__ >= '0.16':
columns = copy.deepcopy(df.columns.values)
# Convert all elements in columns list to tuples
for i, column in enumerate(columns):
if not isinstance(column, tuple):
columns[i] = (column,)
# Make each tuple the same length
max_len_column = len(max(columns, key=len))
for i, column in enumerate(columns):
delta_len = max_len_column - len(column)
if delta_len > 0:
new_column = list(column)
new_column.extend(['']*delta_len)
columns[i] = tuple(new_column)
# Create and set a MultiIndex for the DataFrame's columns, but only
# if any column actually is multi-level (e.g., a mesh filter)
if any(len(c) > 1 for c in columns):
df.columns = pd.MultiIndex.from_tuples(columns)
# Modify the df.to_string method so that it prints formatted strings.
# Credit to http://stackoverflow.com/users/3657742/chrisb for this trick
df.to_string = partial(df.to_string, float_format=float_format.format)
return df
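    # Example usage (illustrative sketch, not part of the original source;
    # assumes a tally without multi-level columns such as a mesh filter):
    #
    #     df = tally.get_pandas_dataframe()
    #     absorption = df[df['score'] == 'absorption']
    #     print(absorption[['mean', 'std. dev.']])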
def get_reshaped_data(self, value='mean'):
"""Returns an array of tally data with one dimension per filter.
The tally data in OpenMC is stored as a 3D array with the dimensions
corresponding to filters, nuclides and scores. As a result, tally data
can be opaque for a user to directly index (i.e., without use of
:meth:`openmc.Tally.get_values`) since one must know how to properly use
the number of bins and strides for each filter to index into the first
(filter) dimension.
This builds and returns a reshaped version of the tally data array with
unique dimensions corresponding to each tally filter. For example,
suppose this tally has arrays of data with shape (8,5,5) corresponding
to two filters (2 and 4 bins, respectively), five nuclides and five
        scores. This method will return a version of the data array with a
        new shape of (2,4,5,5) such that the first two dimensions correspond
        directly to the two filters with two and four bins.
Parameters
----------
value : str
A string for the type of value to return - 'mean' (default),
'std_dev', 'rel_err', 'sum', or 'sum_sq' are accepted
Returns
-------
numpy.ndarray
The tally data array indexed by filters, nuclides and scores.
"""
# Get the 3D array of data in filters, nuclides and scores
data = self.get_values(value=value)
# Build a new array shape with one dimension per filter
new_shape = tuple(f.num_bins for f in self.filters)
new_shape += (self.num_nuclides, self.num_scores)
# Reshape the data with one dimension for each filter
data = np.reshape(data, new_shape)
return data
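    # Example usage (illustrative sketch, not part of the original source;
    # assumes a tally with two filters):
    #
    #     data = tally.get_reshaped_data(value='mean')
    #     # data.shape == (filter1_bins, filter2_bins, num_nuclides, num_scores)
    #     first_bin = data[0, 0, :, :]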
def hybrid_product(self, other, binary_op, filter_product=None,
nuclide_product=None, score_product=None):
"""Combines filters, scores and nuclides with another tally.
This is a helper method for the tally arithmetic operator overloaded
methods. It is called a "hybrid product" because it performs a
combination of tensor (or Kronecker) and entrywise (or Hadamard)
products. The filters from both tallies are combined using an entrywise
(or Hadamard) product on matching filters. By default, if all nuclides
are identical in the two tallies, the entrywise product is performed
across nuclides; else the tensor product is performed. By default, if
all scores are identical in the two tallies, the entrywise product is
performed across scores; else the tensor product is performed. Users
can also call the method explicitly and specify the desired product.
Parameters
----------
other : openmc.Tally
The tally on the right hand side of the hybrid product
binary_op : {'+', '-', '*', '/', '^'}
The binary operation in the hybrid product
filter_product : {'tensor', 'entrywise' or None}
The type of product (tensor or entrywise) to be performed between
filter data. The default is the entrywise product. Currently only
the entrywise product is supported since a tally cannot contain
two of the same filter.
nuclide_product : {'tensor', 'entrywise' or None}
The type of product (tensor or entrywise) to be performed between
nuclide data. The default is the entrywise product if all nuclides
between the two tallies are the same; otherwise the default is
the tensor product.
score_product : {'tensor', 'entrywise' or None}
The type of product (tensor or entrywise) to be performed between
score data. The default is the entrywise product if all scores
between the two tallies are the same; otherwise the default is
the tensor product.
Returns
-------
openmc.Tally
A new Tally that is the hybrid product with this one.
Raises
------
ValueError
When this method is called before the other tally is populated
with data.
"""
# Set default value for filter product if it was not set
if filter_product is None:
filter_product = 'entrywise'
elif filter_product == 'tensor':
            msg = 'Unable to perform Tally arithmetic with a tensor product ' \
                  'for the filter data as this is not currently supported.'
raise ValueError(msg)
# Set default value for nuclide product if it was not set
if nuclide_product is None:
if self.nuclides == other.nuclides:
nuclide_product = 'entrywise'
else:
nuclide_product = 'tensor'
# Set default value for score product if it was not set
if score_product is None:
if self.scores == other.scores:
score_product = 'entrywise'
else:
score_product = 'tensor'
# Check product types
cv.check_value('filter product', filter_product, _PRODUCT_TYPES)
cv.check_value('nuclide product', nuclide_product, _PRODUCT_TYPES)
cv.check_value('score product', score_product, _PRODUCT_TYPES)
# Check that results have been read
if not other.derived and other.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
'since it does not contain any results.'.format(other.id)
raise ValueError(msg)
new_tally = Tally()
new_tally._derived = True
new_tally.with_batch_statistics = True
new_tally._num_realizations = self.num_realizations
new_tally._estimator = self.estimator
new_tally._with_summary = self.with_summary
new_tally._sp_filename = self._sp_filename
# Construct a combined derived name from the two tally operands
if self.name != '' and other.name != '':
new_name = '({} {} {})'.format(self.name, binary_op, other.name)
new_tally.name = new_name
# Query the mean and std dev so the tally data is read in from file
# if it has not already been read in.
self.mean, self.std_dev, other.mean, other.std_dev
# Create copies of self and other tallies to rearrange for tally
# arithmetic
self_copy = copy.deepcopy(self)
other_copy = copy.deepcopy(other)
self_copy.sparse = False
other_copy.sparse = False
# Align the tally data based on desired hybrid product
data = self_copy._align_tally_data(other_copy, filter_product,
nuclide_product, score_product)
# Perform tally arithmetic operation
if binary_op == '+':
new_tally._mean = data['self']['mean'] + data['other']['mean']
new_tally._std_dev = np.sqrt(data['self']['std. dev.']**2 +
data['other']['std. dev.']**2)
elif binary_op == '-':
new_tally._mean = data['self']['mean'] - data['other']['mean']
new_tally._std_dev = np.sqrt(data['self']['std. dev.']**2 +
data['other']['std. dev.']**2)
elif binary_op == '*':
with np.errstate(divide='ignore', invalid='ignore'):
self_rel_err = data['self']['std. dev.'] / data['self']['mean']
other_rel_err = data['other']['std. dev.'] / data['other']['mean']
new_tally._mean = data['self']['mean'] * data['other']['mean']
new_tally._std_dev = np.abs(new_tally.mean) * \
np.sqrt(self_rel_err**2 + other_rel_err**2)
elif binary_op == '/':
with np.errstate(divide='ignore', invalid='ignore'):
self_rel_err = data['self']['std. dev.'] / data['self']['mean']
other_rel_err = data['other']['std. dev.'] / data['other']['mean']
new_tally._mean = data['self']['mean'] / data['other']['mean']
new_tally._std_dev = np.abs(new_tally.mean) * \
np.sqrt(self_rel_err**2 + other_rel_err**2)
elif binary_op == '^':
with np.errstate(divide='ignore', invalid='ignore'):
mean_ratio = data['other']['mean'] / data['self']['mean']
first_term = mean_ratio * data['self']['std. dev.']
second_term = \
np.log(data['self']['mean']) * data['other']['std. dev.']
new_tally._mean = data['self']['mean'] ** data['other']['mean']
new_tally._std_dev = np.abs(new_tally.mean) * \
np.sqrt(first_term**2 + second_term**2)
# Convert any infs and nans to zero
new_tally._mean[np.isinf(new_tally._mean)] = 0
new_tally._mean = np.nan_to_num(new_tally._mean)
new_tally._std_dev[np.isinf(new_tally._std_dev)] = 0
new_tally._std_dev = np.nan_to_num(new_tally._std_dev)
# Set tally attributes
if self_copy.estimator == other_copy.estimator:
new_tally.estimator = self_copy.estimator
if self_copy.with_summary and other_copy.with_summary:
new_tally.with_summary = self_copy.with_summary
if self_copy.num_realizations == other_copy.num_realizations:
new_tally.num_realizations = self_copy.num_realizations
# Add filters to the new tally
if filter_product == 'entrywise':
for self_filter in self_copy.filters:
new_tally.filters.append(self_filter)
else:
all_filters = [self_copy.filters, other_copy.filters]
for self_filter, other_filter in product(*all_filters):
new_filter = openmc.CrossFilter(self_filter, other_filter,
binary_op)
new_tally.filters.append(new_filter)
# Add nuclides to the new tally
if nuclide_product == 'entrywise':
for self_nuclide in self_copy.nuclides:
new_tally.nuclides.append(self_nuclide)
else:
all_nuclides = [self_copy.nuclides, other_copy.nuclides]
for self_nuclide, other_nuclide in product(*all_nuclides):
new_nuclide = openmc.CrossNuclide(self_nuclide, other_nuclide,
binary_op)
new_tally.nuclides.append(new_nuclide)
# Define helper function that handles score units appropriately
# depending on the binary operator
def cross_score(score1, score2, binary_op):
if binary_op == '+' or binary_op == '-':
if score1 == score2:
return score1
else:
return openmc.CrossScore(score1, score2, binary_op)
else:
return openmc.CrossScore(score1, score2, binary_op)
# Add scores to the new tally
if score_product == 'entrywise':
for self_score in self_copy.scores:
new_score = cross_score(self_score, self_score, binary_op)
new_tally.scores.append(new_score)
else:
all_scores = [self_copy.scores, other_copy.scores]
for self_score, other_score in product(*all_scores):
new_score = cross_score(self_score, other_score, binary_op)
new_tally.scores.append(new_score)
return new_tally
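    # Example usage (illustrative sketch, not part of the original source):
    # hybrid_product is normally invoked indirectly through the arithmetic
    # operators, e.g. to form a reaction-rate ratio with propagated errors.
    #
    #     ratio = abs_tally / flux_tally
    #     print(ratio.get_pandas_dataframe())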
@property
def filter_strides(self):
all_strides = []
stride = self.num_nuclides * self.num_scores
for self_filter in reversed(self.filters):
all_strides.append(stride)
stride *= self_filter.num_bins
return all_strides[::-1]
def _align_tally_data(self, other, filter_product, nuclide_product,
score_product):
"""Aligns data from two tallies for tally arithmetic.
This is a helper method to construct a dict of dicts of the "aligned"
data arrays from each tally for tally arithmetic. The method analyzes
the filters, scores and nuclides in both tallies and determines how to
appropriately align the data for vectorized arithmetic. For example,
if the two tallies have different filters, this method will use NumPy
        'tile' and 'repeat' operations to build new data arrays such that all
        possible combinations of the data in each tally's bins are formed
        when the arithmetic operation is applied to the arrays.
Parameters
----------
other : openmc.Tally
The tally to outer product with this tally
filter_product : {'entrywise'}
The type of product to be performed between filter data. Currently,
only the entrywise product is supported for the filter product.
nuclide_product : {'tensor', 'entrywise'}
The type of product (tensor or entrywise) to be performed between
nuclide data.
score_product : {'tensor', 'entrywise'}
The type of product (tensor or entrywise) to be performed between
score data.
Returns
-------
dict
            A dictionary of dictionaries of "aligned" 'mean' and 'std. dev.'
NumPy arrays for each tally's data.
"""
# Get the set of filters that each tally is missing
other_missing_filters = set(self.filters) - set(other.filters)
self_missing_filters = set(other.filters) - set(self.filters)
# Add filters present in self but not in other to other
for other_filter in other_missing_filters:
filter_copy = copy.deepcopy(other_filter)
other._mean = np.repeat(other.mean, filter_copy.num_bins, axis=0)
other._std_dev = np.repeat(other.std_dev, filter_copy.num_bins, axis=0)
other.filters.append(filter_copy)
# Add filters present in other but not in self to self
for self_filter in self_missing_filters:
filter_copy = copy.deepcopy(self_filter)
self._mean = np.repeat(self.mean, filter_copy.num_bins, axis=0)
self._std_dev = np.repeat(self.std_dev, filter_copy.num_bins, axis=0)
self.filters.append(filter_copy)
# Align other filters with self filters
for i, self_filter in enumerate(self.filters):
other_index = other.filters.index(self_filter)
# If necessary, swap other filter
if other_index != i:
other._swap_filters(self_filter, other.filters[i])
# Repeat and tile the data by nuclide in preparation for performing
# the tensor product across nuclides.
if nuclide_product == 'tensor':
self._mean = np.repeat(self.mean, other.num_nuclides, axis=1)
self._std_dev = np.repeat(self.std_dev, other.num_nuclides, axis=1)
other._mean = np.tile(other.mean, (1, self.num_nuclides, 1))
other._std_dev = np.tile(other.std_dev, (1, self.num_nuclides, 1))
# Add nuclides to each tally such that each tally contains the complete
# set of nuclides necessary to perform an entrywise product. New
# nuclides added to a tally will have all their scores set to zero.
else:
# Get the set of nuclides that each tally is missing
other_missing_nuclides = set(self.nuclides) - set(other.nuclides)
self_missing_nuclides = set(other.nuclides) - set(self.nuclides)
# Add nuclides present in self but not in other to other
for nuclide in other_missing_nuclides:
other._mean = np.insert(other.mean, other.num_nuclides, 0, axis=1)
other._std_dev = np.insert(other.std_dev, other.num_nuclides, 0,
axis=1)
other.nuclides.append(nuclide)
# Add nuclides present in other but not in self to self
for nuclide in self_missing_nuclides:
self._mean = np.insert(self.mean, self.num_nuclides, 0, axis=1)
self._std_dev = np.insert(self.std_dev, self.num_nuclides, 0,
axis=1)
self.nuclides.append(nuclide)
# Align other nuclides with self nuclides
for i, nuclide in enumerate(self.nuclides):
other_index = other.get_nuclide_index(nuclide)
# If necessary, swap other nuclide
if other_index != i:
other._swap_nuclides(nuclide, other.nuclides[i])
# Repeat and tile the data by score in preparation for performing
# the tensor product across scores.
if score_product == 'tensor':
self._mean = np.repeat(self.mean, other.num_scores, axis=2)
self._std_dev = np.repeat(self.std_dev, other.num_scores, axis=2)
other._mean = np.tile(other.mean, (1, 1, self.num_scores))
other._std_dev = np.tile(other.std_dev, (1, 1, self.num_scores))
# Add scores to each tally such that each tally contains the complete set
# of scores necessary to perform an entrywise product. New scores added
# to a tally will be set to zero.
else:
# Get the set of scores that each tally is missing
other_missing_scores = set(self.scores) - set(other.scores)
self_missing_scores = set(other.scores) - set(self.scores)
# Add scores present in self but not in other to other
for score in other_missing_scores:
other._mean = np.insert(other.mean, other.num_scores, 0, axis=2)
other._std_dev = np.insert(other.std_dev, other.num_scores, 0, axis=2)
other.scores.append(score)
# Add scores present in other but not in self to self
for score in self_missing_scores:
self._mean = np.insert(self.mean, self.num_scores, 0, axis=2)
self._std_dev = np.insert(self.std_dev, self.num_scores, 0, axis=2)
self.scores.append(score)
# Align other scores with self scores
for i, score in enumerate(self.scores):
other_index = other.scores.index(score)
# If necessary, swap other score
if other_index != i:
other._swap_scores(score, other.scores[i])
data = {}
data['self'] = {}
data['other'] = {}
data['self']['mean'] = self.mean
data['other']['mean'] = other.mean
data['self']['std. dev.'] = self.std_dev
data['other']['std. dev.'] = other.std_dev
return data
def _swap_filters(self, filter1, filter2):
"""Reverse the ordering of two filters in this tally
This is a helper method for tally arithmetic which helps align the data
in two tallies with shared filters. This method reverses the order of
the two filters in place.
Parameters
----------
filter1 : Filter
The filter to swap with filter2
filter2 : Filter
The filter to swap with filter1
Raises
------
ValueError
If this is a derived tally or this method is called before the tally
is populated with data.
"""
cv.check_type('filter1', filter1, _FILTER_CLASSES)
cv.check_type('filter2', filter2, _FILTER_CLASSES)
# Check that the filters exist in the tally and are not the same
if filter1 == filter2:
return
elif filter1 not in self.filters:
msg = 'Unable to swap "{}" filter1 in Tally ID="{}" since it ' \
'does not contain such a filter'.format(filter1.type, self.id)
raise ValueError(msg)
elif filter2 not in self.filters:
msg = 'Unable to swap "{}" filter2 in Tally ID="{}" since it ' \
'does not contain such a filter'.format(filter2.type, self.id)
raise ValueError(msg)
# Construct lists of tuples for the bins in each of the two filters
filters = [type(filter1), type(filter2)]
if isinstance(filter1, openmc.DistribcellFilter):
            filter1_bins = list(range(filter1.num_bins))
elif isinstance(filter1, openmc.EnergyFunctionFilter):
filter1_bins = [None]
else:
filter1_bins = filter1.bins
if isinstance(filter2, openmc.DistribcellFilter):
            filter2_bins = list(range(filter2.num_bins))
elif isinstance(filter2, openmc.EnergyFunctionFilter):
filter2_bins = [None]
else:
filter2_bins = filter2.bins
# Create variables to store views of data in the misaligned structure
mean = {}
std_dev = {}
# Store the data from the misaligned structure
for i, (bin1, bin2) in enumerate(product(filter1_bins, filter2_bins)):
filter_bins = [(bin1,), (bin2,)]
if self.mean is not None:
mean[i] = self.get_values(
filters=filters, filter_bins=filter_bins, value='mean')
if self.std_dev is not None:
std_dev[i] = self.get_values(
filters=filters, filter_bins=filter_bins, value='std_dev')
# Swap the filters in the copied version of this Tally
filter1_index = self.filters.index(filter1)
filter2_index = self.filters.index(filter2)
self.filters[filter1_index] = filter2
self.filters[filter2_index] = filter1
# Realign the data
for i, (bin1, bin2) in enumerate(product(filter1_bins, filter2_bins)):
filter_bins = [(bin1,), (bin2,)]
indices = self.get_filter_indices(filters, filter_bins)
if self.mean is not None:
self.mean[indices, :, :] = mean[i]
if self.std_dev is not None:
self.std_dev[indices, :, :] = std_dev[i]
def _swap_nuclides(self, nuclide1, nuclide2):
"""Reverse the ordering of two nuclides in this tally
This is a helper method for tally arithmetic which helps align the data
in two tallies with shared nuclides. This method reverses the order of
the two nuclides in place.
Parameters
----------
nuclide1 : Nuclide
The nuclide to swap with nuclide2
nuclide2 : Nuclide
The nuclide to swap with nuclide1
Raises
------
ValueError
If this is a derived tally or this method is called before the tally
is populated with data.
"""
# Check that results have been read
if not self.derived and self.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
'since it does not contain any results.'.format(self.id)
raise ValueError(msg)
cv.check_type('nuclide1', nuclide1, _NUCLIDE_CLASSES)
cv.check_type('nuclide2', nuclide2, _NUCLIDE_CLASSES)
# Check that the nuclides exist in the tally and are not the same
if nuclide1 == nuclide2:
msg = 'Unable to swap a nuclide with itself'
raise ValueError(msg)
elif nuclide1 not in self.nuclides:
msg = 'Unable to swap nuclide1 "{}" in Tally ID="{}" since it ' \
'does not contain such a nuclide'\
.format(nuclide1.name, self.id)
raise ValueError(msg)
elif nuclide2 not in self.nuclides:
msg = 'Unable to swap "{}" nuclide2 in Tally ID="{}" since it ' \
'does not contain such a nuclide'\
.format(nuclide2.name, self.id)
raise ValueError(msg)
# Swap the nuclides in the Tally
nuclide1_index = self.get_nuclide_index(nuclide1)
nuclide2_index = self.get_nuclide_index(nuclide2)
self.nuclides[nuclide1_index] = nuclide2
self.nuclides[nuclide2_index] = nuclide1
        # Adjust the mean data array to reflect the new nuclide order
if self.mean is not None:
nuclide1_mean = self.mean[:, nuclide1_index, :].copy()
nuclide2_mean = self.mean[:, nuclide2_index, :].copy()
self.mean[:, nuclide2_index, :] = nuclide1_mean
self.mean[:, nuclide1_index, :] = nuclide2_mean
        # Adjust the std_dev data array to reflect the new nuclide order
if self.std_dev is not None:
nuclide1_std_dev = self.std_dev[:, nuclide1_index, :].copy()
nuclide2_std_dev = self.std_dev[:, nuclide2_index, :].copy()
self.std_dev[:, nuclide2_index, :] = nuclide1_std_dev
self.std_dev[:, nuclide1_index, :] = nuclide2_std_dev
def _swap_scores(self, score1, score2):
"""Reverse the ordering of two scores in this tally
This is a helper method for tally arithmetic which helps align the data
in two tallies with shared scores. This method reverses the order
of the two scores in place.
Parameters
----------
score1 : str or CrossScore
The score to swap with score2
score2 : str or CrossScore
The score to swap with score1
Raises
------
ValueError
If this is a derived tally or this method is called before the tally
is populated with data.
"""
# Check that results have been read
if not self.derived and self.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
'since it does not contain any results.'.format(self.id)
raise ValueError(msg)
# Check that the scores are valid
if not isinstance(score1, (str, openmc.CrossScore)):
msg = 'Unable to swap score1 "{}" in Tally ID="{}" since it is ' \
'not a string or CrossScore'.format(score1, self.id)
raise ValueError(msg)
elif not isinstance(score2, (str, openmc.CrossScore)):
msg = 'Unable to swap score2 "{}" in Tally ID="{}" since it is ' \
'not a string or CrossScore'.format(score2, self.id)
raise ValueError(msg)
# Check that the scores exist in the tally and are not the same
if score1 == score2:
msg = 'Unable to swap a score with itself'
raise ValueError(msg)
elif score1 not in self.scores:
msg = 'Unable to swap score1 "{}" in Tally ID="{}" since it ' \
'does not contain such a score'.format(score1, self.id)
raise ValueError(msg)
elif score2 not in self.scores:
msg = 'Unable to swap score2 "{}" in Tally ID="{}" since it ' \
'does not contain such a score'.format(score2, self.id)
raise ValueError(msg)
# Swap the scores in the Tally
score1_index = self.get_score_index(score1)
score2_index = self.get_score_index(score2)
self.scores[score1_index] = score2
self.scores[score2_index] = score1
        # Adjust the mean data array to reflect the new score order
if self.mean is not None:
score1_mean = self.mean[:, :, score1_index].copy()
score2_mean = self.mean[:, :, score2_index].copy()
self.mean[:, :, score2_index] = score1_mean
self.mean[:, :, score1_index] = score2_mean
        # Adjust the std_dev data array to reflect the new score order
if self.std_dev is not None:
score1_std_dev = self.std_dev[:, :, score1_index].copy()
score2_std_dev = self.std_dev[:, :, score2_index].copy()
self.std_dev[:, :, score2_index] = score1_std_dev
self.std_dev[:, :, score1_index] = score2_std_dev
def __add__(self, other):
"""Adds this tally to another tally or scalar value.
This method builds a new tally with data that is the sum of this
tally's data and that from the other tally or scalar value. If the
filters, scores and nuclides in the two tallies are not the same, then
they are combined in all possible ways in the new derived tally.
Uncertainty propagation is used to compute the standard deviation
for the new tally's data. It is important to note that this makes
the assumption that the tally data is independently distributed.
In most use cases, this is *not* true and may lead to under-prediction
of the uncertainty. The uncertainty propagation model is from the
following source:
https://en.wikipedia.org/wiki/Propagation_of_uncertainty
Parameters
----------
other : openmc.Tally or float
The tally or scalar value to add to this tally
Returns
-------
openmc.Tally
A new derived tally which is the sum of this tally and the other
tally or scalar value in the addition.
Raises
------
ValueError
When this method is called before the Tally is populated with data.
"""
# Check that results have been read
if not self.derived and self.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
'since it does not contain any results.'.format(self.id)
raise ValueError(msg)
if isinstance(other, Tally):
new_tally = self.hybrid_product(other, binary_op='+')
# If both tally operands were sparse, sparsify the new tally
if self.sparse and other.sparse:
new_tally.sparse = True
elif isinstance(other, Real):
new_tally = Tally(name='derived')
new_tally._derived = True
new_tally.with_batch_statistics = True
new_tally.name = self.name
new_tally._mean = self.mean + other
new_tally._std_dev = self.std_dev
new_tally.estimator = self.estimator
new_tally.with_summary = self.with_summary
new_tally.num_realizations = self.num_realizations
new_tally.filters = copy.deepcopy(self.filters)
new_tally.nuclides = copy.deepcopy(self.nuclides)
new_tally.scores = copy.deepcopy(self.scores)
# If this tally operand is sparse, sparsify the new tally
new_tally.sparse = self.sparse
else:
msg = 'Unable to add "{}" to Tally ID="{}"'.format(other, self.id)
raise ValueError(msg)
return new_tally
def __sub__(self, other):
"""Subtracts another tally or scalar value from this tally.
This method builds a new tally with data that is the difference of
this tally's data and that from the other tally or scalar value. If the
filters, scores and nuclides in the two tallies are not the same, then
they are combined in all possible ways in the new derived tally.
Uncertainty propagation is used to compute the standard deviation
for the new tally's data. It is important to note that this makes
the assumption that the tally data is independently distributed.
In most use cases, this is *not* true and may lead to under-prediction
of the uncertainty. The uncertainty propagation model is from the
following source:
https://en.wikipedia.org/wiki/Propagation_of_uncertainty
Parameters
----------
other : openmc.Tally or float
The tally or scalar value to subtract from this tally
Returns
-------
openmc.Tally
A new derived tally which is the difference of this tally and the
other tally or scalar value in the subtraction.
Raises
------
ValueError
When this method is called before the Tally is populated with data.
"""
# Check that results have been read
if not self.derived and self.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
'since it does not contain any results.'.format(self.id)
raise ValueError(msg)
if isinstance(other, Tally):
new_tally = self.hybrid_product(other, binary_op='-')
# If both tally operands were sparse, sparsify the new tally
if self.sparse and other.sparse:
new_tally.sparse = True
elif isinstance(other, Real):
new_tally = Tally(name='derived')
new_tally._derived = True
new_tally.name = self.name
new_tally._mean = self.mean - other
new_tally._std_dev = self.std_dev
new_tally.estimator = self.estimator
new_tally.with_summary = self.with_summary
new_tally.num_realizations = self.num_realizations
new_tally.filters = copy.deepcopy(self.filters)
new_tally.nuclides = copy.deepcopy(self.nuclides)
new_tally.scores = copy.deepcopy(self.scores)
# If this tally operand is sparse, sparsify the new tally
new_tally.sparse = self.sparse
else:
msg = 'Unable to subtract "{}" from Tally ID="{}"'.format(other, self.id)
raise ValueError(msg)
return new_tally
def __mul__(self, other):
"""Multiplies this tally with another tally or scalar value.
This method builds a new tally with data that is the product of
this tally's data and that from the other tally or scalar value. If the
filters, scores and nuclides in the two tallies are not the same, then
they are combined in all possible ways in the new derived tally.
Uncertainty propagation is used to compute the standard deviation
for the new tally's data. It is important to note that this makes
the assumption that the tally data is independently distributed.
In most use cases, this is *not* true and may lead to under-prediction
of the uncertainty. The uncertainty propagation model is from the
following source:
https://en.wikipedia.org/wiki/Propagation_of_uncertainty
Parameters
----------
other : openmc.Tally or float
The tally or scalar value to multiply with this tally
Returns
-------
openmc.Tally
A new derived tally which is the product of this tally and the
other tally or scalar value in the multiplication.
Raises
------
ValueError
When this method is called before the Tally is populated with data.
"""
# Check that results have been read
if not self.derived and self.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
'since it does not contain any results.'.format(self.id)
raise ValueError(msg)
if isinstance(other, Tally):
new_tally = self.hybrid_product(other, binary_op='*')
# If original tally operands were sparse, sparsify the new tally
if self.sparse and other.sparse:
new_tally.sparse = True
elif isinstance(other, Real):
new_tally = Tally(name='derived')
new_tally._derived = True
new_tally.name = self.name
new_tally._mean = self.mean * other
new_tally._std_dev = self.std_dev * np.abs(other)
new_tally.estimator = self.estimator
new_tally.with_summary = self.with_summary
new_tally.num_realizations = self.num_realizations
new_tally.filters = copy.deepcopy(self.filters)
new_tally.nuclides = copy.deepcopy(self.nuclides)
new_tally.scores = copy.deepcopy(self.scores)
# If this tally operand is sparse, sparsify the new tally
new_tally.sparse = self.sparse
else:
msg = 'Unable to multiply Tally ID="{}" by "{}"'.format(self.id, other)
raise ValueError(msg)
return new_tally
def __truediv__(self, other):
"""Divides this tally by another tally or scalar value.
This method builds a new tally with data that is the dividend of
this tally's data and that from the other tally or scalar value. If the
filters, scores and nuclides in the two tallies are not the same, then
they are combined in all possible ways in the new derived tally.
Uncertainty propagation is used to compute the standard deviation
for the new tally's data. It is important to note that this makes
the assumption that the tally data is independently distributed.
In most use cases, this is *not* true and may lead to under-prediction
of the uncertainty. The uncertainty propagation model is from the
following source:
https://en.wikipedia.org/wiki/Propagation_of_uncertainty
Parameters
----------
other : openmc.Tally or float
The tally or scalar value to divide this tally by
Returns
-------
openmc.Tally
            A new derived tally which is the quotient of this tally and the
other tally or scalar value in the division.
Raises
------
ValueError
When this method is called before the Tally is populated with data.
"""
# Check that results have been read
if not self.derived and self.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
'since it does not contain any results.'.format(self.id)
raise ValueError(msg)
if isinstance(other, Tally):
new_tally = self.hybrid_product(other, binary_op='/')
# If original tally operands were sparse, sparsify the new tally
if self.sparse and other.sparse:
new_tally.sparse = True
elif isinstance(other, Real):
new_tally = Tally(name='derived')
new_tally._derived = True
new_tally.name = self.name
new_tally._mean = self.mean / other
new_tally._std_dev = self.std_dev * np.abs(1. / other)
new_tally.estimator = self.estimator
new_tally.with_summary = self.with_summary
new_tally.num_realizations = self.num_realizations
new_tally.filters = copy.deepcopy(self.filters)
new_tally.nuclides = copy.deepcopy(self.nuclides)
new_tally.scores = copy.deepcopy(self.scores)
# If this tally operand is sparse, sparsify the new tally
new_tally.sparse = self.sparse
else:
msg = 'Unable to divide Tally ID="{}" by "{}"'.format(self.id, other)
raise ValueError(msg)
return new_tally
def __div__(self, other):
return self.__truediv__(other)
def __pow__(self, power):
"""Raises this tally to another tally or scalar value power.
This method builds a new tally with data that is the power of
this tally's data to that from the other tally or scalar value. If the
filters, scores and nuclides in the two tallies are not the same, then
they are combined in all possible ways in the new derived tally.
Uncertainty propagation is used to compute the standard deviation
for the new tally's data. It is important to note that this makes
the assumption that the tally data is independently distributed.
In most use cases, this is *not* true and may lead to under-prediction
of the uncertainty. The uncertainty propagation model is from the
following source:
https://en.wikipedia.org/wiki/Propagation_of_uncertainty
Parameters
----------
power : openmc.Tally or float
The tally or scalar value exponent
Returns
-------
openmc.Tally
A new derived tally which is this tally raised to the power of the
other tally or scalar value in the exponentiation.
Raises
------
ValueError
When this method is called before the Tally is populated with data.
"""
# Check that results have been read
if not self.derived and self.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
'since it does not contain any results.'.format(self.id)
raise ValueError(msg)
if isinstance(power, Tally):
new_tally = self.hybrid_product(power, binary_op='^')
# If original tally operand was sparse, sparsify the new tally
if self.sparse:
new_tally.sparse = True
elif isinstance(power, Real):
new_tally = Tally(name='derived')
new_tally._derived = True
new_tally.name = self.name
new_tally._mean = self._mean ** power
self_rel_err = self.std_dev / self.mean
new_tally._std_dev = np.abs(new_tally._mean * power * self_rel_err)
new_tally.estimator = self.estimator
new_tally.with_summary = self.with_summary
new_tally.num_realizations = self.num_realizations
new_tally.filters = copy.deepcopy(self.filters)
new_tally.nuclides = copy.deepcopy(self.nuclides)
new_tally.scores = copy.deepcopy(self.scores)
# If original tally was sparse, sparsify the exponentiated tally
new_tally.sparse = self.sparse
else:
msg = 'Unable to raise Tally ID="{}" to power "{}"'.format(self.id, power)
raise ValueError(msg)
return new_tally
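# A minimal, hedged sketch of the scalar-power branch above (the tally name
# below is hypothetical; assumes `flux` is a Tally already populated with
# results, e.g. read from a statepoint file):
#
#     squared = flux ** 2
#
# For y = x**p with independent data, first-order uncertainty propagation
# gives sigma_y ~= |mu**p * p * (sigma_x / mu)| = |p * mu**(p-1) * sigma_x|,
# which is exactly what new_tally._std_dev computes above.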
def __radd__(self, other):
"""Right addition with a scalar value.
This reverses the operands and calls the __add__ method.
Parameters
----------
other : float
The scalar value to add to this tally
Returns
-------
openmc.Tally
A new derived tally of this tally added with the scalar value.
"""
return self + other
def __rsub__(self, other):
"""Right subtraction from a scalar value.
This reverses the operands and calls the __sub__ method.
Parameters
----------
other : float
The scalar value to subtract this tally from
Returns
-------
openmc.Tally
A new derived tally of this tally subtracted from the scalar value.
"""
return -1. * self + other
def __rmul__(self, other):
"""Right multiplication with a scalar value.
This reverses the operands and calls the __mul__ method.
Parameters
----------
other : float
The scalar value to multiply with this tally
Returns
-------
openmc.Tally
A new derived tally of this tally multiplied by the scalar value.
"""
return self * other
def __rdiv__(self, other):
"""Right division with a scalar value.
This reverses the operands and computes the result as ``other * self**-1``.
Parameters
----------
other : float
The scalar value to divide by this tally
Returns
-------
openmc.Tally
A new derived tally of the scalar value divided by this tally.
"""
return other * self**-1
def __abs__(self):
"""The absolute value of this tally.
Returns
-------
openmc.Tally
A new derived tally which is the absolute value of this tally.
"""
new_tally = copy.deepcopy(self)
new_tally._mean = np.abs(new_tally.mean)
return new_tally
def __neg__(self):
"""The negated value of this tally.
Returns
-------
openmc.Tally
A new derived tally which is the negated value of this tally.
"""
new_tally = self * -1
return new_tally
def get_slice(self, scores=[], filters=[], filter_bins=[], nuclides=[],
squeeze=False):
"""Build a sliced tally for the specified filters, scores and nuclides.
This method constructs a new tally to encapsulate a subset of the data
represented by this tally. The subset of data to include in the tally
slice is determined by the scores, filters and nuclides specified in
the input parameters.
Parameters
----------
scores : list of str
A list of one or more score strings (e.g., ['absorption',
'nu-fission'])
filters : Iterable of openmc.FilterMeta
An iterable of filter types (e.g., [MeshFilter, EnergyFilter])
filter_bins : list of Iterables
A list of iterables of filter bins corresponding to the specified
filter types (e.g., [(1,), ((0., 0.625e-6),)]). Each iterable
contains bins to slice for the corresponding filter type in the
filters parameter. Each bin is the integer ID for 'material',
'surface', 'cell', 'cellborn', and 'universe' Filters. Each bin is
an integer for the cell instance ID for 'distribcell' Filters. Each
bin is a 2-tuple of floats for 'energy' and 'energyout' filters
corresponding to the energy boundaries of the bin of interest. The
bin is an (x,y,z) 3-tuple for 'mesh' filters corresponding to the
mesh cell of interest. The order of the bins in the list must
correspond to the `filters` argument.
nuclides : list of str
A list of nuclide name strings (e.g., ['U235', 'U238'])
squeeze : bool
Whether to remove filters with only a single bin in the sliced tally
Returns
-------
openmc.Tally
A new tally which encapsulates the subset of data requested in the
order each filter, nuclide and score is listed in the parameters.
Raises
------
ValueError
When this method is called before the Tally is populated with data.
"""
# Ensure that the tally has data
if not self.derived and self.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
'since it does not contain any results.'.format(self.id)
raise ValueError(msg)
# Create deep copy of tally to return as sliced tally
new_tally = copy.deepcopy(self)
new_tally._derived = True
# Differentiate Tally with a new auto-generated Tally ID
new_tally.id = None
new_tally.sparse = False
if not self.derived and self.sum is not None:
new_sum = self.get_values(scores, filters, filter_bins,
nuclides, 'sum')
new_tally.sum = new_sum
if not self.derived and self.sum_sq is not None:
new_sum_sq = self.get_values(scores, filters, filter_bins,
nuclides, 'sum_sq')
new_tally.sum_sq = new_sum_sq
if self.mean is not None:
new_mean = self.get_values(scores, filters, filter_bins,
nuclides, 'mean')
new_tally._mean = new_mean
if self.std_dev is not None:
new_std_dev = self.get_values(scores, filters, filter_bins,
nuclides, 'std_dev')
new_tally._std_dev = new_std_dev
# SCORES
if scores:
score_indices = []
# Determine the score indices from any of the requested scores
for score in self.scores:
if score not in scores:
score_index = self.get_score_index(score)
score_indices.append(score_index)
# Loop over indices in reverse to remove excluded scores
for score_index in reversed(score_indices):
new_tally.remove_score(self.scores[score_index])
# NUCLIDES
if nuclides:
nuclide_indices = []
# Determine the nuclide indices from any of the requested nuclides
for nuclide in self.nuclides:
if nuclide.name not in nuclides:
nuclide_index = self.get_nuclide_index(nuclide.name)
nuclide_indices.append(nuclide_index)
# Loop over indices in reverse to remove excluded Nuclides
for nuclide_index in reversed(nuclide_indices):
new_tally.remove_nuclide(self.nuclides[nuclide_index])
# FILTERS
if filters:
# Determine the filter indices from any of the requested filters
for i, filter_type in enumerate(filters):
f = new_tally.find_filter(filter_type)
# Remove filters with only a single bin if requested
if squeeze:
if len(filter_bins[i]) == 1:
new_tally.filters.remove(f)
continue
else:
raise RuntimeError('Cannot remove sliced filter with '
'more than one bin.')
# Remove and/or reorder filter bins to user specifications
bin_indices = [f.get_bin_index(b)
for b in filter_bins[i]]
bin_indices = np.unique(bin_indices)
# Set bins for sliced filter
new_filter = copy.copy(f)
new_filter.bins = [f.bins[i] for i in bin_indices]
# Set number of bins manually for mesh/distribcell filters
if filter_type is openmc.DistribcellFilter:
new_filter._num_bins = f._num_bins
# Replace existing filter with new one
for j, test_filter in enumerate(new_tally.filters):
if isinstance(test_filter, filter_type):
new_tally.filters[j] = new_filter
# If original tally was sparse, sparsify the sliced tally
new_tally.sparse = self.sparse
return new_tally
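# A hedged usage sketch for get_slice (tally, score and nuclide names are
# hypothetical; assumes the tally holds results read from a statepoint):
#
#     thermal_fission = tally.get_slice(scores=['fission'],
#                                       filters=[openmc.EnergyFilter],
#                                       filter_bins=[((0., 0.625e-6),)],
#                                       nuclides=['U235'])
#
# The sliced tally keeps only the requested score, energy bin and nuclide.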
def summation(self, scores=[], filter_type=None,
filter_bins=[], nuclides=[], remove_filter=False):
"""Vectorized sum of tally data across scores, filter bins and/or
nuclides using tally aggregation.
This method constructs a new tally that encapsulates the sum of the
data in this tally. The subset of data to sum over is determined by the
scores, filter bins and nuclides specified in the input parameters.
Parameters
----------
scores : list of str
A list of one or more score strings to sum across
(e.g., ['absorption', 'nu-fission']; default is [])
filter_type : openmc.FilterMeta
Type of the filter, e.g. MeshFilter
filter_bins : Iterable of int or tuple
A list of the filter bins corresponding to the filter_type parameter
Each bin in the list is the integer ID for 'material', 'surface',
'cell', 'cellborn', and 'universe' Filters. Each bin is an integer
for the cell instance ID for 'distribcell' Filters. Each bin is a
2-tuple of floats for 'energy' and 'energyout' filters corresponding
to the energy boundaries of the bin of interest. Each bin is an
(x,y,z) 3-tuple for 'mesh' filters corresponding to the mesh cell of
interest.
nuclides : list of str
A list of nuclide name strings to sum across
(e.g., ['U235', 'U238']; default is [])
remove_filter : bool
If a filter is being summed over, this bool indicates whether to
remove that filter in the returned tally. Default is False.
Returns
-------
openmc.Tally
A new tally which encapsulates the sum of data requested.
"""
# Create new derived Tally for summation
tally_sum = Tally()
tally_sum._derived = True
tally_sum._estimator = self.estimator
tally_sum._num_realizations = self.num_realizations
tally_sum._with_batch_statistics = self.with_batch_statistics
tally_sum._with_summary = self.with_summary
tally_sum._sp_filename = self._sp_filename
tally_sum._results_read = self._results_read
# Get tally data arrays reshaped with one dimension per filter
mean = self.get_reshaped_data(value='mean')
std_dev = self.get_reshaped_data(value='std_dev')
# Sum across any filter bins specified by the user
if isinstance(filter_type, openmc.FilterMeta):
find_filter = self.find_filter(filter_type)
# If user did not specify filter bins, sum across all bins
if len(filter_bins) == 0:
bin_indices = np.arange(find_filter.num_bins)
if isinstance(find_filter, openmc.DistribcellFilter):
filter_bins = np.arange(find_filter.num_bins)
elif isinstance(find_filter, openmc.EnergyFunctionFilter):
filter_bins = [None]
else:
filter_bins = find_filter.bins
# Only sum across bins specified by the user
else:
bin_indices = \
[find_filter.get_bin_index(bin) for bin in filter_bins]
# Sum across the bins in the user-specified filter
for i, self_filter in enumerate(self.filters):
if type(self_filter) == filter_type:
shape = mean.shape
mean = np.take(mean, indices=bin_indices, axis=i)
std_dev = np.take(std_dev, indices=bin_indices, axis=i)
# NumPy take introduces a new dimension in output array
# for some special cases that must be removed
if len(mean.shape) > len(shape):
mean = np.squeeze(mean, axis=i)
std_dev = np.squeeze(std_dev, axis=i)
mean = np.sum(mean, axis=i, keepdims=True)
std_dev = np.sum(std_dev**2, axis=i, keepdims=True)
std_dev = np.sqrt(std_dev)
# Add AggregateFilter to the tally sum
if not remove_filter:
filter_sum = openmc.AggregateFilter(self_filter,
[tuple(filter_bins)], 'sum')
tally_sum.filters.append(filter_sum)
# Add a copy of each filter not summed across to the tally sum
else:
tally_sum.filters.append(copy.deepcopy(self_filter))
# Add a copy of this tally's filters to the tally sum
else:
tally_sum._filters = copy.deepcopy(self.filters)
# Sum across any nuclides specified by the user
if len(nuclides) != 0:
nuclide_bins = [self.get_nuclide_index(nuclide) for nuclide in nuclides]
axis_index = self.num_filters
mean = np.take(mean, indices=nuclide_bins, axis=axis_index)
std_dev = np.take(std_dev, indices=nuclide_bins, axis=axis_index)
mean = np.sum(mean, axis=axis_index, keepdims=True)
std_dev = np.sum(std_dev**2, axis=axis_index, keepdims=True)
std_dev = np.sqrt(std_dev)
# Add AggregateNuclide to the tally sum
nuclide_sum = openmc.AggregateNuclide(nuclides, 'sum')
tally_sum.nuclides.append(nuclide_sum)
# Add a copy of this tally's nuclides to the tally sum
else:
tally_sum._nuclides = copy.deepcopy(self.nuclides)
# Sum across any scores specified by the user
if len(scores) != 0:
score_bins = [self.get_score_index(score) for score in scores]
axis_index = self.num_filters + 1
mean = np.take(mean, indices=score_bins, axis=axis_index)
std_dev = np.take(std_dev, indices=score_bins, axis=axis_index)
mean = np.sum(mean, axis=axis_index, keepdims=True)
std_dev = np.sum(std_dev**2, axis=axis_index, keepdims=True)
std_dev = np.sqrt(std_dev)
# Add AggregateScore to the tally sum
score_sum = openmc.AggregateScore(scores, 'sum')
tally_sum.scores.append(score_sum)
# Add a copy of this tally's scores to the tally sum
else:
tally_sum._scores = copy.deepcopy(self.scores)
# Reshape condensed data arrays with one dimension for all filters
mean = np.reshape(mean, tally_sum.shape)
std_dev = np.reshape(std_dev, tally_sum.shape)
# Assign tally sum's data with the new arrays
tally_sum._mean = mean
tally_sum._std_dev = std_dev
# If original tally was sparse, sparsify the tally summation
tally_sum.sparse = self.sparse
return tally_sum
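# A hedged example of summation (hypothetical names; assumes a populated tally
# with an EnergyFilter). The standard deviations of the summed bins are
# combined in quadrature, sqrt(sum(sigma_i**2)), which is only strictly valid
# for independent bins:
#
#     total_over_energy = tally.summation(filter_type=openmc.EnergyFilter,
#                                         remove_filter=True)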
def average(self, scores=[], filter_type=None,
filter_bins=[], nuclides=[], remove_filter=False):
"""Vectorized average of tally data across scores, filter bins and/or
nuclides using tally aggregation.
This method constructs a new tally that encapsulates the average of
the data in this tally. The subset of data to average over is
determined by the scores, filter bins and nuclides specified in the
input parameters.
Parameters
----------
scores : list of str
A list of one or more score strings to average across
(e.g., ['absorption', 'nu-fission']; default is [])
filter_type : openmc.FilterMeta
Type of the filter, e.g. MeshFilter
filter_bins : Iterable of int or tuple
A list of the filter bins corresponding to the filter_type parameter
Each bin in the list is the integer ID for 'material', 'surface',
'cell', 'cellborn', and 'universe' Filters. Each bin is an integer
for the cell instance ID for 'distribcell' Filters. Each bin is a
2-tuple of floats for 'energy' and 'energyout' filters corresponding
to the energy boundaries of the bin of interest. Each bin is an
(x,y,z) 3-tuple for 'mesh' filters corresponding to the mesh cell of
interest.
nuclides : list of str
A list of nuclide name strings to average across
(e.g., ['U235', 'U238']; default is [])
remove_filter : bool
If a filter is being averaged over, this bool indicates whether to
remove that filter in the returned tally. Default is False.
Returns
-------
openmc.Tally
A new tally which encapsulates the average of data requested.
"""
# Create new derived Tally for average
tally_avg = Tally()
tally_avg._derived = True
tally_avg._estimator = self.estimator
tally_avg._num_realizations = self.num_realizations
tally_avg._with_batch_statistics = self.with_batch_statistics
tally_avg._with_summary = self.with_summary
tally_avg._sp_filename = self._sp_filename
tally_avg._results_read = self._results_read
# Get tally data arrays reshaped with one dimension per filter
mean = self.get_reshaped_data(value='mean')
std_dev = self.get_reshaped_data(value='std_dev')
# Average across any filter bins specified by the user
if isinstance(filter_type, openmc.FilterMeta):
find_filter = self.find_filter(filter_type)
# If user did not specify filter bins, average across all bins
if len(filter_bins) == 0:
bin_indices = np.arange(find_filter.num_bins)
if isinstance(find_filter, openmc.DistribcellFilter):
filter_bins = np.arange(find_filter.num_bins)
elif isinstance(find_filter, openmc.EnergyFunctionFilter):
filter_bins = [None]
else:
filter_bins = find_filter.bins
# Only average across bins specified by the user
else:
bin_indices = \
[find_filter.get_bin_index(bin) for bin in filter_bins]
# Average across the bins in the user-specified filter
for i, self_filter in enumerate(self.filters):
if isinstance(self_filter, filter_type):
shape = mean.shape
mean = np.take(mean, indices=bin_indices, axis=i)
std_dev = np.take(std_dev, indices=bin_indices, axis=i)
# NumPy take introduces a new dimension in output array
# for some special cases that must be removed
if len(mean.shape) > len(shape):
mean = np.squeeze(mean, axis=i)
std_dev = np.squeeze(std_dev, axis=i)
mean = np.nanmean(mean, axis=i, keepdims=True)
std_dev = np.nanmean(std_dev**2, axis=i, keepdims=True)
std_dev /= len(bin_indices)
std_dev = np.sqrt(std_dev)
# Add AggregateFilter to the tally avg
if not remove_filter:
filter_sum = openmc.AggregateFilter(self_filter,
[tuple(filter_bins)], 'avg')
tally_avg.filters.append(filter_sum)
# Add a copy of each filter not averaged across to the tally avg
else:
tally_avg.filters.append(copy.deepcopy(self_filter))
# Add a copy of this tally's filters to the tally avg
else:
tally_avg._filters = copy.deepcopy(self.filters)
# Average across any nuclides specified by the user
if len(nuclides) != 0:
nuclide_bins = [self.get_nuclide_index(nuclide) for nuclide in nuclides]
axis_index = self.num_filters
mean = np.take(mean, indices=nuclide_bins, axis=axis_index)
std_dev = np.take(std_dev, indices=nuclide_bins, axis=axis_index)
mean = np.nanmean(mean, axis=axis_index, keepdims=True)
std_dev = np.nanmean(std_dev**2, axis=axis_index, keepdims=True)
std_dev /= len(nuclide_bins)
std_dev = np.sqrt(std_dev)
# Add AggregateNuclide to the tally avg
nuclide_avg = openmc.AggregateNuclide(nuclides, 'avg')
tally_avg.nuclides.append(nuclide_avg)
# Add a copy of this tally's nuclides to the tally avg
else:
tally_avg._nuclides = copy.deepcopy(self.nuclides)
# Average across any scores specified by the user
if len(scores) != 0:
score_bins = [self.get_score_index(score) for score in scores]
axis_index = self.num_filters + 1
mean = np.take(mean, indices=score_bins, axis=axis_index)
std_dev = np.take(std_dev, indices=score_bins, axis=axis_index)
mean = np.nanmean(mean, axis=axis_index, keepdims=True)
std_dev = np.nanmean(std_dev**2, axis=axis_index, keepdims=True)
std_dev /= len(score_bins)
std_dev = np.sqrt(std_dev)
# Add AggregateScore to the tally avg
score_sum = openmc.AggregateScore(scores, 'avg')
tally_avg.scores.append(score_sum)
# Add a copy of this tally's scores to the tally avg
else:
tally_avg._scores = copy.deepcopy(self.scores)
# Reshape condensed data arrays with one dimension for all filters
mean = np.reshape(mean, tally_avg.shape)
std_dev = np.reshape(std_dev, tally_avg.shape)
# Assign tally avg's data with the new arrays
tally_avg._mean = mean
tally_avg._std_dev = std_dev
# If original tally was sparse, sparsify the tally average
tally_avg.sparse = self.sparse
return tally_avg
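# Hedged note on the averaging above: for M averaged bins the reported
# uncertainty is sqrt(mean(sigma_i**2) / M), i.e. a standard error that again
# assumes independent bins. For example, averaging two bins with sigma = 0.3
# and 0.4 gives sqrt(((0.09 + 0.16) / 2) / 2) = 0.25.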
def diagonalize_filter(self, new_filter, filter_position=-1):
"""Diagonalize the tally data array along a new axis of filter bins.
This is a helper method for the tally arithmetic methods. It adds the
new filter to a derived tally constructed as a copy of this one. The
data in the derived tally arrays is "diagonalized" along the bins in
the new filter. This functionality is used by the openmc.mgxs module,
for example, to transport-correct scattering matrices by subtracting a
'scatter-P1' reaction rate tally with an energy filter from a 'scatter'
reaction rate tally with both energy and energyout filters.
Parameters
----------
new_filter : Filter
The filter along which to diagonalize the data in the new tally
filter_position : int
Where to place the new filter in the Tally.filters list. Defaults
to last position.
Returns
-------
openmc.Tally
A new derived Tally with data diagonalized along the new filter.
"""
cv.check_type('new_filter', new_filter, _FILTER_CLASSES)
cv.check_type('filter_position', filter_position, Integral)
if new_filter in self.filters:
msg = 'Unable to diagonalize Tally ID="{}" which already ' \
'contains a "{}" filter'.format(self.id, type(new_filter))
raise ValueError(msg)
# Add the new filter to a copy of this Tally
new_tally = copy.deepcopy(self)
new_tally.filters.insert(filter_position, new_filter)
# Determine "base" indices along the new "diagonal", and the factor
# by which the "base" indices should be repeated to account for all
# other filter bins in the diagonalized tally
indices = np.arange(0, new_filter.num_bins**2, new_filter.num_bins+1)
diag_factor = self.num_filter_bins // new_filter.num_bins
diag_indices = np.zeros(self.num_filter_bins, dtype=int)
# Determine the filter indices along the new "diagonal"
for i in range(diag_factor):
start = i * new_filter.num_bins
end = (i+1) * new_filter.num_bins
diag_indices[start:end] = indices + (i * new_filter.num_bins**2)
# Inject this Tally's data along the diagonal of the diagonalized Tally
if not self.derived and self.sum is not None:
new_tally._sum = np.zeros(new_tally.shape, dtype=np.float64)
new_tally._sum[diag_indices, :, :] = self.sum
if not self.derived and self.sum_sq is not None:
new_tally._sum_sq = np.zeros(new_tally.shape, dtype=np.float64)
new_tally._sum_sq[diag_indices, :, :] = self.sum_sq
if self.mean is not None:
new_tally._mean = np.zeros(new_tally.shape, dtype=np.float64)
new_tally._mean[diag_indices, :, :] = self.mean
if self.std_dev is not None:
new_tally._std_dev = np.zeros(new_tally.shape, dtype=np.float64)
new_tally._std_dev[diag_indices, :, :] = self.std_dev
# If original tally was sparse, sparsify the diagonalized tally
new_tally.sparse = self.sparse
return new_tally
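# A small worked example of the diagonal bookkeeping above (assumed numbers,
# for illustration only): for a new filter with num_bins = 3, `indices` is
# np.arange(0, 9, 4) = [0, 4, 8], i.e. the diagonal of a 3x3 block. With
# diag_factor = 2 the second block is offset by 3**2 = 9, so
# diag_indices = [0, 4, 8, 9, 13, 17].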
class Tallies(cv.CheckedList):
"""Collection of Tallies used for an OpenMC simulation.
This class corresponds directly to the tallies.xml input file. It can be
thought of as a normal Python list where each member is a :class:`Tally`. It
behaves like a list as the following example demonstrates:
>>> t1 = openmc.Tally()
>>> t2 = openmc.Tally()
>>> t3 = openmc.Tally()
>>> tallies = openmc.Tallies([t1])
>>> tallies.append(t2)
>>> tallies += [t3]
Parameters
----------
tallies : Iterable of openmc.Tally
Tallies to add to the collection
"""
def __init__(self, tallies=None):
super().__init__(Tally, 'tallies collection')
if tallies is not None:
self += tallies
def append(self, tally, merge=False):
"""Append tally to collection
Parameters
----------
tally : openmc.Tally
Tally to append
merge : bool
Indicate whether the tally should be merged with an existing tally,
if possible. Defaults to False.
"""
if not isinstance(tally, Tally):
msg = 'Unable to add a non-Tally "{}" to the ' \
'Tallies instance'.format(tally)
raise TypeError(msg)
if merge:
merged = False
# Look for a tally to merge with this one
for i, tally2 in enumerate(self):
# If a mergeable tally is found
if tally2.can_merge(tally):
# Replace tally2 with the merged tally
merged_tally = tally2.merge(tally)
self[i] = merged_tally
merged = True
break
# If no mergeable tally was found, simply add this tally
if not merged:
super().append(tally)
else:
super().append(tally)
def insert(self, index, item):
"""Insert tally before index
Parameters
----------
index : int
Index in list
item : openmc.Tally
Tally to insert
"""
super().insert(index, item)
def merge_tallies(self):
"""Merge any mergeable tallies together. Note that n-way merges are
possible.
"""
for i, tally1 in enumerate(self):
for j, tally2 in enumerate(self):
# Do not merge the same tally with itself
if i == j:
continue
# If the two tallies are mergeable
if tally1.can_merge(tally2):
# Replace tally 1 with the merged tally
merged_tally = tally1.merge(tally2)
self[i] = merged_tally
# Remove tally 2 since it is no longer needed
self.pop(j)
# Continue iterating from the first loop
break
def _create_tally_subelements(self, root_element):
for tally in self:
root_element.append(tally.to_xml_element())
def _create_mesh_subelements(self, root_element):
already_written = set()
for tally in self:
for f in tally.filters:
if isinstance(f, openmc.MeshFilter):
if f.mesh.id not in already_written:
if len(f.mesh.name) > 0:
root_element.append(ET.Comment(f.mesh.name))
root_element.append(f.mesh.to_xml_element())
already_written.add(f.mesh.id)
def _create_filter_subelements(self, root_element):
already_written = dict()
for tally in self:
for f in tally.filters:
if f not in already_written:
root_element.append(f.to_xml_element())
already_written[f] = f.id
elif f.id != already_written[f]:
# Set the IDs of identical filters with different
# user-defined IDs to the same value
f.id = already_written[f]
def _create_derivative_subelements(self, root_element):
# Get a list of all derivatives referenced in a tally.
derivs = []
for tally in self:
deriv = tally.derivative
if deriv is not None and deriv not in derivs:
derivs.append(deriv)
# Add the derivatives to the XML tree.
for d in derivs:
root_element.append(d.to_xml_element())
def export_to_xml(self, path='tallies.xml'):
"""Create a tallies.xml file that can be used for a simulation.
Parameters
----------
path : str
Path to file to write. Defaults to 'tallies.xml'.
"""
root_element = ET.Element("tallies")
self._create_mesh_subelements(root_element)
self._create_filter_subelements(root_element)
self._create_tally_subelements(root_element)
self._create_derivative_subelements(root_element)
# Clean the indentation in the file to be user-readable
clean_indentation(root_element)
# Check if path is a directory
p = Path(path)
if p.is_dir():
p /= 'tallies.xml'
# Write the XML Tree to the tallies.xml file
reorder_attributes(root_element) # TODO: Remove when support is Python 3.8+
tree = ET.ElementTree(root_element)
tree.write(str(p), xml_declaration=True, encoding='utf-8')
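# A hedged end-to-end sketch of the Tallies collection (the tally definition is
# hypothetical; assumes the rest of an OpenMC model is assembled elsewhere):
#
#     flux_tally = openmc.Tally(name='flux')
#     flux_tally.scores = ['flux']
#     tallies = openmc.Tallies([flux_tally])
#     tallies.export_to_xml()   # writes tallies.xml in the current directory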
|
mit
|
kit-cel/lecture-examples
|
nt2_ce2/uebung/modulation_pulsformung/Pulsformung.py
|
1
|
7188
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 13 10:31:13 2014
NTII demo - pulse shaping
System model: source --> QPSK --> pulse shaping
@author: Michael Schwall
"""
from __future__ import division
import numpy as np
import matplotlib.pylab as plt
import scipy.signal as sig
import rrc as rrc
plt.close("all")
###############################################################################
## System parameters
###############################################################################
# Number of simulated symbols
K = 65536
# Oversampling factor (samples per symbol)
N = 8
# RRC filter: roll-off factor and number of filter coefficients
alpha = 0.35
N_rrc = N*4+1
# FFT Length
N_FFT = 512
# Check inputs
assert (K > 0 and (K & (K - 1)) == 0), 'K must be a power of 2'
assert (N > 0 and N%2 == 0), 'N must be greater than zero and even'
assert (alpha >= 0 and alpha <= 1), 'The roll-off factor must satisfy 0 <= alpha <= 1'
assert (N_rrc > 0 and N_rrc%2 != 0), 'N_rrc must be greater than zero and odd'
###############################################################################
## Transmitter
###############################################################################
# Generate QPSK symbols
s = 2 * np.random.randint(2, size=K)-1 + 1j * (2*np.random.randint(2, size=K)-1)
# Normalize power to 1
s = 1/np.sqrt(2)*s
# Upsample by factor N (zero insertion)
s_up = np.zeros(K*N,np.complex)
s_up[::N] = s;
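# Hedged illustration of the zero-insertion upsampling above (made-up numbers):
# for s = [1, -1] and N = 4, s_up becomes [1, 0, 0, 0, -1, 0, 0, 0]; the
# pulse-shaping filters below then interpolate between the symbol instants.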
# Rectangular filter
h_rect = np.ones(N)
s_tx_rect = sig.lfilter(h_rect,1,s_up)
s_tx_rect = 1/np.sqrt(np.mean(np.abs(s_tx_rect)**2))*s_tx_rect
print "Mittlere Leistung von s_tx_rect = %f" % np.mean(np.abs(s_tx_rect)**2)
# Triangular filter
h_tri = np.concatenate([np.arange(0,int(N)+1),np.arange(int(N)-1,0,-1)])
s_tx_tri = sig.lfilter(h_tri,1,s_up)
s_tx_tri = 1/np.sqrt(np.mean(np.abs(s_tx_tri)**2))*s_tx_tri
print "Mittlere Leistung von s_tx_tri = %f" % np.mean(np.abs(s_tx_tri)**2)
# Root-Raised-Cosine (RRC) Filter
h_rrc = rrc.get_rrc_ir(N_rrc,N,1.0,alpha)
s_tx_rrc = sig.lfilter(h_rrc,1,s_up)
s_tx_rrc = 1/np.sqrt(np.mean(np.abs(s_tx_rrc)**2))*s_tx_rrc
print "Mittlere Leistung von s_tx_rect = %f" % np.mean(np.abs(s_tx_rrc)**2)
##############################################################################
# Output
##############################################################################
# Settling time of the RRC filter (helper quantity)
N_osc = (N_rrc-1)/2
fig1 = plt.figure()
fig1.suptitle("Sender", fontsize=14, fontweight='bold')
ax1 = fig1.add_subplot(2,1,1)
ax1.set_title('Symbols (Tx, real part)')
ax1.stem(np.array(range(100)),s.real[:100])
ax1.grid(True)
ax1.set_xlabel('k (t/Ts)')
ax1.set_ylabel('Amplitude')
ax2 = fig1.add_subplot(2,1,2)
ax2.set_title('Oversampled symbols (real part)')
ax2.stem(np.array(range(100)),s_up.real[:100])
ax2.grid(True)
ax2.set_xlabel('k (t/Ts/N)')
ax2.set_ylabel('Amplitude')
fig2 = plt.figure()
fig2.suptitle("Pulsformung (Rechteck)", fontsize=14, fontweight='bold')
ax1 = fig2.add_subplot(1,3,1)
ax1.set_title('Impulse response (rectangular)')
ax1.stem(np.arange(np.ceil(-N/2),np.ceil(N/2)),h_rect)
ax1.set_xlim(-N_osc,N_osc+1)
ax1.set_ylim(-0.1,1.1)
ax1.grid(True)
ax1.set_xlabel('k (t/Ts/N)')
ax1.set_ylabel('Amplitude')
ax2 = fig2.add_subplot(1,3,2)
ax2.set_title('Signal after rectangular pulse shaping (real part, blue)')
ax2.plot(np.array(range(256)),s_tx_rect.real[0:256])
ax2.plot(np.array(np.arange(0,256,N)),np.real(s[0:np.int(np.ceil(256/N))]),'go',markersize=4)
ax2.set_xlim(0,256)
ax2.grid(True)
ax2.set_xlabel('k (t/Ts/N)')
ax2.set_ylabel('Amplitude')
ax3 = fig2.add_subplot(1,3,3)
ax3.set_title('PSD, rectangular pulse shaping')
Pxx_rect = 1/(K*N/N_FFT)*np.abs(np.fft.fftshift(np.fft.fft(np.reshape(s_tx_rect,(-1,N_FFT)),axis=1))).sum(0)
f = np.linspace(-0.5,0.5,len(Pxx_rect))
ax3.semilogy(f, Pxx_rect, 'b')
ax3.set_xlim(-0.5,0.5)
ax3.set_ylim([1e-1, np.sqrt(N*K)])
ax3.grid(True)
ax3.set_xlabel('n (f/N/Ts)')
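# Hedged reading of the PSD estimate above (the same recipe is reused for the
# triangular and RRC cases below): the signal is cut into K*N/N_FFT
# non-overlapping segments of length N_FFT and their magnitude spectra are
# averaged, roughly
#
#     Pxx ~ (1/n_seg) * sum_over_segments |FFT(segment)|
#
# i.e. a Bartlett-like segment averaging applied to magnitudes rather than to
# squared magnitudes.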
fig3 = plt.figure()
fig3.suptitle("Pulsformung (Dreieck)", fontsize=14, fontweight='bold')
ax1 = fig3.add_subplot(1,3,1)
ax1.set_title('Impulse response (triangular)')
ax1.stem(np.arange(np.ceil(-N),np.ceil(N)),h_tri)
ax1.set_xlim(-N_osc,N_osc+1)
ax1.set_ylim(-0.1,1.1*np.max(h_tri))
ax1.grid(True)
ax1.set_xlabel('k (t/Ts/N)')
ax1.set_ylabel('Amplitude')
ax2 = fig3.add_subplot(1,3,2)
ax2.set_title('Signal after triangular pulse shaping (real part, blue)')
ax2.plot(np.array(range(256)),s_tx_tri.real[N//2:256+N//2])
ax2.plot(np.array(np.arange(0,256,N)),np.real(s[0:np.int(np.ceil(256/N))]),'go',markersize=4)
ax2.set_xlim(0,256)
ax2.grid(True)
ax2.set_xlabel('k (t/Ts/N)')
ax2.set_ylabel('Amplitude')
ax3 = fig3.add_subplot(1,3,3)
ax3.set_title('PSD, triangular pulse shaping')
Pxx_tri = 1/(K*N/N_FFT)*np.abs(np.fft.fftshift(np.fft.fft(np.reshape(s_tx_tri,(-1,N_FFT)),axis=1))).sum(0)
f = np.linspace(-0.5,0.5,len(Pxx_tri))
ax3.semilogy(f, Pxx_tri, 'b')
ax3.set_xlim(-0.5,0.5)
ax3.set_ylim([1e-1, np.sqrt(N*K)])
ax3.grid(True)
ax3.set_xlabel('n (f/N/Ts)')
fig4 = plt.figure()
fig4.suptitle("Pulsformung (RRC)", fontsize=14, fontweight='bold')
ax1 = fig4.add_subplot(1,3,1)
ax1.set_title('Impulse response (RRC)')
ax1.stem(np.array(np.arange(-N_osc,N_osc+1)),h_rrc)
ax1.set_xlim(-N_osc,N_osc+1)
ax1.grid(True)
ax1.set_xlabel('k (t/Ts/N)')
ax1.set_ylabel('Amplitude')
ax2 = fig4.add_subplot(1,3,2)
ax2.set_title('Signal after RRC pulse shaping (real part, blue)')
ax2.plot(np.array(range(256)),s_tx_rrc.real[(N_rrc-1)//2:256+(N_rrc-1)//2])
ax2.plot(np.array(np.arange(0,256,N)),np.real(s[0:np.int(np.ceil(256/N))]),'go',markersize=4)
ax2.set_xlim(0,256)
ax2.grid(True)
ax2.set_xlabel('k (t/Ts/N)')
ax2.set_ylabel('Amplitude')
ax3 = fig4.add_subplot(1,3,3)
ax3.set_title('PSD, RRC pulse shaping')
Pxx_rrc = 1/(K*N/N_FFT)*np.abs(np.fft.fftshift(np.fft.fft(np.reshape(s_tx_rrc,(-1,N_FFT)),axis=1))).sum(0)
f = np.linspace(-0.5,0.5,len(Pxx_rrc))
ax3.semilogy(f, Pxx_rrc, 'b')
ax3.set_xlim(-0.5,0.5)
ax3.set_ylim([1e-1, np.sqrt(N*K)])
ax3.grid(True)
ax3.set_xlabel('n (f/N/Ts)')
print "Energie von Pxx_rect = %f" % (np.sum(np.abs(Pxx_rect*(f[2]-f[1]))**2))
print "Energie von Pxx_tri = %f" % (np.sum(np.abs(Pxx_tri*(f[2]-f[1]))**2))
print "Energie von Pxx_rrc = %f\n" % (np.sum(np.abs(Pxx_rrc*(f[2]-f[1]))**2))
n_start = np.argmin(np.abs(f+0.05))
n_stop = np.argmin(np.abs(f-0.05))
print "Energie von Pxx_rect (-0.05...+0.05) = %f" % (np.sum(np.abs(Pxx_rect[n_start:n_stop]*(f[2]-f[1]))**2))
print "Energie von Pxx_tri (-0.05...+0.05) = %f" % (np.sum(np.abs(Pxx_tri[n_start:n_stop]*(f[2]-f[1]))**2))
print "Energie von Pxx_rrc (-0.05...+0.05) = %f\n" % (np.sum(np.abs(Pxx_rrc[n_start:n_stop]*(f[2]-f[1]))**2))
fig5 = plt.figure()
ax1 = fig5.add_subplot(1,1,1)
ax1.semilogy(f,Pxx_rect, label='Rectangular')
ax1.semilogy(f,Pxx_tri, label='Triangular')
ax1.semilogy(f,Pxx_rrc, label='RRC')
ax1.grid(True)
ax1.set_xlim(-0.5,0.5)
ax1.set_ylim([1e-1, np.sqrt(N*K)])
ax1.set_xlabel('n (f/N/Ts)')
ax1.set_ylabel('Amplitude')
ax1.set_title('PSD: rectangular / triangular / RRC pulse shaping')
ax1.legend()
plt.show()
|
gpl-2.0
|
droundy/protein
|
pyplots/box_plot.py
|
2
|
18421
|
from __future__ import division
import numpy as np
import matplotlib
#matplotlib.use("Agg")
import matplotlib.pyplot as plt
import sys
import pylab
import re
import file_loader as load
import matplotlib.patheffects
from matplotlib.font_manager import FontProperties
from mpl_toolkits.axes_grid.anchored_artists import AnchoredSizeBar
import os
## WIP!!
#reads a special kind of data file printed by protein_microscopy.cpp
#format is:
# --------
# proteinType1 boxName1 n(t=0) n(t=1) n(t=2) ...
# proteinType1 boxName2 n(t=0) n(t=1) n(t=2) ...
# proteinType1 boxName3 n(t=0) n(t=1) n(t=2) ...
#
# proteinType2 boxName1 n(t=0) n(t=1) n(t=2) ...
# proteinType2 boxName2 n(t=0) n(t=1) n(t=2) ...
# proteinType2 boxName3 n(t=0) n(t=1) n(t=2) ...
# --------
#where n(t) is the number of proteins of one type at time t
#opens the file and grabs a particular line matching proteinType and
#boxName. returns list of protein counts at each time.
#box-plot--p-0.50-0.50-0.00-0.00-15.00-exact.dat
sim_type = load.sim_type
def returnData(boxName,proteinType):
#open the data file, grab the line with the correct protein type and box partition, load it as a [string] (so we can use list comprehensions)
filename = job_string + 'box-plot.dat'
with open(filename,"r") as boxData:
proteinsOverTime = [line for line in boxData if (proteinType in line) and (boxName in line)]
#format the string so that it is a list of numbers (split on tab, pop off keywords and newlines, convert str -> float)
proteinsOverTime = proteinsOverTime[0].split('\t')
proteinsOverTime = proteinsOverTime[2:-1]
proteinsOverTime = [float(i) for i in proteinsOverTime]
return proteinsOverTime
#takes input format:
#["proteinType1-boxNum1","proteinType1-boxnum2",proteinType2-boxnum1"...]. will
#return a list of lists in the stacking order specified by the input
#(first entry is at the bottom).
def stackData(plotList):
#parse the input
tempList = []
for proteinData in plotList:
protein, boxName = proteinData.split('-')
tempList += [returnData(boxName,protein)]
#"stack" the lists
stackedPlotList = [tempList[0]]
for i in range(1,len(tempList)):
stackedPlotList += [[j+k for (j,k) in zip(stackedPlotList[i-1],tempList[i])]]
output = np.array(stackedPlotList)
return output/output[len(output[:,0])-1, 0] # normalize output as a fraction of total
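#hedged worked example of stackData (made-up numbers): two curves [1,2] and
#[3,4] stack to [[1,2],[4,6]]; dividing by the top curve's first value (4)
#gives [[0.25,0.5],[1.0,1.5]], so the topmost (total) curve starts at 1.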
def find_period(f):
"""
Find the period of a function that is described by the input
array f, and return indices for a start and end range for one
period. If we cannot find the period accurately, just return
the entire range.
"""
f = -f
# first we look at the fft to get a guess at the period (probably
# not *too* accurate or too bad).
fk = np.fft.fft(f)
fk[0] = 0
kmax = 1
fkmax = np.abs(fk[:int(len(fk)/2)]).max()
for i in xrange(1,int(len(fk)/2)):
if np.abs(fk[i]) == fkmax:
kmax = i
break
#print 'kmax is', kmax
period_estimate = len(f)/kmax
#plt.plot(np.abs(fk))
#plt.figure()
if kmax < 5:
return (0, len(f))
# now we locate the final minimum of the function.
lastmin = len(f)-2
while f[lastmin] > f[lastmin+1] or f[lastmin] > f[lastmin-1]:
lastmin -= 1
# and last (but not least), we locate the second-to-last
# (penultimate) minimum, which should have a very similar value to
# the final minimum.
penultimate_min = lastmin - int(period_estimate*.7)
while f[penultimate_min] > f[penultimate_min+1] or f[penultimate_min] > f[penultimate_min-1] or np.abs(f[penultimate_min]/f[lastmin]-1) > 0.01:
penultimate_min -= 1
#return (0, len(f) - 1)
if penultimate_min < 0:
return (0, len(f))
return (penultimate_min, lastmin)
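#hedged note on find_period: the dominant FFT bin kmax gives a rough period
#estimate of len(f)/kmax samples (e.g. len(f)=1000 and kmax=10 -> ~100 samples
#per period); the last two minima of f then pin down one exact period, and the
#full range is returned when the estimate is unreliable (kmax < 5).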
def main():
filename = job_string + 'box-plot.dat'
print "loading ",filename
with open(filename, "r") as boxData:
fileLines = boxData.readlines()
#get number of boxes and protein types. little hokey but it works. in boxData.readlines(), there is exactly one '\n' newline string
#for each protein type block. Therefore, the number of protein types is equal to the number of times "\n" appears by itself in the list.
numProteinTypes = len([line for line in fileLines if line=="\n"])
numNewLines = numProteinTypes
#it follows that the total number of lines in the data file, minus the number of blank lines in the data file, is equal to
#the number of protein types * the number of box types. divide by number of protein types to get number of box types.
numBoxes = (len(fileLines) - numNewLines)/numProteinTypes
#grab the names of the proteins used, and the names of the boxes
proteinTypeList = []
boxList = []
for line in fileLines:
if (line != "\n"):
proteinTypeList += [line.split("\t")[0]]
boxList += [line.split("\t")[1]]
#prune duplicates
proteinTypeList = list(set(proteinTypeList))
boxList = list(set(boxList))
#generate list of proteinType and box combinations to feed into stackData
plotNameList_D = []
plotNameList_E = []
numProteinTypes_D = 0
numProteinTypes_E = 0
for box in boxList:
for proteinType in proteinTypeList:
if "D_" in proteinType:
plotNameList_D += ["%s-%s"%(box,proteinType)]
if "E_" in proteinType:
plotNameList_E += ["%s-%s"%(box,proteinType)]
#print ""
#print "plotNameList before ", plotNameList_D, "\n"
new_plotNameList_D = [0]*len(plotNameList_D)
P_Ord = [3,0,2,1,7,4,6,5,11,8,10,9]
if load.f_param4 == '97.00':
P_Ord = [3,0,2,1,11,8,10,9,15,12,14,13,7,4,6,5]
if load.f_param4 == '96.00':
P_Ord = [15,12,14,13,3,0,2,1,7,4,6,5,11,8,10,9]
for i in range(len(P_Ord)):
new_plotNameList_D[i] = plotNameList_D[P_Ord[i]]
for i in range(len(plotNameList_D)):
plotNameList_D[i] = new_plotNameList_D[i]
#print "plotNameList after ",plotNameList_D,"\n"
plotProteinLabels = ['MinD:ATP (cyto)','MinD:ATP (mem)','MinE:MinD:ATP','MinD:ADP (cyto)']
#pass plotNameList through stackData to generate the list of line data to be plotted
plotCurveList_D = stackData(plotNameList_D)
plotCurveList_E = stackData(plotNameList_E)
#get a time axis for the plot from the length of one of the data sets we have
difD = 2.5 # (um)^2 s^-1
time_step = .1*load.dx*load.dx/difD #sec
print_denominator = 1000 # This comes from the C++ code; I wanted to format things the same way here.
box_time_step = time_step*print_denominator
timeAxis = np.linspace(0,box_time_step*len(plotCurveList_D[0]),len(plotCurveList_D[0]))
#begin messy code (to deal with matplotlib) - don't judge me
# start_time_as_frac_of_ten = float(sys.argv[8])
# end_time_as_frac_of_ten = float(sys.argv[9])
# tot_time = float(len(plotCurveList_D[0]))*box_time_step
start = int(float(sys.argv[8])/box_time_step)#int(tot_time*start_time_as_frac_of_ten/10.0/box_time_step)
end = int(float(sys.argv[9])/box_time_step)#int(tot_time*end_time_as_frac_of_ten/10.0/box_time_step)
# (start, end) = find_period(plotCurveList_D[len(plotCurveList_D)-2])
# (start, end) = find_period(np.array(returnData(boxList[len(boxList)-1], 'D_ND')))
# start = end - 2*(end-start)
# end = int(tot_time/box_time_step)
# if (load.f_shape == 'triangle') or (load.f_param4 == '95.00') or (load.f_param4 == '94.00'):
# start = 0
# end = int(tot_time*.5/box_time_step)
# if ((load.f_param2 == '14.00' or load.f_param2 == '5.50') and load.f_param4 == '99.00'):
# start = 0
# end = int(tot_time/box_time_step)
# periods_file = open('periods.txt','a')
# periods_file.write('Box period= '+str(box_time_step*(end-start)) +' for simulation '+load.f_shape+
# ' '+load.f_param1+' '+load.f_param2+' '+load.f_param3+' '+load.f_param4+' '+
# load.f_param5+', with start = '+str(start*box_time_step)+' and end = '+str(end*box_time_step)+'\n')
# periods_file.close()
# print useful coordination data
period = timeAxis[end-1] - timeAxis[start]
print 'period is', period
firsttime = timeAxis[start]
while firsttime > 9*period:
firsttime -= period
print 'early start time is', firsttime
print 'and end time is ',firsttime+period
print 'and file numbers are', firsttime*2, 'and', (firsttime+period)*2
# now offset time so it starts at zero
timeAxis = timeAxis - timeAxis[start]
#print set(plotCurveList_D[1]).union(set(plotCurveList_D[2]))
#get num on each plot
for proteinType in proteinTypeList:
if "D_" in proteinType:
numProteinTypes_D += 1
if "E_" in proteinType:
numProteinTypes_E +=1
# plot scales. colors limited for now.
# colorScale = ["b","g","r","c","m","y"]
# The tuples elements here are the amount of R,G,B in the color, respectively, on a scale 0-1
col_amount = 1.0
colorScale = ["b",(0.0,0.0,col_amount),(col_amount,0.0,0.0),(0.0,col_amount,0.0),"m","y"]
# alphaScale_D = [n/numProteinTypes for n in range(0,numProteinTypes_D+1)]
alphaScale_D = [0.1,0.25,0.50,1.00]
alphaScale_E = [n/numProteinTypes for n in range(0,numProteinTypes_E+1)]
#generate the plot
#f, (bax,sectionax) = plt.subplots(1, 2)
bax = plt.subplot2grid((2,5), (0,0), colspan=4, rowspan=2)
sectionax = plt.subplot2grid((2,5), (0,4), colspan=1,rowspan=2)
# first plot the section data...
filename = job_string + 'sections.dat'
sectiondata = np.loadtxt(filename)
def plot_sections(sectionax, sectiondata):
dx = load.dx
x = np.arange(sectiondata.shape[1]*1.0)*dx
y = np.arange(sectiondata.shape[0]*1.0)*dx
X,Y = np.meshgrid(x,y)
inmembrane = np.zeros_like(sectiondata)
inmembrane[sectiondata>0] = 1.0
xmax = X[sectiondata>0].max()
xmin = X[sectiondata>0].min()
ymax = Y[sectiondata>0].max()
ymin = Y[sectiondata>0].min()
ymean = (Y*inmembrane).sum()/inmembrane.sum()
xmean = (X*inmembrane).sum()/inmembrane.sum()
yweighted = (Y*sectiondata).sum()/sectiondata.sum()
xweighted = (X*sectiondata).sum()/sectiondata.sum()
levels = [0.5, 1.5, 2.5, 3.5, 4.5]
mycolors = ["w","g","r","m","c","y"]
for i in xrange(min(4, len(boxList))):
if boxList[i] == 'Right':
mycolors[1] = colorScale[i]
if boxList[i] == 'Mid':
mycolors[2] = colorScale[i]
if boxList[i] == 'Left':
mycolors[3] = colorScale[i]
mycolors = colorScale[1:]
if load.f_param4 == '97.00':
mycolors = ['g','r','m','c']
if load.f_param4 == '96.00':
#rightup = 2, rightdown = 1, leftup = 4, leftdown = 3
mycolors = ['g','r','c','m']
#print mycolors
# here we rotate so that the order of sections will match the
# box plot.
xdir, ydir = xweighted - xmean, yweighted - ymean
xdir, ydir = xdir/np.sqrt(xdir**2+ydir**2), ydir/np.sqrt(xdir**2+ydir**2)
extraxspace = 0.5
extrayspace = 0
Yrotated = X*xdir + Y*ydir
Xrotated = X*ydir - Y*xdir
sectionax.contourf(Xrotated, Yrotated, sectiondata, levels=levels, colors=mycolors)
xmin = Xrotated[sectiondata>0].min()
xmax = Xrotated[sectiondata>0].max()
ymin = Yrotated[sectiondata>0].min()
ymax = Yrotated[sectiondata>0].max()
sectionax.set_xlim(xmin-extraxspace, xmax)
sectionax.set_ylim(ymin-extrayspace, ymax)
sectionax.set_aspect('equal')
sectionax.set_frame_on(False)
sectionax.axes.get_xaxis().set_visible(False)
sectionax.axes.get_yaxis().set_visible(False)
sectionax.add_artist(AnchoredSizeBar(
sectionax.transData,
2.13, # length of the bar in the data reference
"2.13$\mu$", # label of the bar
# bbox_to_anchor=(0.,0.,1.,1.),
loc=8, # 'best', # location (lower right)
pad=-(ymax-ymin)/2.0 -.4, borderpad=0.25, sep=3,
frameon=False
))
plot_sections(sectionax, sectiondata)
section_names = ['Bottom Section','Center Section','Top Section']
if load.f_param4 == '97.00':
section_names = ['Lower Section','Middle Left Section','Middle Right Section','Upper Section']
# section_names = ['rightup','mid','left','rightdown']
if load.f_param4 == '96.00':
section_names = ['Lower Left Section','Lower Right Section','Upper Left Section','Upper Right Section']
# section_names = ['rightdown','rightup','leftdown','leftup']
font=FontProperties()
font.set_family('serif')
text_adjust = -.2*box_time_step*(end-start)
j=0
k=0
for i in range(len(plotCurveList_D[:,0])):
if i%(numProteinTypes_D)==0:
j+=1
k=0
if i==0:
bax.plot(timeAxis[start:end],
plotCurveList_D[i, start:end],
color=colorScale[j],alpha=alphaScale_D[k])
y_text_label = i*.8/len(plotCurveList_D[:,0]) + .1*np.floor(i/numProteinTypes_D)
if load.f_param4 == '97.00' or load.f_param4 == '96.00':
y_text_label = i*.8/len(plotCurveList_D[:,0]) + .07*np.floor(i/numProteinTypes_D)
y_label = (plotCurveList_D[i, start+int(1/box_time_step)])/2.0
bax.annotate('%s'%plotProteinLabels[i],xy=(1,y_label),xytext=(text_adjust,y_text_label),
fontsize=7,
fontproperties=font,
arrowprops=dict(facecolor='black',shrink=0.05, width=.3, headwidth=5.))
bax.fill_between(timeAxis[start:end],
[0 for x in range(len(timeAxis))[start:end]],
plotCurveList_D[i, start:end],
alpha=alphaScale_D[k],facecolor=colorScale[j])
elif i!=0:
if i == 1:
k+=1
bax.plot(timeAxis[start:end],
plotCurveList_D[i,start:end],
color=colorScale[j],alpha=alphaScale_D[k])
y_text_label = i*.8/len(plotCurveList_D[:,0]) + .1*np.floor(i/numProteinTypes_D)
y_label = (plotCurveList_D[i, start+int(1/box_time_step)] + plotCurveList_D[i-1, start+int(1/box_time_step)])/2.0
if load.f_param4 == '97.00' or load.f_param4 == '96.00':
y_text_label = i*.8/len(plotCurveList_D[:,0]) + .07*np.floor(i/numProteinTypes_D)
bax.annotate('%s'%plotProteinLabels[i%numProteinTypes_D],xy=(1,y_label),xytext=(text_adjust,y_text_label),
fontsize=7,
fontproperties=font,
arrowprops=dict(facecolor='black',shrink=0.05, width=.3, headwidth=5.))
if (i+1)%(numProteinTypes_D)==0:
bax.text(-0.2,y_text_label+.04,section_names[int(np.floor(i/numProteinTypes_D))],transform=bax.transAxes,fontsize=9,fontproperties=font,)
bax.fill_between(timeAxis[start:end],
plotCurveList_D[i-1, start:end],
plotCurveList_D[i, start:end],
alpha=alphaScale_D[k],facecolor=colorScale[j])
k+=1
bax.set_xlim(timeAxis[start],timeAxis[end-1])
print timeAxis[start]
bax.set_xticklabels(np.arange(timeAxis[start]+100, timeAxis[end-1]+100, 0.5))
#bax.set_xticklabels(np.arange(0, 10, 0.5))
bax.get_yaxis().set_visible(False)
bax.set_ylim(0, 1)
bax.set_title("MinD protein counts over time")
bax.set_xlabel("Time (s)")
rax = bax.twinx()
rax.set_ylabel('Fraction of proteins in each stage and section',labelpad=-15)
rax.yaxis.set_ticklabels([0,"","","","",1.0])
#bax.set_ylabel("Fraction of proteins")
# 'A', xy=(Az, Ax), xytext=(1.2,-3.5),
# path_effects=texteff,
# arrowprops=dict(shrink=0.01, width=1,
# headwidth=hw, path_effects=arroweff))
#bax.legend(plotNameList_D,bbox_to_anchor=(0.3,-0.05,1.0,1.0),loc=4,prop={'size':8}).draw_frame(False)
print load.print_string("box-plot_D","")
plt.savefig(load.print_string("box-plot_D",""))
#plt.show()
plt.figure()
#f, (bax,sectionax) = plt.subplots(1, 2)
bax = plt.subplot2grid((2,5), (0,0), colspan=4, rowspan=2)
sectionax = plt.subplot2grid((2,5), (0,4), colspan=1,rowspan=2)
# First plot the section data...
plot_sections(sectionax, sectiondata)
j=0
k=0
for i in range(len(plotCurveList_E)):
if i%(numProteinTypes_E)==0:
j+=1
k=0
if i==0:
bax.plot(timeAxis[start:end],plotCurveList_E[i][start:end],color=colorScale[j],alpha=alphaScale_E[k])
bax.fill_between(timeAxis[start:end],[0 for x in range(len(timeAxis))[start:end]],plotCurveList_E[i][start:end],alpha=alphaScale_E[k],facecolor=colorScale[j])
elif i!=0:
bax.plot(timeAxis[start:end],plotCurveList_E[i][start:end],color=colorScale[j],alpha=alphaScale_E[k])
bax.fill_between(timeAxis[start:end],plotCurveList_E[i-1][start:end],plotCurveList_E[i][start:end],alpha=alphaScale_E[k],facecolor=colorScale[j])
#print "i is ",i," || k is", k," || j is",j
k+=1
bax.set_xlim(timeAxis[start],timeAxis[end-1])
bax.set_ylim(0, 1)
bax.set_title("MinE protein counts over time")
bax.set_xlabel("Time (s)")
bax.set_ylabel("Fraction of proteins")
bax.legend(plotNameList_E,bbox_to_anchor=(0.3,-0.05,1.0,1.0),loc="lower right",prop={'size':8}).draw_frame(False)
plt.savefig(load.print_string("box-plot_E",""))
return 0
job_string = "data/shape-%s/%s-%s-%s-%s-%s-%s/" % (load.f_shape,load.f_param1,load.f_param2,
load.f_param3,load.f_param4,load.f_param5,sim_type)
print job_string
print sim_type
p = re.compile('[.]')
job_string = p.sub('_',job_string)
dir_name = job_string + 'plots'
if not os.path.exists(dir_name):
print "making directory "+dir_name+" because doesnt exist"
os.makedirs(dir_name)
if __name__ == '__main__':
main()
|
mit
|
beepee14/scikit-learn
|
examples/applications/wikipedia_principal_eigenvector.py
|
233
|
7819
|
"""
===============================
Wikipedia principal eigenvector
===============================
A classical way to assert the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
http://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
http://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in scikit-learn.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
from sklearn.externals.six.moves.urllib.request import urlopen
from sklearn.externals.six import iteritems
print(__doc__)
###############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
print("Downloading data from '%s', please wait..." % url)
opener = urlopen(url)
open(filename, 'wb').write(opener.read())
print()
###############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
"""Find the index of an article name after redirect resolution"""
k = redirects.get(k, k)
return index_map.setdefault(k, len(index_map))
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
"""Remove the < and > URI markers and the common URI prefix"""
return nt_uri[SHORTNAME_SLICE]
def get_redirects(redirects_filename):
"""Parse the redirections and build a transitively closed map out of it"""
redirects = {}
print("Parsing the NT redirect file")
for l, line in enumerate(BZ2File(redirects_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
redirects[short_name(split[0])] = short_name(split[2])
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
# compute the transitive closure
print("Computing the transitive closure of the redirect relation")
for l, source in enumerate(redirects.keys()):
transitive_target = None
target = redirects[source]
seen = set([source])
while True:
transitive_target = target
target = redirects.get(target)
if target is None or target in seen:
break
seen.add(target)
redirects[source] = transitive_target
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
return redirects
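# Hedged illustration of the transitive closure above (made-up article names):
# starting from {'A': 'B', 'B': 'C', 'C': 'D'}, each source is walked to its
# final target, yielding {'A': 'D', 'B': 'D', 'C': 'D'}; the `seen` set guards
# against redirect cycles.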
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
"""Extract the adjacency graph as a scipy sparse matrix
Redirects are resolved first.
Returns X, the scipy sparse adjacency matrix, redirects as python
dict from article names to article names and index_map a python dict
from article names to python int (article indexes).
"""
print("Computing the redirect map")
redirects = get_redirects(redirects_filename)
print("Computing the integer index map")
index_map = dict()
links = list()
for l, line in enumerate(BZ2File(page_links_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
i = index(redirects, index_map, short_name(split[0]))
j = index(redirects, index_map, short_name(split[2]))
links.append((i, j))
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
if limit is not None and l >= limit - 1:
break
print("Computing the adjacency matrix")
X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
for i, j in links:
X[i, j] = 1.0
del links
print("Converting to CSR representation")
X = X.tocsr()
print("CSR conversion done")
return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = dict((i, name) for name, i in iteritems(index_map))
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# Print the names of the strongest Wikipedia-related components of the
# principal singular vector, which should be similar to the pages ranked
# highest by eigenvector centrality
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
"""Power iteration computation of the principal eigenvector
This method is also known as Google PageRank and the implementation
is based on the one from the NetworkX project (BSD licensed too)
with copyrights by:
Aric Hagberg <[email protected]>
Dan Schult <[email protected]>
Pieter Swart <[email protected]>
"""
n = X.shape[0]
X = X.copy()
incoming_counts = np.asarray(X.sum(axis=1)).ravel()
print("Normalizing the graph")
for i in incoming_counts.nonzero()[0]:
X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()
scores = np.ones(n, dtype=np.float32) / n # initial guess
for i in range(max_iter):
print("power iteration #%d" % i)
prev_scores = scores
scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
+ (1 - alpha) * prev_scores.sum() / n)
# check convergence: normalized l_inf norm
scores_max = np.abs(scores).max()
if scores_max == 0.0:
scores_max = 1.0
err = np.abs(scores - prev_scores).max() / scores_max
print("error: %0.6f" % err)
if err < n * tol:
return scores
return scores
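# Hedged sketch of the update rule implemented above (standard damped PageRank
# form, written out for reference):
#
#     scores_new = alpha * (scores @ X_norm + dangling_mass)
#                  + (1 - alpha) * scores.sum() / n
#
# where X_norm has every non-empty row scaled to sum to 1 and dangling_mass
# redistributes the score mass of pages without outgoing links.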
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
|
bsd-3-clause
|
vshtanko/scikit-learn
|
examples/missing_values.py
|
233
|
3056
|
"""
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = np.floor(n_samples * missing_rate)
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
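# --- Added sketch (not part of the original example) ---
# A small follow-up comparing imputation strategies; it reuses the X_missing,
# y_missing arrays and the (old-style) Imputer / cross_val_score imports from
# above, so the exact scores are only illustrative.
for strategy in ("mean", "median", "most_frequent"):
    pipeline = Pipeline([("imputer", Imputer(missing_values=0,
                                             strategy=strategy,
                                             axis=0)),
                         ("forest", RandomForestRegressor(random_state=0,
                                                          n_estimators=100))])
    strategy_score = cross_val_score(pipeline, X_missing, y_missing).mean()
    print("Score with %s imputation = %.2f" % (strategy, strategy_score))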
|
bsd-3-clause
|
Ichaelus/Github-Classifier
|
Playground/naive_bayes.py
|
1
|
1219
|
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 27 11:58:08 2016
@author: andreas
"""
import utilities
from utilities import *
# Get raw text + labels
texts, labels, label_names = get_data()
# the training texts are converted into input vectors
# features is then a matrix made up of those individual vectors
features = vectorize_text(texts)
# x are the input matrices, y are the vectors holding the target labels
x_train, x_test, y_train, y_test = split_train_test(features, labels, ratio=0.5)
# train the classifier on the data
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
clf.fit(x_train, y_train)
# variables for analysing the results
succ = 0
total = 0
dev_count = 0
# iterate over the test data and check how each sample is classified
for i in xrange(len(x_test)):
pred = clf.predict([x_test[i]])
if y_test[i] != label_names.index('DEV'):
if pred == y_test[i]:
succ = succ + 1
total = total + 1
if pred == 0:
dev_count = dev_count + 1
succ = succ / float(total)
print "Ratio predicted Dev vs all classes", dev_count / float(len(x_test))
print "Result without dev:", succ
print "Result with dev: ", clf.score(x_test, y_test)
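# --- Added sketch (not part of the original playground script) ---
# Optional summary with sklearn's classification_report; it reuses the clf,
# x_test and y_test objects from above and only illustrates how per-class
# precision/recall could be inspected.
from sklearn.metrics import classification_report
y_pred = clf.predict(x_test)
print(classification_report(y_test, y_pred))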
|
mit
|
gilbert1991/map
|
my_util.py
|
1
|
7651
|
import geopy.distance as gpy
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib.pyplot as plt
import numpy as np
import math
import sys
import cv2
import objects as obj
import http_handler as hh
import setting as st
# boundary = [width, height]
# Keep the w/h ratio
def resizeImage(inputFile, outputFile, boundary=[800, 800]):
img = cv2.imread(inputFile)
h_ratio, w_ratio = float(boundary[0]) / img.shape[0], float(boundary[1]) / img.shape[1]
ratio = h_ratio if h_ratio <= w_ratio else w_ratio
width, height = int(img.shape[1] * ratio), int(img.shape[0] * ratio)
out = cv2.resize(img, (width, height), interpolation = cv2.INTER_AREA)
cv2.imwrite(outputFile, out)
return width, height
def snapRoadNetwork(origin, radius, interval, sector):
print 'Snap road networks at (%f, %f) with radius %f' % (origin[0], origin[1], radius)
network = []
# Slice a circle into sectors with 2*radius
vertices = sliceCircle(origin, radius * 2, sector)
biSec = sector / 2
triSec = sector / 3
# Create virtual paths for road snapping
for idx in range(0, sector):
road, reverse_road = twoPointsSnap([vertices[idx], vertices[(idx+biSec) % sector]], origin, radius, interval)
network.append(road)
# network.append(reverse_road)
# road, reverse_road = twoPointsSnap([vertices[idx], vertices[(idx+triSec) % sector]], origin, radius, interval)
# network.append(road)
# network.append(reverse_road)
# plotNetwork(network)
return network
def twoPointsSnap(path, origin, radius, interval):
# snap path to road
road = hh.parseJsonRoad(hh.snapToRoad(path))
# interpolate the road and snap again to get optimized sample locations
road = hh.parseJsonRoad(hh.snapToRoad(interpolate(road, interval)))
road = filterPath(road, interval)
#reverse the query to better cover the area
reverse_road = hh.parseJsonRoad(hh.snapToRoad(list(reversed(road)))) if road else []
reverse_road = hh.parseJsonRoad(hh.snapToRoad(interpolate(reverse_road, interval)))
reverse_road = filterPath(reverse_road, interval)
# print (len(road), len(reverse_road))
    # Exclude points farther than radius*1.3 from the origin
road = [pt for pt in road if np.sqrt((pt[1]-origin[1])**2+(pt[0]-origin[0])**2) < radius*1.3]
reverse_road = [pt for pt in reverse_road if np.sqrt((pt[1]-origin[1])**2+(pt[0]-origin[0])**2) < radius*1.3]
# print (len(road), len(reverse_road))
return road, reverse_road
def filterPath(path, interval):
# Filter the samples with density 1/interval
for i in list(reversed(range(len(path)))):
if np.sqrt((path[i-1][1]-path[i][1])**2+(path[i-1][0]-path[i][0])**2) < interval:
del path[i-1]
return path
# Evenly slice a circle into sectors
def sliceCircle(origin, radius, sectors):
sector_width = 360 / sectors
vertices = []
for x in range(0, sectors):
vertex = (origin[0] + np.sin(np.radians(x * sector_width)) * radius,
origin[1] + np.cos(np.radians(x * sector_width)) * radius)
vertices.append(vertex)
return vertices
# interpolate points if the interval is too large
def interpolate(path, interval):
size = len(path)
result_path = []
for idx in range(size - 1):
p1 = path[idx]
p2 = path[idx+1]
result_path.append(p1)
# number of interpolating points needed
interpolate_points = np.floor( np.sqrt((p2[1]-p1[1])**2+(p2[0]-p1[0])**2) / interval ) - 1
if interpolate_points > 0:
# calculate the steps of lat & lng
step0 = (p2[0]-p1[0]) / (interpolate_points + 1)
step1 = (p2[1]-p1[1]) / (interpolate_points + 1)
# insert the inter points
for idx in range(1, int(interpolate_points+1)):
result_path.append((p1[0]+idx*step0, p1[1]+idx*step1))
result_path.append(p2)
return result_path
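# Illustrative note on interpolate() (added, not in the original module): for a
# straight segment it simply inserts evenly spaced points, e.g.
# interpolate([(0.0, 0.0), (1.0, 0.0)], 0.25) returns
# [(0.0, 0.0), (0.25, 0.0), (0.5, 0.0), (0.75, 0.0), (1.0, 0.0)].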
def plotMultiWeights(img_list, headings, maxWeight):
size = len(headings)
fig = plt.figure()
cmhot = plt.get_cmap("hot")
print maxWeight
for i in range(size):
heading = headings[i]
sub_list = [img for img in img_list if img.cameraPara.heading == heading]
geo_list = [img.position for img in sub_list]
x = [geo[0] for geo in geo_list]
y = [geo[1] for geo in geo_list]
z = [img.weight for img in sub_list]
ax = fig.add_subplot(1, size, i+1, projection='3d')
ax.scatter(x, y, z, s=50, c=np.abs(z), cmap=cmhot)
ax.set_zlim([0, float( maxWeight)])
ax.set_title('Heading %s' % heading)
plt.show()
def plot3D(x, y, z):
fig = plt.figure()
ax = fig.gca(projection='3d')
cmhot = plt.get_cmap("hot")
ax.scatter(x, y, z, s=50, c=np.abs(z), cmap=cmhot)
plt.show()
def plotNetwork(network):
marker_style = dict(color='cornflowerblue', linestyle=':', marker='o',
markersize=8, markerfacecoloralt='gray')
fig, ax = plt.subplots()
for path in network:
print len(path)
x = []
y = []
for p in path:
x.append(p[1])
y.append(p[0])
# Plot all fill styles
ax.plot(x, y, fillstyle='full', **marker_style)
plt.show()
if __name__ == '__main__':
network = snapRoadNetwork((40.693935, -73.983245), 0.0005, 0.0001, 8)
print '%d locations sampled in network' % sum([len(path) for path in network])
plotNetwork(network)
# width, height = resizeImage(st.path + "image/query/bobst.jpg", st.path + "image/query/bobst_1.jpg")
# print (width, height)
# path = [(40.693902999999999, -73.982433999999998), (40.694902999999996, -73.983434000000003)]
# # path = interpolate([v1, v2], interval)
# road = hh.parseJsonRoad(hh.snapToRoad(path))
# print road
# print interpolate(road, 0.0001)
# /Deprecated
# Convert geo coords (lat, lng) diff to distance in meters
# lat: 0.0001 ~= 11.1m, lng 0.0001 ~= 8.5m
# def geoDistance(ori, dst, method='vincenty'):
# if method == 'vincenty':
# distance = gpy.vincenty(ori, dst).meters
# elif method == 'great_circle':
# distance = gpy.great_circle(ori, dst).meters
# else:
# print('Error with geo_distance')
# return distance
# /Deprecated
# Sample Location Generation
# def hexagon(origin = (0, 0), radius = 20, interval = 1):
# print 'Generating hexagon points...'
# # init point list with origin at layer 0
# point_list = [(origin[0], origin[1])]
# # number of layers to cover the area
# # layer is the position of a hexagon
# no_layer = int(math.ceil(radius / interval))
# # Generate points layer by layer
# for lyr in range(1, no_layer + 1):
# unit_length = interval * lyr
# height_length = unit_length * math.sqrt(3) / 2
# layer_list = []
# # Create 6 vertices
# # 3 o---o 2
# # / \ / \
# # 4 o---o---o 1
# # \ / \ /
# # 5 o---o 6
# layer_list.append((origin[0] + unit_length, origin[1])) # 1
# layer_list.append((origin[0] + unit_length / 2, origin[1] + height_length)) # 2
# layer_list.append((origin[0] - unit_length / 2, origin[1] + height_length)) # 3
# layer_list.append((origin[0] - unit_length, origin[1])) # 4
# layer_list.append((origin[0] - unit_length / 2, origin[1] - height_length)) # 5
# layer_list.append((origin[0] + unit_length / 2, origin[1] - height_length)) # 6
# # number of points on an edge exclude two vertices
# no_point_on_edge = lyr - 1
# # Append start vertext and points on edge , edge by edge
# for edge in range(6):
# v_start = layer_list[edge]
# v_end = layer_list[(edge + 1) % 6]
# lat_diff = (v_start[0] - v_end[0]) / (no_point_on_edge + 1)
# lng_diff = (v_start[1] - v_end[1]) / (no_point_on_edge + 1)
# point_list.append(v_start)
# for pt in range(1, no_point_on_edge + 1):
# point_list.append((v_start[0] - pt * lat_diff, v_start[1] - pt * lng_diff))
# print '%d points and %d layers generated' % (len(point_list), no_layer)
# return point_list
|
mit
|
liikGit/MissionPlanner
|
Lib/site-packages/numpy/lib/polynomial.py
|
58
|
35930
|
"""
Functions to operate on polynomials.
"""
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import isscalar, abs, finfo, atleast_1d, hstack
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros, sort_complex
from numpy.lib.type_check import iscomplex, real, imag
from numpy.linalg import eigvals, lstsq
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Evaluate a polynomial at a point.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
.. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trignometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1, 0, 0, 0])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
>>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) #random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
Or a square matrix object:
>>> np.poly(np.matrix(P))
array([ 1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
pass
else:
raise ValueError, "input must be 1d or square 2d array."
if len(seq_of_zeros) == 0:
return 1.0
a = [1]
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, [1, -seq_of_zeros[k]], mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
pos_roots = sort_complex(NX.compress(roots.imag > 0, roots))
neg_roots = NX.conjugate(sort_complex(
NX.compress(roots.imag < 0,roots)))
if (len(pos_roots) == len(neg_roots) and
NX.alltrue(neg_roots == pos_roots)):
a = a.real.copy()
return a
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like of shape(M,)
Rank-1 array of polynomial co-efficients.
Returns
-------
out : ndarray
An array containing the complex roots of the polynomial.
Raises
------
ValueError:
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with
a given sequence of roots.
polyval : Evaluate a polynomial at a point.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] Wikipedia, "Companion matrix",
http://en.wikipedia.org/wiki/Companion_matrix
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if len(p.shape) != 1:
raise ValueError,"Input must be a rank-1 array."
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0, :] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : {array_like, poly1d}
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : {None, list of `m` scalars, scalar}, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
poly1d([ 0.33333333, 0.5 , 1. , 0. ])
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ])
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError, "Order of integral must be positive (see polyder)"
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError, \
"k must be a scalar or a rank-1 array of length 1 or >m."
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
The fourth-order derivative of a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([ 0.])
"""
m = int(m)
if m < 0:
raise ValueError, "Order of derivative must be positive (see polyint)"
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
def polyfit(x, y, deg, rcond=None, full=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than this
relative to the largest singular value will be ignored. The default
value is len(x)*eps, where eps is the relative precision of the float
type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is
False (the default) just the coefficients are returned, when True
diagnostic information from the singular value decomposition is also
returned.
Returns
-------
p : ndarray, shape (M,) or (M, K)
Polynomial coefficients, highest power first.
If `y` was 2-D, the coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond : present only if `full` = True
Residuals of the least-squares fit, the effective rank of the scaled
Vandermonde coefficient matrix, its singular values, and the specified
value of `rcond`. For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Computes polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[n] + ... + x[0] * p[1] + p[0] = y[0]
x[1]**n * p[n] + ... + x[1] * p[1] + p[0] = y[1]
...
x[k]**n * p[n] + ... + x[k] * p[1] + p[0] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
http://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254])
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179
>>> p(3.5)
-0.34732142857143039
>>> p(10)
22.579365079365115
High-order polynomials may oscillate wildly:
>>> p30 = np.poly1d(np.polyfit(x, y, 30))
/... RankWarning: Polyfit may be poorly conditioned...
>>> p30(4)
-0.80000000000000204
>>> p30(5)
-0.99999999999999445
>>> p30(4.5)
-0.10547061179440398
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0 :
raise ValueError, "expected deg >= 0"
if x.ndim != 1:
raise TypeError, "expected 1D vector for x"
if x.size == 0:
raise TypeError, "expected non-empty vector for x"
if y.ndim < 1 or y.ndim > 2 :
raise TypeError, "expected 1D or 2D array for y"
if x.shape[0] != y.shape[0] :
raise TypeError, "expected x and y to have same length"
# set rcond
if rcond is None :
rcond = len(x)*finfo(x.dtype).eps
# scale x to improve condition number
scale = abs(x).max()
if scale != 0 :
x /= scale
# solve least squares equation for powers of x
v = vander(x, order)
c, resids, rank, s = lstsq(v, y, rcond)
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning)
# scale returned coefficients
if scale != 0 :
if c.ndim == 1 :
c /= vander([scale], order)[0]
else :
c /= vander([scale], order).T
if full :
return c, resids, rank, s, rcond
else :
return c
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then `p(x)` is returned for each element of `x`.
If `x` is another polynomial then the composite polynomial `p(x(t))`
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, a 1D array of numbers, or an instance of poly1d, "at"
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([ 76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([ 76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asarray(x)
y = NX.zeros_like(x)
for i in range(len(p)):
y = x * y + p[i]
return y
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print p1
1 x + 2
>>> print p2
2
9 x + 5 x + 4
>>> print np.polyadd(p1, p2)
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polymul(a1, a2):
"""
Find the product of two polynomials.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
polyval
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
>>> print p1
2
1 x + 2 x + 3
>>> print p2
2
9 x + 5 x + 1
>>> print np.polymul(p1, p2)
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1,a2 = poly1d(a1),poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([ 1.5 , 1.75]), array([ 0.25]))
"""
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.copy()
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
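    """Return `astr` with its '**<power>' markers lifted onto a superscript line.

    Helper for poly1d.__str__: exponents found via _poly_mat are written on the
    line above the polynomial body, wrapping the output at roughly `wrap`
    characters per line.
    """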
n = 0
line1 = ''
line2 = ''
output = ' '
while 1:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2)+len(toadd2) > wrap) or \
(len(line1)+len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print np.poly1d(p)
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j])
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
Show the coefficient of the k-th power in the polynomial
(which is equivalent to ``p.c[-(i+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print p
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1, -3, 2])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
coeffs = None
order = None
variable = None
def __init__(self, c_or_r, r=0, variable=None):
if isinstance(c_or_r, poly1d):
for key in c_or_r.__dict__.keys():
self.__dict__[key] = c_or_r.__dict__[key]
if variable is not None:
self.__dict__['variable'] = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if len(c_or_r.shape) > 1:
raise ValueError, "Polynomial must be 1d only."
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self.__dict__['coeffs'] = c_or_r
self.__dict__['order'] = len(c_or_r) - 1
if variable is None:
variable = 'x'
self.__dict__['variable'] = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError, "Power to non-negative integers only."
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
return NX.alltrue(self.coeffs == other.coeffs)
def __ne__(self, other):
return NX.any(self.coeffs != other.coeffs)
def __setattr__(self, key, val):
raise ValueError, "Attributes cannot be changed this way."
def __getattr__(self, key):
if key in ['r', 'roots']:
return roots(self.coeffs)
elif key in ['c','coef','coefficients']:
return self.coeffs
elif key in ['o']:
return self.order
else:
try:
return self.__dict__[key]
except KeyError:
raise AttributeError("'%s' has no attribute '%s'" % (self.__class__, key))
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError, "Does not support negative powers."
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs))
self.__dict__['order'] = key
ind = 0
self.__dict__['coeffs'][ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
"""
return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
warnings.simplefilter('always',RankWarning)
|
gpl-3.0
|
alongwithyou/auto-sklearn
|
autosklearn/estimators.py
|
5
|
4834
|
import os
import random
import shutil
import numpy as np
import autosklearn.automl
from autosklearn.constants import *
class AutoSklearnClassifier(autosklearn.automl.AutoML):
"""This class implements the classification task. It must not be pickled!
Parameters
----------
time_left_for_this_task : int, optional (default=3600)
Time limit in seconds for the search for appropriate classification
models. By increasing this value, *auto-sklearn* will find better
configurations.
per_run_time_limit : int, optional (default=360)
Time limit for a single call to machine learning model.
initial_configurations_via_metalearning : int, optional (default=25)
ensemble_size : int, optional (default=50)
ensemble_nbest : int, optional (default=50)
seed : int, optional (default=1)
    ml_memory_limit : int, optional (default=3000)
        Memory limit for the machine learning algorithm. If the machine
        learning algorithm tries to allocate more memory than this, its
        evaluation will be stopped.
"""
def __init__(self, time_left_for_this_task=3600,
per_run_time_limit=360,
initial_configurations_via_metalearning=25,
ensemble_size=50, ensemble_nbest=50, seed=1,
ml_memory_limit=3000):
random_number = random.randint(0, 10000)
pid = os.getpid()
output_dir = "/tmp/autosklearn_output_%d_%d" % (pid, random_number)
tmp_dir = "/tmp/autosklearn_tmp_%d_%d" % (pid, random_number)
os.makedirs(output_dir)
os.makedirs(tmp_dir)
super(AutoSklearnClassifier, self).__init__(
tmp_dir, output_dir, time_left_for_this_task, per_run_time_limit,
log_dir=tmp_dir,
initial_configurations_via_metalearning=initial_configurations_via_metalearning,
ensemble_size=ensemble_size, ensemble_nbest=ensemble_nbest,
seed=seed, ml_memory_limit=ml_memory_limit)
def __del__(self):
self._delete_output_directories()
def _create_output_directories(self):
os.makedirs(self.output_dir)
os.makedirs(self.tmp_dir)
def _delete_output_directories(self):
shutil.rmtree(self.tmp_dir)
shutil.rmtree(self.output_dir)
def fit(self, X, y, metric='acc_metric', feat_type=None):
"""Fit *autosklearn* to given training set (X, y).
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target classes.
metric : str, optional (default='acc_metric')
The metric to optimize for. Can be one of: ['acc_metric',
'auc_metric', 'bac_metric', 'f1_metric', 'pac_metric']
feat_type : list, optional (default=None)
            List of length ``X.shape[1]`` describing whether an attribute is
            continuous or categorical. Categorical attributes will
            automatically be one-hot encoded.
"""
# Fit is supposed to be idempotent!
self._delete_output_directories()
self._create_output_directories()
y = np.atleast_1d(y)
if y.ndim == 1:
            # reshape is necessary to preserve data contiguity, unlike
            # [:, np.newaxis], which does not
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
for k in xrange(self.n_outputs_):
classes_k, y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
self.n_classes_ = np.array(self.n_classes_, dtype=np.int)
if self.n_outputs_ > 1:
task = MULTILABEL_CLASSIFICATION
else:
if len(self.classes_[0]) == 2:
task = BINARY_CLASSIFICATION
else:
task = MULTICLASS_CLASSIFICATION
# TODO: fix metafeatures calculation to allow this!
if y.shape[1] == 1:
y = y.flatten()
return super(AutoSklearnClassifier, self).fit(X, y, task, metric,
feat_type)
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
return super(AutoSklearnClassifier, self).predict(X)
class AutoSklearnRegressor(autosklearn.automl.AutoML):
def __init__(self, **kwargs):
raise NotImplementedError()
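if __name__ == "__main__":
    # --- Added usage sketch (not part of the original module) ---
    # Illustrates the intended call pattern only; the tiny time budgets are
    # assumptions for demonstration, and actual behaviour depends on the
    # installed auto-sklearn version.
    from sklearn.datasets import load_digits
    digits = load_digits()
    automl = AutoSklearnClassifier(time_left_for_this_task=60,
                                   per_run_time_limit=15)
    automl.fit(digits.data, digits.target)
    print(automl.predict(digits.data[:5]))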
|
bsd-3-clause
|
SvichkarevAnatoly/Course-Python-Bioinformatics
|
semester2/task9/exercise1.py
|
1
|
3771
|
import random
import matplotlib.pyplot as plot
from sklearn.tree import DecisionTreeRegressor
from sklearn import tree
# read data into iterable
data = open("winequality-red.csv")
xList = []
labels = []
names = []
firstLine = True
for line in data:
if firstLine:
names = line.strip().split(";")
firstLine = False
else:
# split on semi-colon
row = line.strip().split(";")
# put labels in separate array
labels.append(float(row[-1]))
# remove label from row
row.pop()
# convert row to floats
floatRow = [float(num) for num in row]
xList.append(floatRow)
nrows = len(xList)
ncols = len(xList[0])
# take fixed test set 30% of sample
random.seed(1) # set seed so results are the same each run
nSample = int(nrows * 0.30)
idxTest = random.sample(range(nrows), nSample)
idxTest.sort()
idxTrain = [idx for idx in range(nrows) if not (idx in idxTest)]
# Define test and training attribute and label sets
xTrain = [xList[r] for r in idxTrain]
xTest = [xList[r] for r in idxTest]
yTrain = [labels[r] for r in idxTrain]
yTest = [labels[r] for r in idxTest]
# train a series of models on random subsets of the training data
# collect the models in a list and check error of composite as list grows
# maximum number of models to generate
numTreesMax = 30
# tree depth - typically at the high end
treeDepth = 1
# pick how many attributes will be used in each model.
# authors recommend 1/3 for regression problem
# nAttr = ncols / 3
nAttr = 4
# initialize a list to hold models
modelList = []
indexList = []
predList = []
nTrainRows = len(yTrain)
for iTrees in range(numTreesMax):
modelList.append(DecisionTreeRegressor(max_depth=treeDepth))
# take random sample of attributes
idxAttr = random.sample(range(ncols), nAttr)
idxAttr.sort()
indexList.append(idxAttr)
# take a random sample of training rows
idxRows = []
for i in range(int(0.5 * nTrainRows)):
idxRows.append(random.choice(range(len(xTrain))))
idxRows.sort()
# build training set
xRfTrain = []
yRfTrain = []
for i in range(len(idxRows)):
temp = [xTrain[idxRows[i]][j] for j in idxAttr]
xRfTrain.append(temp)
yRfTrain.append(yTrain[idxRows[i]])
modelList[-1].fit(xRfTrain, yRfTrain)
# restrict xTest to attributes selected for training
xRfTest = []
for xx in xTest:
temp = [xx[i] for i in idxAttr]
xRfTest.append(temp)
latestOutSamplePrediction = modelList[-1].predict(xRfTest)
predList.append(list(latestOutSamplePrediction))
# build cumulative prediction from first "n" models
mse = []
allPredictions = []
for iModels in range(len(modelList)):
    # average the predictions of the first "iModels" models
prediction = []
for iPred in range(len(xTest)):
prediction.append(sum([predList[i][iPred]
for i in range(iModels + 1)]) / (iModels + 1))
allPredictions.append(prediction)
errors = [(yTest[i] - prediction[i]) for i in range(len(yTest))]
mse.append(sum([e * e for e in errors]) / len(yTest))
nModels = [i + 1 for i in range(len(modelList))]
plot.plot(nModels, mse)
plot.axis('tight')
plot.xlabel('Number of Trees in Ensemble')
plot.ylabel('Mean Squared Error')
plot.ylim((0.0, max(mse)))
# plot.show()
fig_name = "mseEx1_ntm" + str(numTreesMax) + "td" + str(treeDepth) + \
"na" + str(nAttr) + ".png"
plot.savefig(fig_name)
print('Minimum MSE for ' + fig_name)
print(min(mse))
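# --- Added comparison (not part of the original exercise) ---
# sklearn's own RandomForestRegressor implements a closely related
# bagging-plus-random-features scheme; the score below is only indicative and
# the hyper-parameters simply mirror the manual ensemble above.
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(n_estimators=numTreesMax, max_depth=treeDepth,
                           max_features=nAttr, random_state=1)
rf.fit(xTrain, yTrain)
rfPred = rf.predict(xTest)
rfMse = sum((yTest[i] - rfPred[i]) ** 2 for i in range(len(yTest))) / len(yTest)
print('Built-in RandomForestRegressor MSE: ' + str(rfMse))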
# # save first 2 tree
# with open("tree1Ex1.dot", 'w') as f1:
# f1 = tree.export_graphviz(modelList[0], out_file=f1)
#
# with open("tree2Ex1.dot", 'w') as f2:
# f2 = tree.export_graphviz(modelList[1], out_file=f2)
|
gpl-2.0
|
shangwuhencc/scikit-learn
|
examples/decomposition/plot_incremental_pca.py
|
244
|
1878
|
"""
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (up to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
plt.figure(figsize=(8, 8))
for c, i, target_name in zip("rgb", [0, 1, 2], iris.target_names):
plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
c=c, label=target_name)
if "Incremental" in title:
err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
plt.title(title + " of iris dataset\nMean absolute unsigned error "
"%.6f" % err)
else:
plt.title(title + " of iris dataset")
plt.legend(loc="best")
plt.axis([-4, 4, -1.5, 1.5])
plt.show()
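# --- Added sketch (not part of the original example) ---
# For data that genuinely does not fit in memory, the same model can be fed
# explicit mini-batches through partial_fit; this simply replays the iris data
# in chunks and should closely reproduce the batched fit_transform above.
ipca_stream = IncrementalPCA(n_components=n_components)
chunk = 10
for start in range(0, X.shape[0], chunk):
    ipca_stream.partial_fit(X[start:start + chunk])
X_stream = ipca_stream.transform(X)
print("mean abs difference vs fit_transform (up to sign): %.2e"
      % np.abs(np.abs(X_stream) - np.abs(X_ipca)).mean())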
|
bsd-3-clause
|
SaganBolliger/nupic
|
examples/opf/tools/testDiagnostics.py
|
58
|
1606
|
import numpy as np
def printMatrix(inputs, spOutput):
    ''' The (i, j)-th cell of the diff matrix holds the number of input pairs whose
    input patterns differ by i bits and whose activated output cells differ at j places.
Parameters:
--------------------------------------------------------------------
inputs: the input encodings
spOutput: the coincidences activated in response to each input
'''
from pylab import matplotlib as mat
w=len(np.nonzero(inputs[0])[0])
numActive=len(np.nonzero(spOutput[0])[0])
matrix = np.zeros([2*w+1,2*numActive+1])
for x in xrange(len(inputs)):
i = [_hammingDistance(inputs[x], z) for z in inputs[x:]]
j = [_hammingDistance(spOutput[x], a) for a in spOutput[x:]]
for p, q in zip(i,j):
matrix[p,q]+=1
for y in xrange(len(matrix)) :
matrix[y]=[max(10*x, 100) if (x<100 and x>0) else x for x in matrix[y]]
cdict = {'red':((0.0,0.0,0.0),(0.01,0.7,0.5),(0.3,1.0,0.7),(1.0,1.0,1.0)),\
'green': ((0.0,0.0,0.0),(0.01,0.7,0.5),(0.3,1.0,0.0),(1.0,1.0,1.0)),\
'blue': ((0.0,0.0,0.0),(0.01,0.7,0.5),(0.3,1.0,0.0),(1.0,0.5,1.0))}
my_cmap = mat.colors.LinearSegmentedColormap('my_colormap',cdict,256)
pyl=mat.pyplot
pyl.matshow(matrix, cmap = my_cmap)
pyl.colorbar()
pyl.ylabel('Number of bits by which the inputs differ')
pyl.xlabel('Number of cells by which input and output differ')
pyl.title('The difference matrix')
pyl.show()
def _hammingDistance(s1, s2):
"""Hamming distance between two numpy arrays s1 and s2"""
return sum(abs(s1-s2))
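if __name__ == "__main__":
  # --- Added usage sketch (not part of the original tool) ---
  # Feeds printMatrix a few synthetic fixed-weight binary encodings just to
  # illustrate the expected input shapes; the resulting plot is not meaningful.
  rng = np.random.RandomState(0)
  def _randomPattern(nRows, nCols, nActive):
    out = np.zeros((nRows, nCols), dtype=int)
    for row in out:
      row[rng.choice(nCols, nActive, replace=False)] = 1
    return out
  demoInputs = _randomPattern(20, 64, 4)
  demoOutputs = _randomPattern(20, 128, 8)
  printMatrix(demoInputs, demoOutputs)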
|
agpl-3.0
|
ryfeus/lambda-packs
|
Skimage_numpy/source/skimage/io/manage_plugins.py
|
14
|
10495
|
"""Handle image reading, writing and plotting plugins.
To improve performance, plugins are only loaded as needed. As a result, there
can be multiple states for a given plugin:
available: Defined in an *ini file located in `skimage.io._plugins`.
See also `skimage.io.available_plugins`.
partial definition: Specified in an *ini file, but not defined in the
corresponding plugin module. This will raise an error when loaded.
available but not on this system: Defined in `skimage.io._plugins`, but
a dependent library (e.g. Qt, PIL) is not available on your system.
This will raise an error when loaded.
loaded: The real availability is determined when it's explicitly loaded,
either because it's one of the default plugins, or because it's
loaded explicitly by the user.
"""
import sys
if sys.version.startswith('3'):
from configparser import ConfigParser # Python 3
else:
from ConfigParser import ConfigParser # Python 2
import os.path
from glob import glob
from .collection import imread_collection_wrapper
__all__ = ['use_plugin', 'call_plugin', 'plugin_info', 'plugin_order',
'reset_plugins', 'find_available_plugins', 'available_plugins']
# The plugin store will save a list of *loaded* io functions for each io type
# (e.g. 'imread', 'imsave', etc.). Plugins are loaded as requested.
plugin_store = None
# Dictionary mapping plugin names to a list of functions they provide.
plugin_provides = {}
# The module names for the plugins in `skimage.io._plugins`.
plugin_module_name = {}
# Meta-data about plugins provided by *.ini files.
plugin_meta_data = {}
# For each plugin type, default to the first available plugin as defined by
# the following preferences.
preferred_plugins = {
# Default plugins for all types (overridden by specific types below).
'all': ['pil', 'matplotlib', 'qt', 'freeimage'],
'imshow': ['matplotlib'],
'imshow_collection': ['matplotlib']
}
def _clear_plugins():
"""Clear the plugin state to the default, i.e., where no plugins are loaded
"""
global plugin_store
plugin_store = {'imread': [],
'imsave': [],
'imshow': [],
'imread_collection': [],
'imshow_collection': [],
'_app_show': []}
_clear_plugins()
def _load_preferred_plugins():
# Load preferred plugin for each io function.
io_types = ['imsave', 'imshow', 'imread_collection', 'imshow_collection',
'imread']
for p_type in io_types:
_set_plugin(p_type, preferred_plugins['all'])
plugin_types = (p for p in preferred_plugins.keys() if p != 'all')
for p_type in plugin_types:
_set_plugin(p_type, preferred_plugins[p_type])
def _set_plugin(plugin_type, plugin_list):
for plugin in plugin_list:
if plugin not in available_plugins:
continue
try:
use_plugin(plugin, kind=plugin_type)
break
except (ImportError, RuntimeError, OSError):
pass
def reset_plugins():
_clear_plugins()
_load_preferred_plugins()
def _parse_config_file(filename):
"""Return plugin name and meta-data dict from plugin config file."""
parser = ConfigParser()
parser.read(filename)
name = parser.sections()[0]
meta_data = {}
for opt in parser.options(name):
meta_data[opt] = parser.get(name, opt)
return name, meta_data
def _scan_plugins():
"""Scan the plugins directory for .ini files and parse them
to gather plugin meta-data.
"""
pd = os.path.dirname(__file__)
config_files = glob(os.path.join(pd, '_plugins', '*.ini'))
for filename in config_files:
name, meta_data = _parse_config_file(filename)
plugin_meta_data[name] = meta_data
provides = [s.strip() for s in meta_data['provides'].split(',')]
valid_provides = [p for p in provides if p in plugin_store]
for p in provides:
if not p in plugin_store:
print("Plugin `%s` wants to provide non-existent `%s`."
" Ignoring." % (name, p))
# Add plugins that provide 'imread' as provider of 'imread_collection'.
need_to_add_collection = ('imread_collection' not in valid_provides and
'imread' in valid_provides)
if need_to_add_collection:
valid_provides.append('imread_collection')
plugin_provides[name] = valid_provides
plugin_module_name[name] = os.path.basename(filename)[:-4]
_scan_plugins()
def find_available_plugins(loaded=False):
"""List available plugins.
Parameters
----------
loaded : bool
If True, show only those plugins currently loaded. By default,
all plugins are shown.
Returns
-------
p : dict
Dictionary with plugin names as keys and exposed functions as
values.
"""
active_plugins = set()
for plugin_func in plugin_store.values():
for plugin, func in plugin_func:
active_plugins.add(plugin)
d = {}
for plugin in plugin_provides:
if not loaded or plugin in active_plugins:
d[plugin] = [f for f in plugin_provides[plugin]
if not f.startswith('_')]
return d
available_plugins = find_available_plugins()
def call_plugin(kind, *args, **kwargs):
"""Find the appropriate plugin of 'kind' and execute it.
Parameters
----------
kind : {'imshow', 'imsave', 'imread', 'imread_collection'}
Function to look up.
plugin : str, optional
Plugin to load. Defaults to None, in which case the first
matching plugin is used.
*args, **kwargs : arguments and keyword arguments
Passed to the plugin function.
"""
if not kind in plugin_store:
raise ValueError('Invalid function (%s) requested.' % kind)
plugin_funcs = plugin_store[kind]
if len(plugin_funcs) == 0:
msg = ("No suitable plugin registered for %s.\n\n"
"You may load I/O plugins with the `skimage.io.use_plugin` "
"command. A list of all available plugins are shown in the "
"`skimage.io` docstring.")
raise RuntimeError(msg % kind)
plugin = kwargs.pop('plugin', None)
if plugin is None:
_, func = plugin_funcs[0]
else:
_load(plugin)
try:
func = [f for (p, f) in plugin_funcs if p == plugin][0]
except IndexError:
raise RuntimeError('Could not find the plugin "%s" for %s.' %
(plugin, kind))
return func(*args, **kwargs)
def use_plugin(name, kind=None):
"""Set the default plugin for a specified operation. The plugin
will be loaded if it hasn't been already.
Parameters
----------
name : str
Name of plugin.
kind : {'imsave', 'imread', 'imshow', 'imread_collection', 'imshow_collection'}, optional
Set the plugin for this function. By default,
the plugin is set for all functions.
See Also
--------
available_plugins : List of available plugins
Examples
--------
To use Matplotlib as the default image reader, you would write:
>>> from skimage import io
>>> io.use_plugin('matplotlib', 'imread')
To see a list of available plugins run ``io.available_plugins``. Note that
this lists plugins that are defined, but the full list may not be usable
if your system does not have the required libraries installed.
"""
if kind is None:
kind = plugin_store.keys()
else:
if not kind in plugin_provides[name]:
raise RuntimeError("Plugin %s does not support `%s`." %
(name, kind))
if kind == 'imshow':
kind = [kind, '_app_show']
else:
kind = [kind]
_load(name)
for k in kind:
if not k in plugin_store:
raise RuntimeError("'%s' is not a known plugin function." % k)
funcs = plugin_store[k]
# Shuffle the plugins so that the requested plugin stands first
# in line
funcs = [(n, f) for (n, f) in funcs if n == name] + \
[(n, f) for (n, f) in funcs if n != name]
plugin_store[k] = funcs
def _inject_imread_collection_if_needed(module):
"""Add `imread_collection` to module if not already present."""
if not hasattr(module, 'imread_collection') and hasattr(module, 'imread'):
imread = getattr(module, 'imread')
func = imread_collection_wrapper(imread)
setattr(module, 'imread_collection', func)
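# Illustrative sketch (not part of the original module): given a plugin
# module that defines only `imread`, the helper above derives a matching
# `imread_collection` via `imread_collection_wrapper`, so collection reads
# work uniformly across plugins. `some_plugin_module` below is hypothetical.
#
#     _inject_imread_collection_if_needed(some_plugin_module)
#     some_plugin_module.imread_collection(['a.png', 'b.png'])  # now available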
def _load(plugin):
"""Load the given plugin.
Parameters
----------
plugin : str
Name of plugin to load.
See Also
--------
plugins : List of available plugins
"""
if plugin in find_available_plugins(loaded=True):
return
    if plugin not in plugin_module_name:
raise ValueError("Plugin %s not found." % plugin)
else:
modname = plugin_module_name[plugin]
plugin_module = __import__('skimage.io._plugins.' + modname,
fromlist=[modname])
provides = plugin_provides[plugin]
for p in provides:
if p == 'imread_collection':
_inject_imread_collection_if_needed(plugin_module)
elif not hasattr(plugin_module, p):
print("Plugin %s does not provide %s as advertised. Ignoring." %
(plugin, p))
continue
store = plugin_store[p]
func = getattr(plugin_module, p)
if not (plugin, func) in store:
store.append((plugin, func))
def plugin_info(plugin):
"""Return plugin meta-data.
Parameters
----------
plugin : str
Name of plugin.
Returns
-------
m : dict
Meta data as specified in plugin ``.ini``.
"""
try:
return plugin_meta_data[plugin]
except KeyError:
raise ValueError('No information on plugin "%s"' % plugin)
def plugin_order():
"""Return the currently preferred plugin order.
Returns
-------
p : dict
Dictionary of preferred plugin order, with function name as key and
plugins (in order of preference) as value.
"""
p = {}
for func in plugin_store:
p[func] = [plugin_name for (plugin_name, f) in plugin_store[func]]
return p
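# Minimal end-to-end sketch of the public helpers above (plugin and file
# names are illustrative, not taken from this module):
#
#     use_plugin('matplotlib', 'imread')         # prefer matplotlib for reads
#     img = call_plugin('imread', 'photo.png')   # dispatches to that plugin
#     plugin_order()['imread'][0]                # -> 'matplotlib'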
|
mit
|
Asimmetric/influxdb-python
|
examples/tutorial_pandas.py
|
10
|
1381
|
import argparse
import pandas as pd
from influxdb import DataFrameClient
def main(host='localhost', port=8086):
user = 'root'
password = 'root'
dbname = 'example'
client = DataFrameClient(host, port, user, password, dbname)
print("Create pandas DataFrame")
df = pd.DataFrame(data=list(range(30)),
index=pd.date_range(start='2014-11-16',
periods=30, freq='H'))
print("Create database: " + dbname)
client.create_database(dbname)
print("Write DataFrame")
client.write_points(df, 'demo')
print("Write DataFrame with Tags")
client.write_points(df, 'demo', {'k1': 'v1', 'k2': 'v2'})
print("Read DataFrame")
client.query("select * from demo")
print("Delete database: " + dbname)
client.delete_database(dbname)
def parse_args():
parser = argparse.ArgumentParser(
description='example code to play with InfluxDB')
parser.add_argument('--host', type=str, required=False,
default='localhost',
help='hostname of InfluxDB http API')
parser.add_argument('--port', type=int, required=False, default=8086,
help='port of InfluxDB http API')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
main(host=args.host, port=args.port)
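# Hedged invocation example (assumes an InfluxDB server is reachable on the
# given host/port; the file name is whatever this script is saved as):
#
#     python tutorial_pandas.py --host localhost --port 8086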
|
mit
|
cython-testbed/pandas
|
pandas/core/tools/timedeltas.py
|
2
|
6250
|
"""
timedelta support tools
"""
import numpy as np
import pandas as pd
from pandas._libs import tslibs
from pandas._libs.tslibs.timedeltas import (convert_to_timedelta64,
array_to_timedelta64)
from pandas.core.dtypes.common import (
ensure_object,
is_integer_dtype,
is_timedelta64_dtype,
is_list_like)
from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass
def to_timedelta(arg, unit='ns', box=True, errors='raise'):
"""
Convert argument to timedelta
Parameters
----------
arg : string, timedelta, list, tuple, 1-d array, or Series
    unit : str, default 'ns'
        Denotes the unit of the arg (D, h, m, s, ms, us, ns) when the arg
        is an integer or float number
box : boolean, default True
- If True returns a Timedelta/TimedeltaIndex of the results
- if False returns a np.timedelta64 or ndarray of values of dtype
timedelta64[ns]
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaT
- If 'ignore', then invalid parsing will return the input
Returns
-------
ret : timedelta64/arrays of timedelta64 if parsing succeeded
Examples
--------
Parsing a single string to a Timedelta:
>>> pd.to_timedelta('1 days 06:05:01.00003')
Timedelta('1 days 06:05:01.000030')
>>> pd.to_timedelta('15.5us')
Timedelta('0 days 00:00:00.000015')
Parsing a list or array of strings:
>>> pd.to_timedelta(['1 days 06:05:01.00003', '15.5us', 'nan'])
TimedeltaIndex(['1 days 06:05:01.000030', '0 days 00:00:00.000015', NaT],
dtype='timedelta64[ns]', freq=None)
Converting numbers by specifying the `unit` keyword argument:
>>> pd.to_timedelta(np.arange(5), unit='s')
TimedeltaIndex(['00:00:00', '00:00:01', '00:00:02',
'00:00:03', '00:00:04'],
dtype='timedelta64[ns]', freq=None)
>>> pd.to_timedelta(np.arange(5), unit='d')
TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq=None)
See also
--------
pandas.DataFrame.astype : Cast argument to a specified dtype.
pandas.to_datetime : Convert argument to datetime.
"""
unit = _validate_timedelta_unit(unit)
if errors not in ('ignore', 'raise', 'coerce'):
raise ValueError("errors must be one of 'ignore', "
"'raise', or 'coerce'}")
if arg is None:
return arg
elif isinstance(arg, ABCSeries):
from pandas import Series
values = _convert_listlike(arg._values, unit=unit,
box=False, errors=errors)
return Series(values, index=arg.index, name=arg.name)
elif isinstance(arg, ABCIndexClass):
return _convert_listlike(arg, unit=unit, box=box,
errors=errors, name=arg.name)
elif isinstance(arg, np.ndarray) and arg.ndim == 0:
# extract array scalar and process below
arg = arg.item()
elif is_list_like(arg) and getattr(arg, 'ndim', 1) == 1:
return _convert_listlike(arg, unit=unit, box=box, errors=errors)
elif getattr(arg, 'ndim', 1) > 1:
raise TypeError('arg must be a string, timedelta, list, tuple, '
'1-d array, or Series')
# ...so it must be a scalar value. Return scalar.
return _coerce_scalar_to_timedelta_type(arg, unit=unit,
box=box, errors=errors)
_unit_map = {
'Y': 'Y',
'y': 'Y',
'W': 'W',
'w': 'W',
'D': 'D',
'd': 'D',
'days': 'D',
'Days': 'D',
'day': 'D',
'Day': 'D',
'M': 'M',
'H': 'h',
'h': 'h',
'm': 'm',
'T': 'm',
'S': 's',
's': 's',
'L': 'ms',
'MS': 'ms',
'ms': 'ms',
'US': 'us',
'us': 'us',
'NS': 'ns',
'ns': 'ns',
}
def _validate_timedelta_unit(arg):
""" provide validation / translation for timedelta short units """
try:
return _unit_map[arg]
except (KeyError, TypeError):
if arg is None:
return 'ns'
raise ValueError("invalid timedelta unit {arg} provided"
.format(arg=arg))
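# Illustrative behaviour of the translation above (a sketch, mirroring the
# _unit_map table):
#
#     _validate_timedelta_unit('T')    # -> 'm' (minutes)
#     _validate_timedelta_unit(None)   # -> 'ns' (default)
#     _validate_timedelta_unit('foo')  # raises ValueError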
def _coerce_scalar_to_timedelta_type(r, unit='ns', box=True, errors='raise'):
"""Convert string 'r' to a timedelta object."""
try:
result = convert_to_timedelta64(r, unit)
except ValueError:
if errors == 'raise':
raise
elif errors == 'ignore':
return r
# coerce
result = pd.NaT
if box:
result = tslibs.Timedelta(result)
return result
def _convert_listlike(arg, unit='ns', box=True, errors='raise', name=None):
"""Convert a list of objects to a timedelta index object."""
if isinstance(arg, (list, tuple)) or not hasattr(arg, 'dtype'):
arg = np.array(list(arg), dtype='O')
# these are shortcut-able
if is_timedelta64_dtype(arg):
value = arg.astype('timedelta64[ns]')
elif is_integer_dtype(arg):
value = arg.astype('timedelta64[{unit}]'.format(unit=unit)).astype(
'timedelta64[ns]', copy=False)
else:
try:
value = array_to_timedelta64(ensure_object(arg),
unit=unit, errors=errors)
value = value.astype('timedelta64[ns]', copy=False)
except ValueError:
if errors == 'ignore':
return arg
else:
# This else-block accounts for the cases when errors='raise'
# and errors='coerce'. If errors == 'raise', these errors
# should be raised. If errors == 'coerce', we shouldn't
# expect any errors to be raised, since all parsing errors
# cause coercion to pd.NaT. However, if an error / bug is
# introduced that causes an Exception to be raised, we would
# like to surface it.
raise
if box:
from pandas import TimedeltaIndex
value = TimedeltaIndex(value, unit='ns', name=name)
return value
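# Hedged usage sketch of the error-handling paths above (values are
# illustrative, not taken from the original docstring):
#
#     to_timedelta(['1 days', 'garbage'], errors='coerce')
#     # -> TimedeltaIndex(['1 days', NaT], dtype='timedelta64[ns]', freq=None)
#     to_timedelta(['1 days', 'garbage'], errors='raise')
#     # -> raises ValueError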
|
bsd-3-clause
|
liberatorqjw/scikit-learn
|
sklearn/cross_validation.py
|
6
|
62788
|
"""
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import _num_samples, check_array
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
__all__ = ['Bootstrap',
'KFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n, indices=None):
if indices is None:
indices = True
else:
warnings.warn("The indices parameter is deprecated and will be "
"removed (assumed True) in 0.17", DeprecationWarning,
stacklevel=1)
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
self._indices = indices
@property
def indices(self):
warnings.warn("The indices attribute is deprecated and will be "
"removed (assumed True) in 0.17", DeprecationWarning,
stacklevel=1)
return self._indices
def __iter__(self):
indices = self._indices
if indices:
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
if indices:
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
    in testing on all distinct subsets of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
    Due to the high number of iterations, which grows combinatorially with the
    number of samples, this cross-validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p, indices=None):
super(LeavePOut, self).__init__(n, indices)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
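# The length above is simply "n choose p": for the docstring example,
# factorial(4) / (factorial(4 - 2) * factorial(2)) == 6 distinct test sets.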
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, indices, shuffle, random_state):
super(_BaseKFold, self).__init__(n, indices)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling).
    Each fold is then used as a validation set once while the k - 1 remaining
    folds form the training set.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = cross_validation.KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold: take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
"""
def __init__(self, n, n_folds=3, indices=None, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, indices, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = cross_validation.StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size trunc(n_samples / n_folds); the last one has the
    complementary size.
"""
def __init__(self, y, n_folds=3, indices=None, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, indices, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = np.bincount(y_inversed)
min_labels = np.min(label_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def __init__(self, labels, indices=None):
super(LeaveOneLabelOut, self).__init__(len(labels), indices)
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
    all assigned the same label.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
Number of samples to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
"""
def __init__(self, labels, p, indices=None):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels), indices)
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class Bootstrap(object):
"""Random sampling with replacement cross-validation iterator
Provides train/test indices to split data in train test sets
while resampling the input n_iter times: each time a new
random split of the data is performed and then samples are drawn
(with replacement) on each side of the split to build the training
and test sets.
Note: contrary to other cross-validation strategies, bootstrapping
    will allow some samples to occur several times in each split. However,
a sample that occurs in the train split will never occur in the test
split and vice-versa.
If you want each sample to occur at most once you should probably
use ShuffleSplit cross validation instead.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default is 3)
Number of bootstrapping iterations
train_size : int or float (default is 0.5)
If int, number of samples to include in the training split
(should be smaller than the total number of samples passed
in the dataset).
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split.
test_size : int or float or None (default is None)
        If int, number of samples to include in the test split
(should be smaller than the total number of samples passed
in the dataset).
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split.
If None, n_test is set as the complement of n_train.
random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> bs = cross_validation.Bootstrap(9, random_state=0)
>>> len(bs)
3
>>> print(bs)
Bootstrap(9, n_iter=3, train_size=5, test_size=4, random_state=0)
>>> for train_index, test_index in bs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [1 8 7 7 8] TEST: [0 3 0 5]
TRAIN: [5 4 2 4 2] TEST: [6 7 1 0]
TRAIN: [4 7 0 1 1] TEST: [5 3 6 5]
See also
--------
ShuffleSplit: cross validation using random permutations.
"""
# Static marker to be able to introspect the CV type
indices = True
def __init__(self, n, n_iter=3, train_size=.5, test_size=None,
random_state=None, n_bootstraps=None):
# See, e.g., http://youtu.be/BzHz0J9a6k0?t=9m38s for a motivation
# behind this deprecation
warnings.warn("Bootstrap will no longer be supported as a " +
"cross-validation method as of version 0.15 and " +
"will be removed in 0.17", DeprecationWarning)
self.n = n
if n_bootstraps is not None: # pragma: no cover
warnings.warn("n_bootstraps was renamed to n_iter and will "
"be removed in 0.16.", DeprecationWarning)
n_iter = n_bootstraps
self.n_iter = n_iter
if (isinstance(train_size, numbers.Real) and train_size >= 0.0
and train_size <= 1.0):
self.train_size = int(ceil(train_size * n))
elif isinstance(train_size, numbers.Integral):
self.train_size = train_size
else:
raise ValueError("Invalid value for train_size: %r" %
train_size)
if self.train_size > n:
raise ValueError("train_size=%d should not be larger than n=%d" %
(self.train_size, n))
if isinstance(test_size, numbers.Real) and 0.0 <= test_size <= 1.0:
self.test_size = int(ceil(test_size * n))
elif isinstance(test_size, numbers.Integral):
self.test_size = test_size
elif test_size is None:
self.test_size = self.n - self.train_size
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if self.test_size > n - self.train_size:
raise ValueError(("test_size + train_size=%d, should not be " +
"larger than n=%d") %
(self.test_size + self.train_size, n))
self.random_state = random_state
def __iter__(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_train = permutation[:self.train_size]
ind_test = permutation[self.train_size:self.train_size
+ self.test_size]
# bootstrap in each split individually
train = rng.randint(0, self.train_size,
size=(self.train_size,))
test = rng.randint(0, self.test_size,
size=(self.test_size,))
yield ind_train[train], ind_test[test]
def __repr__(self):
return ('%s(%d, n_iter=%d, train_size=%d, test_size=%d, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
self.train_size,
self.test_size,
self.random_state,
))
def __len__(self):
return self.n_iter
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
indices=None, random_state=None, n_iterations=None):
if indices is None:
indices = True
else:
warnings.warn("The indices parameter is deprecated and will be "
"removed (assumed True) in 0.17", DeprecationWarning)
self.n = n
self.n_iter = n_iter
if n_iterations is not None: # pragma: no cover
warnings.warn("n_iterations was renamed to n_iter for consistency "
" and will be removed in 0.16.")
self.n_iter = n_iterations
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self._indices = indices
self.n_train, self.n_test = _validate_shuffle_split(n,
test_size,
train_size)
@property
def indices(self):
warnings.warn("The indices attribute is deprecated and will be "
"removed (assumed True) in 0.17", DeprecationWarning,
stacklevel=1)
return self._indices
def __iter__(self):
if self._indices:
for train, test in self._iter_indices():
yield train, test
return
for train, test in self._iter_indices():
train_m = np.zeros(self.n, dtype=bool)
test_m = np.zeros(self.n, dtype=bool)
train_m[train] = True
test_m[test] = True
yield train_m, test_m
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
See also
--------
Bootstrap: cross-validation using re-sampling with replacement.
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
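# Worked example of the resolution rules above (a sketch, not from the
# original source): with n=10, test_size=0.25 and train_size=None,
# n_test = ceil(0.25 * 10) = 3 and n_train = 10 - 3 = 7, so the function
# returns (7, 3).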
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
indices=None, random_state=None, n_iterations=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, indices, random_state,
n_iterations)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(np.bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = np.bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
# Because of rounding issues (as n_train and n_test are not
# dividers of the number of elements per class), we may end
            # up here with fewer samples in train and test than asked for.
if len(train) < self.n_train or len(test) < self.n_test:
# We complete by affecting randomly the missing indexes
missing_idx = np.where(np.bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
train.extend(missing_idx[:(self.n_train - len(train))])
test.extend(missing_idx[-(self.n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
##############################################################################
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
This generator must include all elements in the test set exactly once.
Otherwise, a ValueError is raised.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
p = np.concatenate([p for p, _ in preds_blocks])
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, X.shape[0]):
raise ValueError('cross_val_predict only works for partitions')
preds = p.copy()
preds[locs] = p
return preds
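# Hedged usage sketch (the estimator and data below are placeholders, not
# part of this module):
#
#     from sklearn.linear_model import LinearRegression
#     preds = cross_val_predict(LinearRegression(), X, y, cv=5)
#     # one out-of-fold prediction per row of X, aligned with the input order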
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
n_samples = _num_samples(X)
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, np.asarray(v)[train]
if hasattr(v, '__len__') and len(v) == n_samples else v)
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
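# Illustrative behaviour (sketch):
#
#     _check_is_partition(np.array([2, 0, 1]), 3)   # True: a permutation of range(3)
#     _check_is_partition(np.array([0, 0, 1]), 3)   # False: index 2 is never hit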
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
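# Hedged usage sketch (estimator and data are placeholders, not from this
# module):
#
#     from sklearn.svm import SVC
#     scores = cross_val_score(SVC(), X, y, cv=5, scoring='accuracy')
#     # scores.shape == (5,), one accuracy value per fold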
class FitFailedWarning(RuntimeWarning):
pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
    scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
    # Adjust length of sample weights
n_samples = _num_samples(X)
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, np.asarray(v)[train]
if hasattr(v, '__len__') and len(v) == n_samples else v)
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
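# Sketch of the label-constrained case above: with labels = [0, 0, 1, 1],
# indices are permuted only within {0, 1} and within {2, 3}, so a sample
# from the first group never swaps positions with one from the second.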
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, a cv generator instance, or None
The input specifying which cv generator to use. It can be an
integer, in which case it is the number of folds in a KFold,
        None, in which case 3-fold CV is used, or another object that
will then be used as a cv generator.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
    classifier : boolean, optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
return _check_cv(cv, X=X, y=y, classifier=classifier, warn_mask=True)
def _check_cv(cv, X=None, y=None, classifier=False, warn_mask=False):
# This exists for internal use while indices is being deprecated.
is_sparse = sp.issparse(X)
needs_indices = is_sparse or not hasattr(X, "shape")
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if warn_mask and not needs_indices:
warnings.warn('check_cv will return indices instead of boolean '
'masks from 0.17', DeprecationWarning)
else:
needs_indices = None
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv, indices=needs_indices)
else:
cv = KFold(_num_samples(y), cv, indices=needs_indices)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv, indices=needs_indices)
if needs_indices and not getattr(cv, "_indices", True):
raise ValueError("Sparse data and lists require indices-based cross"
" validation generator, got: %r", cv)
return cv
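# Resolution sketch (hedged): _check_cv(5, X, y, classifier=True) with a
# binary or multiclass y yields StratifiedKFold(y, 5); otherwise an integer
# cv falls back to KFold; a ready-made generator is passed through after the
# indices check.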
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
        the same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutation.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False  # prevent nose from collecting this function as a test
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
    data in a one-liner.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
List containing train-test split of input array.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> a, b = np.arange(10).reshape((5, 2)), range(5)
>>> a
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(b)
[0, 1, 2, 3, 4]
>>> a_train, a_test, b_train, b_test = train_test_split(
... a, b, test_size=0.33, random_state=42)
...
>>> a_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> b_train
[2, 0, 3]
>>> a_test
array([[2, 3],
[8, 9]])
>>> b_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
dtype = options.pop('dtype', None)
if dtype is not None:
warnings.warn("dtype option is ignored and will be removed in 0.18.")
force_arrays = options.pop('force_arrays', False)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if force_arrays:
warnings.warn("The force_arrays option is deprecated and will be "
"removed in 0.18.", DeprecationWarning)
arrays = [check_array(x, 'csr', ensure_2d=False,
force_all_finite=False) if x is not None else x
for x in arrays]
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
|
bsd-3-clause
|
dblalock/flock
|
python/analyze/classify/sota.py
|
1
|
1053
|
# make a giant pandas dataframe I can combine with my own crap
import pandas as pd
from results_utils import *
from munge import deleteCols
def datasetStats():
# note that these cols probably have a lot of nans
df = buildCombinedResults()
return extractStatsCols(df)
def rawSotaResults():
# start with a df with one col of dataset names and n-1 cols of
# classifier err rates
errs = buildCombinedResults()
# pull out set of datasets
datasets = errs[DATASET_COL_NAME]
# get a df with just the (sorted) classifier cols
errs = removeStatsCols(errs)
deleteCols(errs, DATASET_COL_NAME)
errs.reindex_axis(sorted(errs.columns), axis=1)
allRows = []
for colName in errs.columns:
# print colName
col = errs[colName]
for i, errRate in enumerate(col):
allRows.append({
DATASET_COL_NAME: datasets.iloc[i],
ACCURACY_COL_NAME: 1.0 - errRate,
CLASSIFIER_COL_NAME: colName,
})
union = pd.DataFrame.from_records(allRows)
union = union[RESULT_COL_NAMES]
return union
if __name__ == '__main__':
print rawSotaResults()
|
mit
|
Titan-C/scikit-learn
|
sklearn/manifold/isomap.py
|
39
|
7519
|
"""Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
        Not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
        Not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
        Stores nearest neighbors instance, including BallTree or KDTree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto', n_jobs=1):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.n_jobs = n_jobs
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_ = NearestNeighbors(n_neighbors=self.n_neighbors,
algorithm=self.neighbors_algorithm,
n_jobs=self.n_jobs)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter,
n_jobs=self.n_jobs)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance', n_jobs=self.n_jobs)
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
        -----
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
# Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
# This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min(self.dist_matrix_[indices[i]] +
distances[i][:, None], 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
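# Illustrative usage sketch (added for exposition, not part of the original
# module): a hedged end-to-end run of the estimator API documented above --
# fit/transform a toy manifold, embed held-out points via the geodesic graph,
# and inspect the reconstruction error. The dataset choice is an assumption.
def _example_isomap_usage():  # pragma: no cover
    from sklearn.datasets import make_s_curve
    X, _ = make_s_curve(n_samples=500, random_state=0)
    iso = Isomap(n_neighbors=10, n_components=2)
    X_2d = iso.fit_transform(X)        # shape (500, 2) embedding
    X_new_2d = iso.transform(X[:5])    # embed points against the training graph
    err = iso.reconstruction_error()   # Frobenius-norm cost described above
    return X_2d, X_new_2d, err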
|
bsd-3-clause
|
gallir/influxdb-python
|
influxdb/influxdb08/dataframe_client.py
|
6
|
6828
|
# -*- coding: utf-8 -*-
"""
DataFrame client for InfluxDB
"""
import math
import warnings
from .client import InfluxDBClient
class DataFrameClient(InfluxDBClient):
"""
The ``DataFrameClient`` object holds information necessary to connect
to InfluxDB. Requests can be made to InfluxDB directly through the client.
The client reads and writes from pandas DataFrames.
"""
def __init__(self, ignore_nan=True, *args, **kwargs):
super(DataFrameClient, self).__init__(*args, **kwargs)
try:
global pd
import pandas as pd
except ImportError as ex:
raise ImportError('DataFrameClient requires Pandas, '
'"{ex}" problem importing'.format(ex=str(ex)))
self.EPOCH = pd.Timestamp('1970-01-01 00:00:00.000+00:00')
self.ignore_nan = ignore_nan
def write_points(self, data, *args, **kwargs):
"""
Write to multiple time series names.
:param data: A dictionary mapping series names to pandas DataFrames
:param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
or 'u'.
:param batch_size: [Optional] Value to write the points in batches
instead of all at one time. Useful for when doing data dumps from
one database to another or when doing a massive write operation
:type batch_size: int
"""
batch_size = kwargs.get('batch_size')
time_precision = kwargs.get('time_precision', 's')
if batch_size:
kwargs.pop('batch_size') # don't hand over to InfluxDBClient
for key, data_frame in data.items():
number_batches = int(math.ceil(
len(data_frame) / float(batch_size)))
for batch in range(number_batches):
start_index = batch * batch_size
end_index = (batch + 1) * batch_size
data = [self._convert_dataframe_to_json(
name=key,
dataframe=data_frame.ix[start_index:end_index].copy(),
time_precision=time_precision)]
InfluxDBClient.write_points(self, data, *args, **kwargs)
return True
else:
data = [self._convert_dataframe_to_json(
name=key, dataframe=dataframe, time_precision=time_precision)
for key, dataframe in data.items()]
return InfluxDBClient.write_points(self, data, *args, **kwargs)
def write_points_with_precision(self, data, time_precision='s'):
"""
DEPRECATED. Write to multiple time series names
"""
warnings.warn(
"write_points_with_precision is deprecated, and will be removed "
"in future versions. Please use "
"``DataFrameClient.write_points(time_precision='..')`` instead.",
FutureWarning)
        return self.write_points(data, time_precision=time_precision)
def query(self, query, time_precision='s', chunked=False):
"""
        Query data and return DataFrames.
Returns a DataFrame for a single time series and a map for multiple
time series with the time series as value and its name as key.
:param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
or 'u'.
:param chunked: [Optional, default=False] True if the data shall be
retrieved in chunks, False otherwise.
"""
result = InfluxDBClient.query(self, query=query,
time_precision=time_precision,
chunked=chunked)
if len(result) == 0:
return result
elif len(result) == 1:
return self._to_dataframe(result[0], time_precision)
else:
ret = {}
for time_series in result:
ret[time_series['name']] = self._to_dataframe(time_series,
time_precision)
return ret
def _to_dataframe(self, json_result, time_precision):
dataframe = pd.DataFrame(data=json_result['points'],
columns=json_result['columns'])
if 'sequence_number' in dataframe.keys():
dataframe.sort(['time', 'sequence_number'], inplace=True)
else:
dataframe.sort(['time'], inplace=True)
pandas_time_unit = time_precision
if time_precision == 'm':
pandas_time_unit = 'ms'
elif time_precision == 'u':
pandas_time_unit = 'us'
dataframe.index = pd.to_datetime(list(dataframe['time']),
unit=pandas_time_unit,
utc=True)
del dataframe['time']
return dataframe
def _convert_dataframe_to_json(self, dataframe, name, time_precision='s'):
if not isinstance(dataframe, pd.DataFrame):
raise TypeError('Must be DataFrame, but type was: {}.'
.format(type(dataframe)))
if not (isinstance(dataframe.index, pd.tseries.period.PeriodIndex) or
isinstance(dataframe.index, pd.tseries.index.DatetimeIndex)):
raise TypeError('Must be DataFrame with DatetimeIndex or \
PeriodIndex.')
dataframe.index = dataframe.index.to_datetime()
if dataframe.index.tzinfo is None:
dataframe.index = dataframe.index.tz_localize('UTC')
dataframe['time'] = [self._datetime_to_epoch(dt, time_precision)
for dt in dataframe.index]
data = {'name': name,
'columns': [str(column) for column in dataframe.columns],
'points': [self._convert_array(x) for x in dataframe.values]}
return data
def _convert_array(self, array):
try:
global np
import numpy as np
except ImportError as ex:
raise ImportError('DataFrameClient requires Numpy, '
'"{ex}" problem importing'.format(ex=str(ex)))
if self.ignore_nan:
number_types = (int, float, np.number)
condition = (all(isinstance(el, number_types) for el in array) and
np.isnan(array))
return list(np.where(condition, None, array))
else:
return list(array)
def _datetime_to_epoch(self, datetime, time_precision='s'):
seconds = (datetime - self.EPOCH).total_seconds()
if time_precision == 's':
return seconds
elif time_precision == 'm' or time_precision == 'ms':
return seconds * 1000
elif time_precision == 'u':
return seconds * 1000000
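# Illustrative usage sketch (added for exposition, not part of the original
# module): a hedged example of writing a DatetimeIndex-ed DataFrame and
# querying it back, per the write_points/query docstrings above. Host,
# credentials and series names below are placeholder assumptions.
def _example_dataframe_client_usage():  # pragma: no cover
    import pandas as pd
    client = DataFrameClient(host='localhost', port=8086,
                             username='root', password='root',
                             database='example_db')
    index = pd.date_range('2015-01-01', periods=3, freq='H')
    cpu_load = pd.DataFrame({'value': [0.1, 0.2, 0.3]}, index=index)
    # ``data`` maps series names to DataFrames; batch_size is optional.
    client.write_points({'cpu_load': cpu_load}, time_precision='s')
    # A single matching series is returned as one DataFrame.
    return client.query('select * from cpu_load', time_precision='s')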
|
mit
|
BlackArbsCEO/trading-with-python
|
lib/widgets.py
|
78
|
3012
|
# -*- coding: utf-8 -*-
"""
A collection of widgets for gui building
Copyright: Jev Kuznetsov
License: BSD
"""
from __future__ import division
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import numpy as np
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
class MatplotlibWidget(QWidget):
def __init__(self,parent=None,grid=True):
QWidget.__init__(self,parent)
self.grid = grid
self.fig = Figure()
self.canvas =FigureCanvas(self.fig)
self.canvas.setParent(self)
self.canvas.mpl_connect('button_press_event', self.onPick) # bind pick event
#self.axes = self.fig.add_subplot(111)
margins = [0.05,0.1,0.9,0.8]
self.axes = self.fig.add_axes(margins)
self.toolbar = NavigationToolbar(self.canvas,self)
#self.initFigure()
layout = QVBoxLayout()
layout.addWidget(self.toolbar)
layout.addWidget(self.canvas)
self.setLayout(layout)
def onPick(self,event):
print 'Pick event'
print 'you pressed', event.button, event.xdata, event.ydata
def update(self):
self.canvas.draw()
def plot(self,*args,**kwargs):
self.axes.plot(*args,**kwargs)
self.axes.grid(self.grid)
self.update()
def clear(self):
self.axes.clear()
def initFigure(self):
self.axes.grid(True)
x = np.linspace(-1,1)
y = x**2
self.axes.plot(x,y,'o-')
class PlotWindow(QMainWindow):
''' a stand-alone window with embedded matplotlib widget '''
def __init__(self,parent=None):
super(PlotWindow,self).__init__(parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.mplWidget = MatplotlibWidget()
self.setCentralWidget(self.mplWidget)
def plot(self,dataFrame):
''' plot dataframe '''
dataFrame.plot(ax=self.mplWidget.axes)
def getAxes(self):
return self.mplWidget.axes
def getFigure(self):
return self.mplWidget.fig
def update(self):
self.mplWidget.update()
class MainForm(QMainWindow):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
self.setWindowTitle('Demo: PyQt with matplotlib')
self.plot = MatplotlibWidget()
self.setCentralWidget(self.plot)
self.plot.clear()
self.plot.plot(np.random.rand(10),'x-')
#---------------------
if __name__=='__main__':
app = QApplication(sys.argv)
form = MainForm()
form.show()
app.exec_()
|
bsd-3-clause
|
kpj/SDEMotif
|
publication_figures.py
|
1
|
4040
|
"""
Create nice looking publication figures
"""
import os
import numpy as np
import pandas as pd
import networkx as nx
import seaborn as sns
import matplotlib as mpl
import matplotlib.pylab as plt
from tqdm import tqdm
from main import analyze_system
from setup import generate_basic_system
from nm_data_generator import add_node_to_system
def visualize_node_influence():
""" Compare examples where fourth node perturbs system and where it doesn't
"""
def simulate(syst, reps=1000):
matrices = []
with tqdm(total=reps) as pbar:
while reps >= 0:
_, mat, _ = analyze_system(syst, repetition_num=1)
if mat is None:
continue
pbar.update()
reps -= 1
if mat.shape == (4, 4):
mat = mat[:-1, :-1]
assert mat.shape == (3, 3)
matrices.append(mat)
return np.asarray(matrices)
def plot_correlation_hist(matrices, ax):
for i, row in enumerate(matrices.T):
for j, series in enumerate(row):
if i == j: break
sns.distplot(series, ax=ax, label=r'$c_{{{},{}}}$'.format(i,j))
ax.set_xlabel('correlation')
ax.set_ylabel('count')
ax.set_xlim((-1,1))
ax.legend(loc='best')
def plot_system(syst, ax):
graph = nx.from_numpy_matrix(syst.jacobian.T, create_using=nx.DiGraph())
nx.draw(
graph, ax=ax,
with_labels=True)
ax.axis('off')
ax.set_xticks([], [])
ax.set_yticks([], [])
# generate systems
basic_system = generate_basic_system()
more_systs = add_node_to_system(basic_system)
similar_system = more_systs[42]
different_system = more_systs[22] # 52
systems = {
'basic': basic_system,
'similar': similar_system,
'different': different_system
}
# simulations
matrices = {}
for name, syst in systems.items():
matrices[name] = (syst, simulate(syst))
# plot result
for name, (syst, mats) in matrices.items():
plt.figure()
plot_correlation_hist(mats, plt.gca())
plot_system(syst, plt.axes([.3,.5,.3,.3]))
plt.savefig(f'images/node_influence_{name}.pdf')
def real_data_example():
""" Show example with cocoa/rhodo MS data
"""
def read_peaks(fname):
df_fname = fname + '.df'
if not os.path.exists(df_fname):
df = pd.read_csv(fname)
dat = {'sample_name': [], 'mz': [], 'intensity': []}
for i, row in tqdm(df.iterrows(), total=df.shape[0]):
for ind in row.index:
if ind.startswith('LC.MS'):
dat['sample_name'].append(ind)
dat['mz'].append(row['mz'])
dat['intensity'].append(row[ind])
out_df = pd.DataFrame(dat)
out_df.to_csv(df_fname)
else:
print(f'Using cached data for "{fname}"')
out_df = pd.read_csv(df_fname, index_col=0)
return out_df
df = read_peaks('data/rl_data.csv')
df.set_index('mz', inplace=True)
# plot overview
plt.figure(figsize=(20,6))
subset = np.random.choice(df['sample_name'].unique(), size=3)
df = df[df['sample_name'].isin(subset)]
series_list = []
ax = plt.subplot(121)
for sample, group in df.groupby('sample_name'):
ax.plot(group.index, group['intensity'], label=sample)
series_list.append(group['intensity'])
ax.set_xlabel('mz')
ax.set_ylabel('intensity')
ax.set_yscale('log')
ax.legend(loc='best')
cur = pd.concat(series_list, axis=1)
corrs = cur.corr()
sns.heatmap(corrs, ax=plt.subplot(122))
plt.tight_layout()
plt.savefig('images/rl_example.pdf')
def main():
#visualize_node_influence()
real_data_example()
if __name__ == '__main__':
sns.set_style('white')
plt.style.use('seaborn-poster')
main()
|
mit
|
gravitino/cudadtw
|
results/LDTW/plot_results.py
|
2
|
1580
|
import numpy as np
import pylab as pl
import matplotlib
Ms = [128, 256, 512, 1024, 2048, 4096]
fig = pl.figure()
ax = fig.add_subplot(111)
pl.rcParams.update({'font.size': 30})
gpu10avg = np.array([0.3338633, 0.6289851, 1.211321,
2.370729, 4.705315, 12.31788])
gpu10std = np.array([0.0051115233, 0.0075934506, 0.0088027224,
0.008916461, 0.0141416754, 0.1283775664])
ax.errorbar(Ms, gpu10avg, fmt="o", yerr=gpu10std, c="blue")
p1, = ax.plot(Ms, gpu10avg, color="blue", linestyle="-")
omp10avg = np.array([8.768149, 17.954, 36.82623,
75.34849, 152.4197, 305.3048])
omp10std = np.array([0.176690129, 0.1663725471, 0.129380971,
0.2575330203, 0.9130701628, 3.7794122941])
ax.errorbar(Ms, omp10avg, fmt="s", yerr=omp10std, c="green")
p2, = ax.plot(Ms, omp10avg, color="green", linestyle="--")
cpu10avg = np.array([58.87119, 117.7952, 236.9547, 478.7272,
970.7637, 1964.442])
cpu10std = np.array([1.1404732765, 0.9450350728, 2.4774986063,
3.65465, 4.8321658831, 10.6585738049])
ax.errorbar(Ms, cpu10avg, fmt="v", yerr=cpu10std, c="red")
p3, = ax.plot(Ms, cpu10avg, color="red", linestyle="-.")
pl.legend([p1, p2, p3], ["GPU-10", "CPU-openmp-10", "CPU-single-10"])
ax.set_title("subsequence CLDTW (ecg dataset)")
ax.set_xlabel('query length')
ax.set_ylabel('execution time in seconds')
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xticks(Ms)
ax.get_xaxis().set_major_formatter(matplotlib.ticker.LogFormatter(2))
pl.tight_layout()
pl.show()
|
gpl-3.0
|
fmfn/UnbalancedDataset
|
imblearn/tensorflow/tests/test_generator.py
|
2
|
5390
|
from distutils.version import LooseVersion
import pytest
import numpy as np
from scipy import sparse
from sklearn.datasets import load_iris
from imblearn.datasets import make_imbalance
from imblearn.under_sampling import NearMiss
from imblearn.over_sampling import RandomOverSampler
from imblearn.tensorflow import balanced_batch_generator
tf = pytest.importorskip("tensorflow")
@pytest.fixture
def data():
X, y = load_iris(return_X_y=True)
X, y = make_imbalance(X, y, {0: 30, 1: 50, 2: 40})
X = X.astype(np.float32)
return X, y
def check_balanced_batch_generator_tf_1_X_X(dataset, sampler):
X, y = dataset
batch_size = 10
training_generator, steps_per_epoch = balanced_batch_generator(
X,
y,
sample_weight=None,
sampler=sampler,
batch_size=batch_size,
random_state=42,
)
learning_rate = 0.01
epochs = 10
input_size = X.shape[1]
output_size = 3
# helper functions
def init_weights(shape):
return tf.Variable(tf.random_normal(shape, stddev=0.01))
def accuracy(y_true, y_pred):
return np.mean(np.argmax(y_pred, axis=1) == y_true)
# input and output
data = tf.placeholder("float32", shape=[None, input_size])
targets = tf.placeholder("int32", shape=[None])
# build the model and weights
W = init_weights([input_size, output_size])
b = init_weights([output_size])
out_act = tf.nn.sigmoid(tf.matmul(data, W) + b)
# build the loss, predict, and train operator
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=out_act, labels=targets
)
loss = tf.reduce_sum(cross_entropy)
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train_op = optimizer.minimize(loss)
predict = tf.nn.softmax(out_act)
# Initialization of all variables in the graph
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
for e in range(epochs):
for i in range(steps_per_epoch):
X_batch, y_batch = next(training_generator)
sess.run(
[train_op, loss],
feed_dict={data: X_batch, targets: y_batch},
)
# For each epoch, run accuracy on train and test
predicts_train = sess.run(predict, feed_dict={data: X})
print(f"epoch: {e} train accuracy: {accuracy(y, predicts_train):.3f}")
def check_balanced_batch_generator_tf_2_X_X_compat_1_X_X(dataset, sampler):
tf.compat.v1.disable_eager_execution()
X, y = dataset
batch_size = 10
training_generator, steps_per_epoch = balanced_batch_generator(
X,
y,
sample_weight=None,
sampler=sampler,
batch_size=batch_size,
random_state=42,
)
learning_rate = 0.01
epochs = 10
input_size = X.shape[1]
output_size = 3
# helper functions
def init_weights(shape):
return tf.Variable(tf.random.normal(shape, stddev=0.01))
def accuracy(y_true, y_pred):
return np.mean(np.argmax(y_pred, axis=1) == y_true)
# input and output
data = tf.compat.v1.placeholder("float32", shape=[None, input_size])
targets = tf.compat.v1.placeholder("int32", shape=[None])
# build the model and weights
W = init_weights([input_size, output_size])
b = init_weights([output_size])
out_act = tf.nn.sigmoid(tf.matmul(data, W) + b)
# build the loss, predict, and train operator
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=out_act, labels=targets
)
loss = tf.reduce_sum(input_tensor=cross_entropy)
optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate)
train_op = optimizer.minimize(loss)
predict = tf.nn.softmax(out_act)
# Initialization of all variables in the graph
init = tf.compat.v1.global_variables_initializer()
with tf.compat.v1.Session() as sess:
sess.run(init)
for e in range(epochs):
for i in range(steps_per_epoch):
X_batch, y_batch = next(training_generator)
sess.run(
[train_op, loss],
feed_dict={data: X_batch, targets: y_batch},
)
# For each epoch, run accuracy on train and test
predicts_train = sess.run(predict, feed_dict={data: X})
print(f"epoch: {e} train accuracy: {accuracy(y, predicts_train):.3f}")
@pytest.mark.parametrize("sampler", [None, NearMiss(), RandomOverSampler()])
def test_balanced_batch_generator(data, sampler):
if LooseVersion(tf.__version__) < "2":
check_balanced_batch_generator_tf_1_X_X(data, sampler)
else:
check_balanced_batch_generator_tf_2_X_X_compat_1_X_X(data, sampler)
@pytest.mark.parametrize("keep_sparse", [True, False])
def test_balanced_batch_generator_function_sparse(data, keep_sparse):
X, y = data
training_generator, steps_per_epoch = balanced_batch_generator(
sparse.csr_matrix(X),
y,
keep_sparse=keep_sparse,
batch_size=10,
random_state=42,
)
for idx in range(steps_per_epoch):
X_batch, y_batch = next(training_generator)
if keep_sparse:
assert sparse.issparse(X_batch)
else:
assert not sparse.issparse(X_batch)
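# Illustrative sketch (added for exposition, not part of the original test
# module): a hedged, framework-free view of the generator contract exercised
# above -- balanced_batch_generator returns an infinite generator of
# (X_batch, y_batch) pairs plus the number of steps that make up one epoch.
def check_balanced_batch_generator_contract(dataset):
    X, y = dataset
    training_generator, steps_per_epoch = balanced_batch_generator(
        X, y, sampler=RandomOverSampler(), batch_size=10, random_state=42
    )
    for _ in range(steps_per_epoch):
        X_batch, y_batch = next(training_generator)
        assert X_batch.shape[1] == X.shape[1]
        assert len(X_batch) == len(y_batch)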
|
mit
|
MJuddBooth/pandas
|
pandas/tests/test_common.py
|
2
|
3253
|
# -*- coding: utf-8 -*-
import collections
from functools import partial
import string
import numpy as np
import pytest
import pandas as pd
from pandas import Series, Timestamp
from pandas.core import common as com, ops
def test_get_callable_name():
getname = com.get_callable_name
def fn(x):
return x
lambda_ = lambda x: x # noqa: E731
part1 = partial(fn)
part2 = partial(part1)
class somecall(object):
def __call__(self):
return x # noqa
assert getname(fn) == 'fn'
assert getname(lambda_)
assert getname(part1) == 'fn'
assert getname(part2) == 'fn'
assert getname(somecall()) == 'somecall'
assert getname(1) is None
def test_any_none():
assert (com._any_none(1, 2, 3, None))
assert (not com._any_none(1, 2, 3, 4))
def test_all_not_none():
assert (com._all_not_none(1, 2, 3, 4))
assert (not com._all_not_none(1, 2, 3, None))
assert (not com._all_not_none(None, None, None, None))
def test_random_state():
import numpy.random as npr
# Check with seed
state = com.random_state(5)
assert state.uniform() == npr.RandomState(5).uniform()
# Check with random state object
state2 = npr.RandomState(10)
assert com.random_state(state2).uniform() == npr.RandomState(10).uniform()
# check with no arg random state
assert com.random_state() is np.random
# Error for floats or strings
with pytest.raises(ValueError):
com.random_state('test')
with pytest.raises(ValueError):
com.random_state(5.5)
@pytest.mark.parametrize('left, right, expected', [
(Series([1], name='x'), Series([2], name='x'), 'x'),
(Series([1], name='x'), Series([2], name='y'), None),
(Series([1]), Series([2], name='x'), None),
(Series([1], name='x'), Series([2]), None),
(Series([1], name='x'), [2], 'x'),
([1], Series([2], name='y'), 'y')])
def test_maybe_match_name(left, right, expected):
assert ops._maybe_match_name(left, right) == expected
def test_dict_compat():
data_datetime64 = {np.datetime64('1990-03-15'): 1,
np.datetime64('2015-03-15'): 2}
data_unchanged = {1: 2, 3: 4, 5: 6}
expected = {Timestamp('1990-3-15'): 1, Timestamp('2015-03-15'): 2}
assert (com.dict_compat(data_datetime64) == expected)
assert (com.dict_compat(expected) == expected)
assert (com.dict_compat(data_unchanged) == data_unchanged)
def test_standardize_mapping():
# No uninitialized defaultdicts
with pytest.raises(TypeError):
com.standardize_mapping(collections.defaultdict)
# No non-mapping subtypes, instance
with pytest.raises(TypeError):
com.standardize_mapping([])
# No non-mapping subtypes, class
with pytest.raises(TypeError):
com.standardize_mapping(list)
fill = {'bad': 'data'}
assert (com.standardize_mapping(fill) == dict)
# Convert instance to type
assert (com.standardize_mapping({}) == dict)
dd = collections.defaultdict(list)
assert isinstance(com.standardize_mapping(dd), partial)
def test_git_version():
# GH 21295
git_version = pd.__git_version__
assert len(git_version) == 40
assert all(c in string.hexdigits for c in git_version)
|
bsd-3-clause
|
zymsys/sms-tools
|
lectures/03-Fourier-properties/plots-code/shift.py
|
26
|
1223
|
import matplotlib.pyplot as plt
import numpy as np
import sys
from scipy.signal import sawtooth
sys.path.append('../../../software/models/')
import dftModel as DF
N = 128
x1 = sawtooth(2*np.pi*np.arange(-N/2,N/2)/float(N))
x2 = sawtooth(2*np.pi*np.arange(-N/2-2,N/2-2)/float(N))
mX1, pX1 = DF.dftAnal(x1, np.ones(N), N)
mX2, pX2 = DF.dftAnal(x2, np.ones(N), N)
plt.figure(1, figsize=(9.5, 7))
plt.subplot(321)
plt.title('x1=x[n]')
plt.plot(np.arange(-N/2, N/2, 1.0), x1, lw=1.5)
plt.axis([-N/2, N/2, -1, 1])
plt.subplot(322)
plt.title('x2=x[n-2]')
plt.plot(np.arange(-N/2, N/2, 1.0), x2, lw=1.5)
plt.axis([-N/2, N/2, -1, 1])
plt.subplot(323)
plt.title('mX1')
plt.plot(np.arange(0, mX1.size, 1.0), mX1, 'r', lw=1.5)
plt.axis([0,mX1.size,min(mX1),max(mX1)])
plt.subplot(324)
plt.title('mX2')
plt.plot(np.arange(0, mX2.size, 1.0), mX2, 'r', lw=1.5)
plt.axis([0,mX2.size,min(mX2),max(mX2)])
plt.subplot(325)
plt.title('pX1')
plt.plot(np.arange(0, pX1.size, 1.0), pX1, 'c', lw=1.5)
plt.axis([0,pX1.size,min(pX1),max(pX1)])
plt.subplot(326)
plt.title('pX2')
plt.plot(np.arange(0, pX2.size, 1.0), pX2, 'c', lw=1.5)
plt.axis([0,pX2.size,min(pX2),max(pX2)])
plt.tight_layout()
plt.savefig('shift.png')
plt.show()
|
agpl-3.0
|
nishant-jain-94/Autofill
|
notebooks/POS_MultiClass_LSTM_1024_1024_mse_sigmoid_3_128.py
|
1
|
5457
|
# coding: utf-8
# In[1]:
import numpy as np
import os
import sys
import h5py
import datetime
import json
import pandas as pd
import itertools
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM
from keras.callbacks import ModelCheckpoint
from embeddings import Embeddings
from keras.utils import to_categorical
# ## Setting Parameters
# In[2]:
word_embedding_dimension = 300
word_embedding_window_size = 4
batch_size = 128 # 32, 64, 128
epochs = 25 # 10, 15, 30
window_size = 4 # 3, 4, 5
accuracy_threshold = 0.85
activation = 'sigmoid' # sigmoid, relu, softmax
custom_accuracy = 0
loss_function = 'mse' # mse
# In[3]:
model_name = 'POS_MultiClass_LSTM' + '_1024_1024_' + loss_function + "_" + activation + "_" + str(window_size) + "_" + str(batch_size) #MODEL_NAME #POS-LSTM
# In[4]:
with open('../data/word_tokenized_sentence_300_4_1_4.json', 'r') as myfile:
raw_data = json.load(myfile)
# In[6]:
embeddings = Embeddings(word_embedding_dimension, word_embedding_window_size, 1, 4)
pos2index, index2pos = embeddings.get_pos_vocabulary()
# In[8]:
test_data = embeddings.find_POS(raw_data) #find_POS(raw_data)
# In[ ]:
whole_test_data = [word for sent in test_data for word in sent]
# In[ ]:
new_data = []
for i in range(len(whole_test_data)-window_size-1):
x = whole_test_data[i:i + window_size + 1]
new_data.append(x)
# In[ ]:
new_data = [[data[:3], data[3]] for data in new_data]
# In[ ]:
vocab = ['PUNCT','SYM','X','ADJ','VERB','CONJ','NUM','DET','ADV','PROPN','NOUN','PART','INTJ','CCONJ','SPACE','ADP','SCONJ','AUX', 'PRON']
# In[ ]:
new_data.sort()
new_seq_in = []
new_seq_out = []
for i,j in itertools.groupby(new_data, lambda x: x[0]):
ex = set(list(zip(*list(j)))[1])
inputs = [to_categorical(pos2index[x_pos], num_classes = len(vocab)) for x_pos in i]
new_seq_in_each = [each[0] for each in inputs]
new_seq_in.append(new_seq_in_each)
outputs = [(to_categorical(pos2index[y_pos], num_classes = len(vocab))).tolist()[0] for y_pos in ex]
new_seq_out_each = [each for each in outputs]
new_seq_out_each = np.sum(new_seq_out_each, axis=0)
new_seq_out.append(new_seq_out_each)
new_seq_in = np.array(new_seq_in)
new_seq_out = np.array(new_seq_out)
# In[ ]:
# Changes to the model to be done here
model = Sequential()
model.add(LSTM(1024, input_shape=(new_seq_in.shape[1], new_seq_in.shape[2]), return_sequences=True))
#model.add(Dropout(0.2))
model.add(LSTM(1024))
#model.add(Dropout(0.2))
model.add(Dense(len(vocab), activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam',metrics=['accuracy'])
model.summary()
# In[ ]:
model_weights_path = "../weights/"+ model_name
if not os.path.exists(model_weights_path):
os.makedirs(model_weights_path)
checkpoint_path = model_weights_path + '/pos_weights.{epoch:02d}-{val_acc:.2f}.hdf5'
checkpoint = ModelCheckpoint(filepath=checkpoint_path, monitor='val_acc', verbose=1, save_best_only=False, mode='max')
# In[ ]:
model_fit_summary = model.fit(new_seq_in, new_seq_out, epochs=epochs, batch_size=batch_size, verbose=1, validation_split=0.25, callbacks=[checkpoint])
# In[ ]:
check_ori = 0
check_pre = 0
counter = 0
test_start = 0
test_end = 100
list_for_hist_words = []
list_for_hist_index = []
list_for_hist_words_ori = []
list_for_hist_index_ori = []
for i in range(test_start, test_end):
test_no = i
to_predict = new_seq_in[test_no:test_no+1]
y_ans = model.predict(to_predict)
for word, corr_int in pos2index.items():
if corr_int == np.argmax(y_ans):
#print ("pridicted: ",word, corr_int)
check_pre = corr_int
list_for_hist_words.append(word)
list_for_hist_index.append(corr_int)
if corr_int == np.argmax(new_seq_out[test_no:test_no+1]):
#print ("original: ",word, corr_int)
check_ori = corr_int
list_for_hist_words_ori.append(word)
list_for_hist_index_ori.append(corr_int)
if check_ori == check_pre :
counter += 1
#print('\n')
print("Correct predictions: ",counter, '\nTotal Predictions: ',test_end - test_start)
custom_accuracy = counter/(test_end-test_start)
# In[ ]:
model_results = model_fit_summary.history
model_results.update(model_fit_summary.params)
model_results["word_embedding_dimension"] = word_embedding_dimension
model_results["word_embedding_window_size"] = word_embedding_window_size
model_results["window_size"] = window_size
model_results["batch_size"] = batch_size
model_results["epochs"] = epochs
model_results["model_name"] = model_name
model_results["accuracy_threshold"] = accuracy_threshold
model_results["activation"] = activation
model_results["custom_accuracy"] = custom_accuracy
model_results["loss_function"] = loss_function
model_results["layers"] = []
model_results["dropouts"] = []
for layer in model.layers:
if hasattr(layer, "units"):
layer_summary = {}
layer_summary["units"] = layer.get_config()["units"]
layer_summary["name"] = layer.name
model_results["layers"].append(layer_summary)
if hasattr(layer, "rate"):
dropout_summary = {}
dropout_summary["rate"] = layer.get_config()["rate"]
model_results["dropouts"].append(dropout_summary)
text_file_path = "../weights/{0}/model_results.json".format(model_name)
with open(text_file_path, "w") as f:
json.dump(model_results, f)
|
gpl-3.0
|