repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (15 classes)
---|---|---|---|---|---|
elijah513/scikit-learn
|
sklearn/utils/tests/test_shortest_path.py
|
88
|
2828
|
from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
#set zero entries (missing edges) to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix += dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
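# Editor's note (hedged addition): a minimal usage sketch, not part of the
# original test file; it only assumes the graph_shortest_path API imported
# above. On a tiny 3-node chain, Floyd-Warshall and Dijkstra must agree, and
# the 0 -> 2 distance goes through node 1 (1 + 2 = 3).
def demo_tiny_graph():
    tiny = np.array([[0., 1., 0.],
                     [1., 0., 2.],
                     [0., 2., 0.]])
    d_fw = graph_shortest_path(tiny, directed=False, method='FW')
    d_dij = graph_shortest_path(tiny, directed=False, method='D')
    assert_array_almost_equal(d_fw, d_dij)
    assert_array_almost_equal(d_fw[0, 2], 3.0)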
|
bsd-3-clause
|
opencobra/memote
|
src/memote/experimental/experimental_base.py
|
2
|
3478
|
# Copyright 2018 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide a class for medium definitions."""
import json
import logging
from goodtables import validate
from importlib_resources import open_text
# Importing the checks is necessary in order to register them.
import memote.experimental.schemata
from memote.experimental.checks import UnknownIdentifier # noqa: F401
from memote.experimental.tabular import read_tabular
__all__ = ("ExperimentalBase",)
LOGGER = logging.getLogger(__name__)
class ExperimentalBase(object):
"""Represent a specific medium condition."""
SCHEMA = None
TRUTHY = {"true", "True", "TRUE", "1", "yes", "Yes", "YES"}
def __init__(self, identifier, obj, filename, **kwargs):
"""
Initialize a medium.
Parameters
----------
identifier : str
obj : dict
filename : str or pathlib.Path
The full file path. May be a compressed file.
kwargs
"""
super(ExperimentalBase, self).__init__(**kwargs)
self.id = identifier
self.label = obj.get("label")
if self.label is None:
self.label = ""
self.filename = filename
self.data = None
self.schema = None
def load(self, dtype_conversion=None):
"""
Load the data table and corresponding validation schema.
Parameters
----------
dtype_conversion : dict
Column names as keys and corresponding type for loading the data.
Please take a look at the `pandas documentation
<https://pandas.pydata.org/pandas-docs/stable/io.html#specifying-column-data-types>`__
for detailed explanations.
"""
self.data = read_tabular(self.filename, dtype_conversion)
with open_text(
memote.experimental.schemata, self.SCHEMA, encoding="utf-8"
) as file_handle:
self.schema = json.load(file_handle)
def validate(self, model, checks=None):
"""Use a defined schema to validate the given table."""
if checks is None:
checks = []
records = self.data.to_dict("records")
self.evaluate_report(
validate(
records,
headers=list(records[0]),
preset="table",
schema=self.schema,
order_fields=True,
checks=checks,
)
)
@staticmethod
def evaluate_report(report):
"""Iterate over validation errors."""
if report["valid"]:
return
for warn in report["warnings"]:
LOGGER.warning(warn)
# We only ever test one table at a time.
for err in report["tables"][0]["errors"]:
LOGGER.error(err["message"])
raise ValueError("Invalid data file. Please see errors above.")
|
apache-2.0
|
mayblue9/scikit-learn
|
examples/cluster/plot_segmentation_toy.py
|
258
|
3336
|
"""
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (i.e. balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: making the weights only weakly
# dependent on the gradient keeps the segmentation close to a Voronoi partition
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
|
bsd-3-clause
|
liufuyang/deep_learning_tutorial
|
char-based-classification/repeat-offbit-sentiment/doc-rnn2.py
|
1
|
4973
|
import pandas as pd
from keras.models import Model
from keras.layers import Dense, Input, Dropout, MaxPooling1D, Conv1D
from keras.layers import LSTM, Lambda
from keras.layers import TimeDistributed, Bidirectional
from keras.layers.normalization import BatchNormalization
import numpy as np
import tensorflow as tf
import re
import keras.callbacks
import sys
import os
def binarize(x, sz=71):
return tf.to_float(tf.one_hot(x, sz, on_value=1, off_value=0, axis=-1))
def binarize_outshape(in_shape):
return in_shape[0], in_shape[1], 71
def striphtml(html):
p = re.compile(r'<.*?>')
return p.sub('', html)
def clean(s):
return re.sub(r'[^\x00-\x7f]', r'', s)
# record history of training
class LossHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.losses = []
self.accuracies = []
def on_batch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
self.accuracies.append(logs.get('acc'))
total = len(sys.argv)
cmdargs = str(sys.argv)
print ("Script name: %s" % str(sys.argv[0]))
checkpoint = None
if len(sys.argv) == 2:
if os.path.exists(str(sys.argv[1])):
print ("Checkpoint : %s" % str(sys.argv[1]))
checkpoint = str(sys.argv[1])
data = pd.read_csv("labeledTrainData.tsv", header=0, delimiter="\t", quoting=3)
txt = ''
docs = []
sentences = []
sentiments = []
for cont, sentiment in zip(data.review, data.sentiment):
sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', clean(striphtml(cont)))
sentences = [sent.lower() for sent in sentences]
docs.append(sentences)
sentiments.append(sentiment)
num_sent = []
for doc in docs:
num_sent.append(len(doc))
for s in doc:
txt += s
chars = set(txt)
print('total chars:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
print('Sample doc{}'.format(docs[1200]))
maxlen = 512
max_sentences = 15
X = np.ones((len(docs), max_sentences, maxlen), dtype=np.int64) * -1
y = np.array(sentiments)
for i, doc in enumerate(docs):
for j, sentence in enumerate(doc):
if j < max_sentences:
for t, char in enumerate(sentence[-maxlen:]):
X[i, j, (maxlen - 1 - t)] = char_indices[char]
print('Sample chars in X:{}'.format(X[1200, 2]))
print('y:{}'.format(y[1200]))
ids = np.arange(len(X))
np.random.shuffle(ids)
# shuffle
X = X[ids]
y = y[ids]
X_train = X[:20000]
X_test = X[20000:]
y_train = y[:20000]
y_test = y[20000:]
filter_length = [5, 3, 3]
nb_filter = [196, 196, 256]
pool_length = 2
# document input
document = Input(shape=(max_sentences, maxlen), dtype='int64')
# sentence input
in_sentence = Input(shape=(maxlen,), dtype='int64')
# char indices to one hot matrix, 1D sequence to 2D
embedded = Lambda(binarize, output_shape=binarize_outshape)(in_sentence)
# embedded: encodes sentence
for i in range(len(nb_filter)):
embedded = Conv1D(filters=nb_filter[i],
kernel_size=filter_length[i],
padding='valid',
activation='relu',
kernel_initializer='glorot_normal',
strides=1)(embedded)
embedded = Dropout(0.1)(embedded)
embedded = MaxPooling1D(pool_size=pool_length)(embedded)
bi_lstm_sent = \
Bidirectional(LSTM(128, return_sequences=False, dropout=0.15, recurrent_dropout=0.15, implementation=0))(embedded)
# sent_encode = merge([forward_sent, backward_sent], mode='concat', concat_axis=-1)
sent_encode = Dropout(0.3)(bi_lstm_sent)
# sentence encoder
encoder = Model(inputs=in_sentence, outputs=sent_encode)
encoder.summary()
encoded = TimeDistributed(encoder)(document)
# encoded: sentences to bi-lstm for document encoding
b_lstm_doc = \
Bidirectional(LSTM(128, return_sequences=False, dropout=0.15, recurrent_dropout=0.15, implementation=0))(encoded)
output = Dropout(0.3)(b_lstm_doc)
output = Dense(128, activation='relu')(output)
output = Dropout(0.3)(output)
output = Dense(1, activation='sigmoid')(output)
model = Model(inputs=document, outputs=output)
model.summary()
if checkpoint:
model.load_weights(checkpoint)
file_name = os.path.basename(sys.argv[0]).split('.')[0]
check_cb = keras.callbacks.ModelCheckpoint('checkpoints/' + file_name + '.{epoch:02d}-{val_loss:.2f}.hdf5',
monitor='val_loss',
verbose=0, save_best_only=True, mode='min')
earlystop_cb = keras.callbacks.EarlyStopping(monitor='val_loss', patience=7, verbose=1, mode='auto')
history = LossHistory()
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, y_train, validation_data=(X_test, y_test), batch_size=10,
epochs=5, shuffle=True, callbacks=[earlystop_cb, check_cb, history])
# just showing access to the history object
print(history.losses)
print(history.accuracies)
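# Editor's note (hedged addition): a NumPy-only illustration of what binarize()
# above produces (the TensorFlow version needs a session to evaluate). A char
# index sequence of length L becomes an (L, 71) one-hot matrix, the 2D input
# that the per-sentence Conv1D stack consumes.
demo_idx = np.array([3, 0, 70])
demo_onehot = np.eye(71)[demo_idx]  # shape (3, 71), a single 1.0 per row
assert demo_onehot.shape == (3, 71) and demo_onehot.sum() == 3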
|
mit
|
craftbase/chicago-murders
|
src/data/extractor/parsehtml.py
|
1
|
4699
|
import datetime
import logging
import os
import requests
import pandas as pd
from bs4 import BeautifulSoup
from dateutil.parser import parse
from selenium import webdriver
logging.basicConfig(filename='scrape.log', level=20)
logging.info('Initialized logger')
CSV_FILE = "victim_info_2012_2017.csv"
def get_soup_object(html):
return BeautifulSoup(html)
def get_dynamic_content(link):
try:
driver = webdriver.PhantomJS()
logging.info("Webdriver initiated {0}".format(driver))
except Exception as exp:
logging.fatal("Webdriver exception: {exception}".format(exception=exp))
raise exp
driver.get(link)
r = requests.get(link)
if r.status_code != 200:
raise Exception("Status code is not 200. Exiting!")
html = driver.page_source
soup = get_soup_object(html)
# Dump the page source to the log for debugging purposes; this only works if
# the level in logging.basicConfig above is set to 10 (DEBUG)
logging.debug(soup.prettify())
parsedata(soup)
def parsedata(soup):
victims = soup.find_all("div", {"class": "homicide"})
for victim in victims:
# Set default values as NA for each victim
date = name = age = race = cause = neighbourhood = murder_time = addr = "NA"
try:
date = victim.find("div", {"class": "date"}).get_text().strip()
except Exception as exp:
pass
try:
name = victim.find("h2", {"class": "name"}).get_text().strip()
except Exception as exp:
pass
try:
age = victim.find("div", {"class": "age"}).contents[1].strip()
except Exception as exp:
pass
try:
race = victim.find("div", {"class": "race"}).contents[1].strip()
except Exception as exp:
pass
try:
cause = victim.find("div", {"class": "cause"}).contents[1].strip()
except Exception as exp:
pass
try:
neighbourhood = victim.find("div", {"class": "neighborhood"}).contents[1].strip()
except Exception as exp:
pass
try:
murder_time = victim.find("span", {"class": "murder_time"}).get_text().strip()
except Exception as exp:
pass
try:
addr = victim.find("div", {"class": "address"}).contents[2].strip()
except Exception as exp:
pass
victim_info = {'name': name,
'date': date,
'age': age,
'race': race,
'cause': cause,
'neighbourhood': neighbourhood,
'time': murder_time,
'address': addr
}
victim_info_df = pd.DataFrame(victim_info, index=[0])
# if file does not exist write header
if not os.path.isfile(CSV_FILE):
victim_info_df.to_csv(CSV_FILE, header=True, encoding='utf-8')
else: # else it exists so append without writing the header
victim_info_df.to_csv(CSV_FILE, mode='a', header=False, encoding='utf-8')
print "Date " + date + " Name " + name + " Age " + age + " Race " + race + " Cause " + cause + " Neigbourhood " + neighbourhood + " Time " + murder_time + " Address " + addr
def get_all_data():
for year in range(2012, 2018):
for month in range(1, 13):
link = "https://www.dnainfo.com/chicago/{}-chicago-murders/timeline?mon={}".format(str(year), str(month))
get_dynamic_content(link)
def get_last_entry_date():
df = pd.read_csv(CSV_FILE)
total_rows = len(df.index)
date = df['date'].values[total_rows - 1]
date_str = parse(date)
date_str = date_str.strftime('%Y%m%d')
return date_str
def update_data():
last_date = get_last_entry_date()
print "Last updated entry was on {}".format(last_date)
entry_year = int(last_date[:4])
entry_month = int(last_date[4:6])
current_date = datetime.date.today()
current_month = int(current_date.month)
current_year = int(current_date.year)
for year in range(entry_year, current_year + 1):
for month in range(1, 13):
if year == entry_year and month <= entry_month:
continue
link = "https://www.dnainfo.com/chicago/{}-chicago-murders/timeline?mon={}".format(str(year), str(month))
get_dynamic_content(link)
def get_data():
if not os.path.isfile(CSV_FILE):
print "CSV file not found. Creating CSV file and appending data to it"
get_all_data()
else:
print "CSV file found."
update_data()
get_data()
# print get_last_entry_date()
|
mit
|
numenta/htmresearch
|
projects/union_path_integration/plot_capacity_heatmap.py
|
4
|
3714
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2018, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Plot comparison chart."""
import argparse
from collections import defaultdict
import json
import os
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
import scipy.optimize
CWD = os.path.dirname(os.path.realpath(__file__))
CHART_DIR = os.path.join(CWD, "charts")
def chart2(inFilename, outFilename, cellCounts, featureCounts):
if not os.path.exists(CHART_DIR):
os.makedirs(CHART_DIR)
capacitiesByParams = defaultdict(list)
with open(inFilename, "r") as f:
experiments = json.load(f)
for exp in experiments:
locationModuleWidth = exp[0]["locationModuleWidth"]
numUniqueFeatures = exp[0]["numFeatures"]
cellsPerModule = locationModuleWidth*locationModuleWidth
capacitiesByParams[(cellsPerModule, numUniqueFeatures)].append(exp[1]["numObjects"])
meanCapacityByParams = {}
for params, capacities in capacitiesByParams.iteritems():
meanCapacityByParams[params] = sum(capacities) / float(len(capacities))
xlabels = [str(v) for v in featureCounts]
ylabels = [str(v) for v in cellCounts]
plotData = np.empty((len(cellCounts), len(featureCounts)), dtype="float")
for i, cellsPerModule in enumerate(cellCounts):
for j, numUniqueFeatures in enumerate(featureCounts):
plotData[i, j] = meanCapacityByParams[(cellsPerModule, numUniqueFeatures)]
fig, ax = plt.subplots(figsize=(3.25, 3.25), tight_layout = {"pad": 0})
# Customize vmax so that the colors stay sufficiently dark and the white
# text remains readable.
plt.imshow(plotData,
norm=colors.LogNorm(vmin=plotData.min(), vmax=plotData.max()*3.0))
ax.xaxis.set_label_position('top')
ax.xaxis.tick_top()
ax.set_xticks(np.arange(len(xlabels)))
ax.set_yticks(np.arange(len(ylabels)))
ax.set_xticklabels(xlabels)
ax.set_yticklabels(ylabels)
plt.setp(ax.get_xticklabels(), rotation=45, ha="left", rotation_mode="anchor")
for i in xrange(len(ylabels)):
for j in xrange(len(xlabels)):
text = ax.text(j, i, str(int(plotData[i, j])), ha="center", va="center",
color="w")
plt.xlabel("Number of Unique Features")
plt.ylabel("Cells Per Module")
filePath = os.path.join(CHART_DIR, outFilename)
print "Saving", filePath
plt.savefig(filePath)
if __name__ == "__main__":
plt.rc("font",**{"family": "sans-serif",
"sans-serif": ["Arial"],
"size": 8})
parser = argparse.ArgumentParser()
parser.add_argument("--inFile", type=str, required=True)
parser.add_argument("--outFile", type=str, required=True)
args = parser.parse_args()
counts = [w**2 for w in [6, 8, 10, 14, 17, 20]]
chart2(args.inFile, args.outFile,
cellCounts=counts,
featureCounts=counts)
|
agpl-3.0
|
jfinkels/networkx
|
examples/drawing/lanl_routes.py
|
2
|
2274
|
#!/usr/bin/env python
"""
Routes to LANL from 186 sites on the Internet.
This uses Graphviz for layout so you need PyGraphviz or pydot.
"""
# Author: Aric Hagberg ([email protected])
# Copyright (C) 2004-2016
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
def lanl_graph():
""" Return the lanl internet view graph from lanl.edges
"""
import networkx as nx
try:
fh = open('lanl_routes.edgelist' , 'r')
except IOError:
print("lanl.edges not found")
raise
G = nx.Graph()
time = {}
time[0] = 0 # assign 0 to center node
for line in fh.readlines():
(head, tail, rtt) = line.split()
G.add_edge(int(head), int(tail))
time[int(head)] = float(rtt)
# get largest component and assign ping times to G0time dictionary
G0 = sorted(nx.connected_component_subgraphs(G), key = len, reverse=True)[0]
G0.rtt = {}
for n in G0:
G0.rtt[n] = time[n]
return G0
if __name__ == '__main__':
import networkx as nx
import math
try:
import pygraphviz
from networkx.drawing.nx_agraph import graphviz_layout
except ImportError:
try:
import pydot
from networkx.drawing.nx_pydot import graphviz_layout
except ImportError:
raise ImportError("This example needs Graphviz and either "
"PyGraphviz or pydot")
G=lanl_graph()
print("graph has %d nodes with %d edges"\
%(nx.number_of_nodes(G), nx.number_of_edges(G)))
print(nx.number_connected_components(G), "connected components")
import matplotlib.pyplot as plt
plt.figure(figsize=(8, 8))
# use graphviz to find radial layout
pos = graphviz_layout(G, prog="twopi", root=0)
# draw nodes, coloring by rtt ping time
nx.draw(G, pos,
node_color=[G.rtt[v] for v in G],
with_labels=False,
alpha=0.5,
node_size=15)
# adjust the plot limits
xmax = 1.02 * max(xx for xx,yy in pos.values())
ymax = 1.02 * max(yy for xx,yy in pos.values())
plt.xlim(0, xmax)
plt.ylim(0, ymax)
plt.savefig("lanl_routes.png")
|
bsd-3-clause
|
vshtanko/scikit-learn
|
sklearn/linear_model/bayes.py
|
220
|
15248
|
"""
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
n_samples, n_features = X.shape
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
### Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
### Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_)
/ (lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1)
/ (np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
/ (rmse_ + 2 * alpha_2))
### Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_)
+ n_samples * log(alpha_)
- alpha_ * rmse_
- (lambda_ * np.sum(coef_ ** 2))
- logdet_sigma_
- n_samples * log(2 * np.pi))
self.scores_.append(s)
### Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
self._set_intercept(X_mean, y_mean, X_std)
return self
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to follow Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
The estimation is done by an iterative procedure (Evidence Maximization).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
--------
See examples/linear_model/plot_ard.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
### Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
### Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
### Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda]
* np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
* X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
/ ((coef_[keep_lambda]) ** 2
+ 2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
/ (rmse_ + 2. * alpha_2))
### Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
### Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
+ np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
### Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_mean, y_mean, X_std)
return self
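if __name__ == "__main__":
    # Editor's note (hedged addition): a minimal sanity-check sketch, not part
    # of the original module. It fits BayesianRidge on small synthetic data
    # and prints the learned weight vector and noise/weight precisions.
    rng = np.random.RandomState(0)
    X_demo = rng.randn(50, 3)
    y_demo = np.dot(X_demo, [1., 2., 0.]) + 0.1 * rng.randn(50)
    reg = BayesianRidge(compute_score=True).fit(X_demo, y_demo)
    print("coef_:", reg.coef_)
    print("alpha_ (noise precision):", reg.alpha_)
    print("lambda_ (weight precision):", reg.lambda_)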
|
bsd-3-clause
|
sinhrks/scikit-learn
|
sklearn/gaussian_process/tests/test_gaussian_process.py
|
267
|
6813
|
"""
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a one-dimensional Gaussian Process model.
# Check random start optimization.
# Test the interpolating property.
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the interpolating property.
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
eps = np.finfo(gp.theta_.dtype).eps
assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the GP interpolation for 2D output
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
# Repeat test_1d and test_2d for several built-in correlation
# models specified as strings.
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
# Repeat test_1d and test_2d with given regression weights (beta0) for
# different regression models (Ordinary Kriging).
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the reduced likelihood function of the optimal theta.
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
def test_mse_solving():
# test the MSE estimate to be sane.
# non-regression test for ignoring off-diagonals of feature covariance,
# testing with nugget that renders covariance useless, only
# using the mean function, with low effective rank of data
gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
optimizer='Welch', regr="linear", random_state=0)
X, y = make_regression(n_informative=3, n_features=60, noise=50,
random_state=0, effective_rank=1)
gp.fit(X, y)
assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
|
bsd-3-clause
|
rlabbe/filterpy
|
filterpy/kalman/tests/test_rts.py
|
2
|
1603
|
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from numpy import random
import matplotlib.pyplot as plt
from filterpy.kalman import KalmanFilter
DO_PLOT = False
def test_rts():
fk = KalmanFilter(dim_x=2, dim_z=1)
fk.x = np.array([-1., 1.]) # initial state (location and velocity)
fk.F = np.array([[1.,1.],
[0.,1.]]) # state transition matrix
fk.H = np.array([[1.,0.]]) # Measurement function
fk.P = .01 # covariance matrix
fk.R = 5 # state uncertainty
fk.Q = 0.001 # process uncertainty
zs = [t + random.randn()*4 for t in range(40)]
mu, cov, _, _ = fk.batch_filter (zs)
mus = [x[0] for x in mu]
M, P, _, _ = fk.rts_smoother(mu, cov)
if DO_PLOT:
p1, = plt.plot(zs,'cyan', alpha=0.5)
p2, = plt.plot(M[:,0],c='b')
p3, = plt.plot(mus,c='r')
p4, = plt.plot([0, len(zs)], [0, len(zs)], 'g') # perfect result
plt.legend([p1, p2, p3, p4],
["measurement", "RKS", "KF output", "ideal"], loc=4)
plt.show()
if __name__ == '__main__':
DO_PLOT = True
test_rts()
|
mit
|
djgagne/scikit-learn
|
examples/plot_johnson_lindenstrauss_bound.py
|
127
|
7477
|
r"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is asserted by
the fact that `p` is defining an eps-embedding with good probability
as defined by:
.. math::
(1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components needed to guarantee the eps-embedding is
given by:
.. math::
n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that increasing the admissible distortion ``eps``
drastically reduces the minimal number of dimensions ``n_components`` for
a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, the 8x8 gray-level pixel data for 500
handwritten digit pictures are randomly projected to spaces of various
larger dimensionality ``n_components``.
- for the 20 newsgroups dataset, some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
Euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide,
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left, as distances are always positive),
while for larger values of ``n_components`` the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset, which only has 64 features
in the input space, does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups on the other hand the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
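# Editor's note (hedged addition): a quick numeric check of the bound quoted in
# the module docstring, n_components >= 4 * log(n_samples) / (eps^2/2 - eps^3/3).
# For 500 samples and eps = 0.5 it gives roughly 300 dimensions, matching the
# lower end of n_components_range used above.
demo_eps = 0.5
demo_bound = 4 * np.log(500) / (demo_eps ** 2 / 2 - demo_eps ** 3 / 3)
print("JL bound for n_samples=500, eps=%.1f: %d (johnson_lindenstrauss_min_dim: %d)"
      % (demo_eps, int(np.ceil(demo_bound)),
         johnson_lindenstrauss_min_dim(500, eps=demo_eps)))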
|
bsd-3-clause
|
jreback/pandas
|
pandas/tests/resample/conftest.py
|
2
|
4149
|
from datetime import datetime
import numpy as np
import pytest
from pandas import DataFrame, Series
from pandas.core.indexes.datetimes import date_range
from pandas.core.indexes.period import period_range
# The various methods we support
downsample_methods = [
"min",
"max",
"first",
"last",
"sum",
"mean",
"sem",
"median",
"prod",
"var",
"std",
"ohlc",
"quantile",
]
upsample_methods = ["count", "size"]
series_methods = ["nunique"]
resample_methods = downsample_methods + upsample_methods + series_methods
@pytest.fixture(params=downsample_methods)
def downsample_method(request):
"""Fixture for parametrization of Grouper downsample methods."""
return request.param
@pytest.fixture(params=resample_methods)
def resample_method(request):
"""Fixture for parametrization of Grouper resample methods."""
return request.param
@pytest.fixture
def simple_date_range_series():
"""
Series with date range index and random data for test purposes.
"""
def _simple_date_range_series(start, end, freq="D"):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
return _simple_date_range_series
@pytest.fixture
def simple_period_range_series():
"""
Series with period range index and random data for test purposes.
"""
def _simple_period_range_series(start, end, freq="D"):
rng = period_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
return _simple_period_range_series
@pytest.fixture
def _index_start():
"""Fixture for parametrization of index, series and frame."""
return datetime(2005, 1, 1)
@pytest.fixture
def _index_end():
"""Fixture for parametrization of index, series and frame."""
return datetime(2005, 1, 10)
@pytest.fixture
def _index_freq():
"""Fixture for parametrization of index, series and frame."""
return "D"
@pytest.fixture
def _index_name():
"""Fixture for parametrization of index, series and frame."""
return None
@pytest.fixture
def index(_index_factory, _index_start, _index_end, _index_freq, _index_name):
"""
Fixture for parametrization of date_range, period_range and
timedelta_range indexes
"""
return _index_factory(_index_start, _index_end, freq=_index_freq, name=_index_name)
@pytest.fixture
def _static_values(index):
"""
Fixture for parametrization of values used in parametrization of
Series and DataFrames with date_range, period_range and
timedelta_range indexes
"""
return np.arange(len(index))
@pytest.fixture
def _series_name():
"""
Fixture for parametrization of Series name for Series used with
date_range, period_range and timedelta_range indexes
"""
return None
@pytest.fixture
def series(index, _series_name, _static_values):
"""
Fixture for parametrization of Series with date_range, period_range and
timedelta_range indexes
"""
return Series(_static_values, index=index, name=_series_name)
@pytest.fixture
def empty_series_dti(series):
"""
Fixture for parametrization of empty Series with date_range,
period_range and timedelta_range indexes
"""
return series[:0]
@pytest.fixture
def frame(index, _series_name, _static_values):
"""
Fixture for parametrization of DataFrame with date_range, period_range
and timedelta_range indexes
"""
# _series_name is intentionally unused
return DataFrame({"value": _static_values}, index=index)
@pytest.fixture
def empty_frame_dti(series):
"""
Fixture for parametrization of empty DataFrame with date_range,
period_range and timedelta_range indexes
"""
index = series.index[:0]
return DataFrame(index=index)
@pytest.fixture(params=[Series, DataFrame])
def series_and_frame(request, series, frame):
"""
Fixture for parametrization of Series and DataFrame with date_range,
period_range and timedelta_range indexes
"""
if request.param == Series:
return series
if request.param == DataFrame:
return frame
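# Editor's note (hedged addition): a sketch of how a test module is expected to
# consume these fixtures; the test name and the weekly resample rule below are
# illustrative assumptions, not part of the original conftest.
#
#     def test_weekly_downsample(simple_date_range_series, downsample_method):
#         ts = simple_date_range_series("2005-01-01", "2005-02-01", freq="D")
#         result = getattr(ts.resample("W"), downsample_method)()
#         assert len(result) <= len(ts)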
|
bsd-3-clause
|
zqhuang/COOP
|
lib/pyplot_scripts/PlanckFig_DE_pca.py
|
1
|
3973
|
# Configure Matplotlib options
from setup_matplotlib import *
from matplotlib.ticker import MaxNLocator
from matplotlib.patches import Rectangle, FancyBboxPatch
import planckStyle as s
from pylab import *
import numpy as np
import GetDistPlots, os
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import rc, font_manager
from matplotlib.pyplot import figure, axes, plot, xlabel, ylabel, title, \
grid, savefig, show
outdir='/home/pettorin/codici/cosmomc_plots/output_getdist/clik/clik10/pca/plots/'
g = s.getSinglePlotter(plot_data='/home/pettorin/codici/cosmomc_plots/output_getdist/clik/clik10/pca/4bins/')
g.settings.setWithSubplotSize(4.0000)
# Load data
wt = np.loadtxt('/home/pettorin/codici/cosmomc_plots/output_getdist/clik/clik10/pca/pythoncode/4bins/output/weights.txt')
pca = np.loadtxt('/home/pettorin/codici/cosmomc_plots/output_getdist/clik/clik10/pca/pythoncode/4bins/output/w_reconstructed.txt')
#f = plt.figure()
#ax = f.add_subplot(1,1,1)
#col = 'blue'
#z0 = pca[:,0] # lower limit
#z1 = pca[:,2] # upper limit
#w0 = pca[:,3] # mean w
#dw = pca[:,4]
#for j in range(len(z0)-1):
# llc = (z0[j],w0[j]-2.*dw[j]) # lower left corner
# dz = z1[j]-z0[j]
# dx = 4.*dw[j]
# bbox1 = Rectangle(llc,dz,dx,transform=ax.transData,ec=col,fc=col,fill=True,alpha=0.5)
# ax.add_patch(bbox1)
#xlim((0,2.1))
#ylim((-2,1))
#f.show()
# Create the plot
#for width in [18., 12., 10., 8.8]:
for width in [10.]:
fig = plt.figure(figsize=(cm2inch(width), cm2inch(width*6/8.)))
# this should be changed for making a panel of multiple figures
ax = fig.add_subplot(111)
leg = ax.legend()
col='#008ae6'
z0 = pca[:,0] # lower limit
z1 = pca[:,2] # upper limit
w0 = pca[:,3] # mean w
dw = pca[:,4]
#
for j in range(len(z0)-2):
llc = (z0[j],w0[j]-2.*dw[j]) # lower left corner
dz = z1[j]-z0[j]
dx = 4.*dw[j]
bbox1 = Rectangle(llc,dz,dx,transform=ax.transData,ec=col,fc=col,fill=True,alpha=0.8)
ax.add_patch(bbox1)
for j in range(len(z0)-2,len(z0)-1):
llc = (z0[j],w0[j]-2.*dw[j]) # lower left corner
dz = z1[j]-z0[j]
dx = 4.*dw[j]
bbox1 = Rectangle(llc,dz,dx,transform=ax.transData,ec=col,fc=col,fill=True,alpha=0.8, label='Planck + BSH')
ax.add_patch(bbox1)
# x axis (plots a line hlines(z,xmin,xmax)
#plt.hlines(0, 1.8, 7.2,color=(.5,.5,.5),label='_nolegend_')
# legend
# leg = plt.legend(frameon=True)
#leg = plt.legend(loc='upper right')
# leg = plt.legend(loc='lower right') # frameon keyword unknown in my version
# remove box around legend
#leg.get_frame().set_edgecolor("white")
#leg.get_frame().set_alpha(.8)
# labels
plt.xlabel(r"$z$"); plt.ylabel(r"$w(z)$")
ax.yaxis.labelpad = 10*width/17.; ax.xaxis.labelpad = 10*width/17. # distance of axis label to tick labels
# reduce ticks for small figures
if width < 10:
ax.yaxis.set_major_locator(MaxNLocator(nbins=5))
# grid
# plt.grid(True, which="major", axis="both") # problem in my version
# axes limits
plt.xlim([0, 2.01]); plt.ylim([-1.8, 0.2]);
# reduce white space around figure
plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
# set vertical y axis ticklables
for ticklabel in ax.yaxis.get_ticklabels():
ticklabel.set_rotation("vertical")
l = plt.legend(loc=1,prop={'size':10}, frameon = False)
colors = ["#008AE6", "#E03424"]
for color,text in zip(colors,l.get_texts()):
text.set_color(color)
# g.export(os.path.join(outdir,'wPCA.pdf'))
plt.tight_layout()
# save to pdf with right bounding box
plt.savefig("/home/pettorin/codici/cosmomc_plots/output_getdist/clik/clik10/pca/plots/wPCA.pdf", bbox_inches='tight', pad_inches=0.05)
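# --- Editor's hedged note (not part of the original script) -----------------
# Each PCA bin above is drawn as a matplotlib Rectangle anchored at its
# lower-left corner:
#     Rectangle((z_low, w_mean - 2*dw), z_high - z_low, 4*dw)
# i.e. a box spanning the redshift bin horizontally and +/- 2 sigma around the
# reconstructed w(z) vertically, using the columns read from
# w_reconstructed.txt (z_low, unused, z_high, w_mean, dw).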
|
gpl-3.0
|
ak4778/neural-network
|
test_neural_network.py
|
1
|
13434
|
import cPickle
import os
import gzip
import matplotlib.pyplot as plt
import pylab
import numpy as np
#### Libraries
# Standard library
import gzip
import pylab
# Third-party libraries
import numpy as np
weights=[]
biases=[]
outs=[]
costs=[]
def mvectorized_result(j,classifications):
"""Return a classifications-dimensional unit vector with a 1.0 in the jth
position and zeroes elsewhere. This is used to convert a digit
(0...9) into a corresponding desired output from the neural
network."""
# e=[j]
e = np.zeros((classifications, 1))
e[j] = 1.0
return e
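# Editor's hedged example (not part of the original file): mvectorized_result
# builds a one-hot column vector, e.g.
#     mvectorized_result(2, 5).ravel()  ->  array([ 0.,  0.,  1.,  0.,  0.])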
def mload_data_wrapper(tr_input,tr_output,classification = 0):
"""Return a tuple containing ``(training_data, validation_data,
test_data)``. Based on ``load_data``, but the format is more
    convenient for use in our implementation of neural networks."""
ni=np.array(tr_input)
no=np.array(tr_output)
input_units=len(tr_input[0])
if classification:
output_units=classification
else:
output_units=len(tr_output[0])
tr_data=(ni,no)
tr_d, va_d, te_d = (tr_data,tr_data,tr_data)
training_inputs = [np.reshape(x, (input_units, 1)) for x in tr_d[0]]
print tr_d[1]
if classification:
training_results = [mvectorized_result(y-1,output_units) for y in tr_d[1]]
else:
training_results = [np.reshape(y, (output_units,1)) for y in tr_d[1]]
#training_results = [y for y in tr_d[1]]
# print "trr type=",type(training_results)
# print training_results
training_data = zip(training_inputs, training_results)
print "training_data=",training_data
validation_inputs = [np.reshape(x, (input_units, 1)) for x in va_d[0]]
validation_data = zip(validation_inputs, va_d[1])
test_inputs = [np.reshape(x, (input_units, 1)) for x in te_d[0]]
test_data = zip(test_inputs, te_d[1])
return (training_data, validation_data, test_data)
#tr_d, va_d, te_d = mload_data()
#img=tr_d[0][99].reshape(28,28)
#print(tr_d[0][3])
#print(tr_d[1][3])
#print type(tr_d[0])
#print type(tr_d[1])
#print type(tr_d[0][0])
#print type(tr_d[1][0])
#pylab.imshow(img)
#pylab.gray()
#pylab.show()
"""
network.py
~~~~~~~~~~
A module to implement the stochastic gradient descent learning
algorithm for a feedforward neural network. Gradients are calculated
using backpropagation. Note that I have focused on making the code
simple, easily readable, and easily modifiable. It is not optimized,
and omits many desirable features.
"""
#### Libraries
# Standard library
import random
# Third-party libraries
import numpy as np
class CrossEntropyCost(object):
@staticmethod
def fn(a, y):
"""Return the cost associated with an output ``a`` and desired output
``y``. Note that np.nan_to_num is used to ensure numerical
stability. In particular, if both ``a`` and ``y`` have a 1.0
in the same slot, then the expression (1-y)*np.log(1-a)
returns nan. The np.nan_to_num ensures that that is converted
to the correct value (0.0).
"""
return np.sum(np.nan_to_num(-y*np.log(a)-(1-y)*np.log(1-a)))
@staticmethod
def delta(z, a, y):
"""Return the error delta from the output layer. Note that the
parameter ``z`` is not used by the method. It is included in
the method's parameters in order to make the interface
consistent with the delta method for other cost classes.
"""
return (a-y)
class QuadraticCost(object):
@staticmethod
def fn(a, y):
"""Return the cost associated with an output ``a`` and desired output
``y``.
"""
return 0.5*np.linalg.norm(a-y)**2
@staticmethod
def delta(z, a, y):
"""Return the error delta from the output layer."""
return (a-y) * sigmoid_prime(z)
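# Editor's hedged example (not part of the original file): the two cost classes
# above agree that a good prediction is cheap but penalise confident mistakes
# very differently. For a single output a with target y:
#     a = 0.9, y = 1.0  ->  CrossEntropyCost.fn ~= 0.105,  QuadraticCost.fn = 0.005
#     a = 0.9, y = 0.0  ->  CrossEntropyCost.fn ~= 2.303,  QuadraticCost.fn = 0.405
# (plain floats shown; the script below passes tiny 1-element numpy arrays).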
class Network(object):
def __init__(self, sizes, cost=QuadraticCost):
#def __init__(self, sizes, cost=CrossEntropyCost):
"""The list ``sizes`` contains the number of neurons in the
respective layers of the network. For example, if the list
was [2, 3, 1] then it would be a three-layer network, with the
first layer containing 2 neurons, the second layer 3 neurons,
and the third layer 1 neuron. The biases and weights for the
network are initialized randomly, using a Gaussian
distribution with mean 0, and variance 1. Note that the first
layer is assumed to be an input layer, and by convention we
won't set any biases for those neurons, since biases are only
ever used in computing the outputs from later layers."""
self.num_layers = len(sizes)
self.cost = cost
self.sizes = sizes
self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
self.weights = [np.random.randn(y, x)
for x, y in zip(sizes[:-1], sizes[1:])]
#self.biases[0][0][0] = 0.9
#self.weights[0][0][0] = 0.6
self.biases[0][0][0] = 2
self.weights[0][0][0] = 2
#print "sizes = ",self.sizes
print "biases = ",self.biases
print "weights = ",self.weights
def feedforward(self, a):
"""Return the output of the network if ``a`` is input."""
print "a=",a
for b, w in zip(self.biases, self.weights):
print "b=",b
print "w=",w
a = sigmoid(np.dot(w, a)+b)
return a
def SGD(self, training_data, epochs, mini_batch_size, eta, test_data=None):
"""Train the neural network using mini-batch stochastic
gradient descent. The ``training_data`` is a list of tuples
``(x, y)`` representing the training inputs and the desired
outputs. The other non-optional parameters are
self-explanatory. If ``test_data`` is provided then the
network will be evaluated against the test data after each
epoch, and partial progress printed out. This is useful for
tracking progress, but slows things down substantially."""
if test_data: n_test = len(test_data)
n = len(training_data)
for j in xrange(epochs):
random.shuffle(training_data)
mini_batches = [
training_data[k:k+mini_batch_size]
for k in xrange(0, n, mini_batch_size)]
for mini_batch in mini_batches:
self.update_mini_batch(mini_batch, eta)
if test_data:
# print "Epoch {0}: {1} / {2}".format(
# j, self.evaluate(test_data), n_test)
self.mevaluate(test_data)
else:
print "Epoch {0} complete".format(j)
print "weights = ",weights
print "biases = ",biases
print "costs = ",costs
print "len=",len(weights)
def update_mini_batch(self, mini_batch, eta):
"""Update the network's weights and biases by applying
gradient descent using backpropagation to a single mini batch.
The ``mini_batch`` is a list of tuples ``(x, y)``, and ``eta``
is the learning rate."""
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
for x, y in mini_batch:
delta_nabla_b, delta_nabla_w = self.backprop(x, y)
nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
self.weights = [w-(eta/len(mini_batch))*nw
for w, nw in zip(self.weights, nabla_w)]
self.biases = [b-(eta/len(mini_batch))*nb
for b, nb in zip(self.biases, nabla_b)]
weights.append(self.weights[0][0][0])
biases.append(self.biases[0][0][0])
def backprop(self, x, y):
"""Return a tuple ``(nabla_b, nabla_w)`` representing the
gradient for the cost function C_x. ``nabla_b`` and
``nabla_w`` are layer-by-layer lists of numpy arrays, similar
to ``self.biases`` and ``self.weights``."""
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
# feedforward
activation = x
activations = [x] # list to store all the activations, layer by layer
zs = [] # list to store all the z vectors, layer by layer
for b, w in zip(self.biases, self.weights):
z = np.dot(w, activation)+b
zs.append(z)
activation = sigmoid(z)
activations.append(activation)
# backward pass
delta = self.cost_derivative(activations[-1], y) * \
sigmoid_prime(zs[-1])
cost = self.cost.fn(activations[-1][0], y[0][0])
outs.append(activations[-1][0])
costs.append(cost)
print "act=",activations[-1][0] , " y=", y[0][0] ," cost=", cost
nabla_b[-1] = delta
nabla_w[-1] = np.dot(delta, activations[-2].transpose())
# Note that the variable l in the loop below is used a little
# differently to the notation in Chapter 2 of the book. Here,
# l = 1 means the last layer of neurons, l = 2 is the
# second-last layer, and so on. It's a renumbering of the
# scheme in the book, used here to take advantage of the fact
# that Python can use negative indices in lists.
for l in xrange(2, self.num_layers):
z = zs[-l]
sp = sigmoid_prime(z)
delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
nabla_b[-l] = delta
nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
return (nabla_b, nabla_w)
def dump(self, x, y):
# print "x= ",x, " y=",y
o = self.feedforward(x)
print "o =",o
return (np.argmax(o), y)
def mevaluate(self, test_data):
"""Return the number of test inputs for which the neural
network outputs the correct result. Note that the neural
network's output is assumed to be the index of whichever
neuron in the final layer has the highest activation."""
#test_results = [(np.argmax(self.feedforward(x)), y)
# for (x, y) in test_data]
test_results = [self.dump(x,y) for (x, y) in test_data]
# print "test_results=",test_results
return 1
def evaluate(self, test_data):
"""Return the number of test inputs for which the neural
network outputs the correct result. Note that the neural
network's output is assumed to be the index of whichever
neuron in the final layer has the highest activation."""
#test_results = [(np.argmax(self.feedforward(x)), y)
# for (x, y) in test_data]
test_results = [self.dump(x,y) for (x, y) in test_data]
# print "test_results=",test_results
return sum(int(x == y) for (x, y) in test_results)
def cost_derivative(self, output_activations, y):
"""Return the vector of partial derivatives \partial C_x /
\partial a for the output activations."""
return (output_activations-y)
#### Miscellaneous functions
def sigmoid(z):
"""The sigmoid function."""
return 1.0/(1.0+np.exp(-z))
def sigmoid_prime(z):
"""Derivative of the sigmoid function."""
return sigmoid(z)*(1-sigmoid(z))
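# Editor's hedged example (not part of the original file): a few reference
# values for the activation used above,
#     sigmoid(0) = 0.5,  sigmoid_prime(0) = 0.25 (its maximum),
#     sigmoid(+-4) ~= 0.982 / 0.018, where sigmoid_prime ~= 0.0177,
# which is why saturated neurons (large |z|) learn slowly with the quadratic cost.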
#print "kkk"
#t=[1,2,3,4,5,6]
#a=np.array(t)#[np.random.randn(y, 1) for y in t[1:]]#]np.array(t)
#print a
#d=[np.reshape(a, (2,3))]
#print d
##test_data=([[1,2],[3,4],[5,6],[7,8]],[3,7,11,15])
##test_data=([[1,2],
## [3,4],
## [5,6],
## [7,8]], [[3],[7],[11],[15]])
###
#nn = network.Network([2,3,1])
##nn.SGD(test_data,30,1,3.0)
#nn.evaluate(test_data)
#training_data,validation_data,test_data = mload_data_wrapper([[0.05,0.1],[1,3]],[[0.68,0.01],[0.02,0.9]],0)
#training_data,validation_data,test_data = mload_data_wrapper([[0.05,0.1],[1,3]],[[0.01],[0.99]],0)
#training_data,validation_data,test_data = mload_data_wrapper([[0.05,0.1],[1,3]],[1,2],2)
#training_data,validation_data,test_data = mload_data_wrapper([[1,2]],[[.89,0.01]],0)
training_data,validation_data,test_data = mload_data_wrapper([[1]],[[0]],0)
###net = network.Network([2,3,1])
#net = Network([2,3,2])
#net = Network([1,3,1])
net = Network([1,1])
#print net.feedforward(np.array([[1.1,2.2]]))
print net.mevaluate(test_data)
net.SGD(training_data,300,1,0.15,test_data=test_data)
#training_data,validation_data,test_data = mnist_loader.load_data_wrapper()
#net = network.Network([784,30,10])
#net.SGD(test_data,30,1,3.0,test_data=test_data)
fig = plt.figure()
fig.suptitle('stock',fontsize=14,fontweight='bold')
#ax = fig.add_subplot(1,1,1)
#x=np.arange(0,30,.01)
x=[]
y=[]
for i in range(0,300):
x.append(i)
y.append(0)
ax = fig.add_subplot(1,1,1)
#plt.plot(x,weights,color='r',linewidth=1.5, linestyle="-", label="weight")
#plt.plot(x,biases,color='g',label="biases")
plt.plot(x,costs,color='b',label="costs")
plt.plot(x,y,color='black',linewidth=0.5, linestyle="-")
plt.legend(loc='upper right')
plt.show();
plt.plot(x,outs,color='r',label="out")
plt.plot(x,y,color='black',linewidth=0.5, linestyle="-")
plt.legend(loc='upper right')
plt.show();
plt.plot(x,weights,color='r',linewidth=1.5, linestyle="-", label="weight")
plt.plot(x,biases,color='g',label="biases")
plt.legend(loc='upper right')
plt.show();
|
bsd-3-clause
|
cherrycheung/paparazzi
|
sw/airborne/test/stabilization/compare_ref_quat.py
|
38
|
1206
|
#! /usr/bin/env python
from __future__ import division, print_function, absolute_import
import numpy as np
import matplotlib.pyplot as plt
import ref_quat_float
import ref_quat_int
steps = 512 * 2
ref_eul_res = np.zeros((steps, 3))
ref_quat_res = np.zeros((steps, 3))
ref_quat_float.init()
ref_quat_int.init()
# reset psi and update_ref_quat_from_eulers
ref_quat_float.enter()
ref_quat_int.enter()
q_sp = np.array([0.92387956, 0.38268346, 0., 0.])
ref_quat_float.sp_quat.array = q_sp
ref_quat_int.sp_quat.array = q_sp * (1 << 15)
for i in range(0, steps):
ref_quat_float.update()
ref_eul_res[i, :] = ref_quat_float.ref_euler.array
ref_quat_int.update()
ref_quat_res[i, :] = ref_quat_int.ref_euler.array / (1 << 20)
plt.figure(1)
plt.subplot(311)
plt.title("reference in euler angles")
plt.plot(np.degrees(ref_eul_res[:, 0]), 'g')
plt.plot(np.degrees(ref_quat_res[:, 0]), 'r')
plt.ylabel("phi [deg]")
plt.subplot(312)
plt.plot(np.degrees(ref_eul_res[:, 1]), 'g')
plt.plot(np.degrees(ref_quat_res[:, 1]), 'r')
plt.ylabel("theta [deg]")
plt.subplot(313)
plt.plot(np.degrees(ref_eul_res[:, 2]), 'g')
plt.plot(np.degrees(ref_quat_res[:, 2]), 'r')
plt.ylabel("psi [deg]")
plt.show()
|
gpl-2.0
|
scipy/scipy-svn
|
scipy/signal/ltisys.py
|
2
|
23885
|
"""
ltisys -- a collection of classes and functions for modeling linear
time invariant systems.
"""
#
# Author: Travis Oliphant 2001
#
# Feb 2010: Warren Weckesser
# Rewrote lsim2 and added impulse2.
#
from filter_design import tf2zpk, zpk2tf, normalize
import numpy
from numpy import product, zeros, array, dot, transpose, ones, \
nan_to_num, zeros_like, linspace
import scipy.interpolate as interpolate
import scipy.integrate as integrate
import scipy.linalg as linalg
from numpy import r_, eye, real, atleast_1d, atleast_2d, poly, \
squeeze, diag, asarray
def tf2ss(num, den):
"""Transfer function to state-space representation.
Parameters
----------
num, den : array_like
Sequences representing the numerator and denominator
polynomials.
Returns
-------
A, B, C, D : ndarray
State space representation of the system.
"""
# Controller canonical state-space representation.
# if M+1 = len(num) and K+1 = len(den) then we must have M <= K
# states are found by asserting that X(s) = U(s) / D(s)
# then Y(s) = N(s) * X(s)
#
# A, B, C, and D follow quite naturally.
#
num, den = normalize(num, den) # Strips zeros, checks arrays
nn = len(num.shape)
if nn == 1:
num = asarray([num], num.dtype)
M = num.shape[1]
K = len(den)
if (M > K):
raise ValueError("Improper transfer function.")
if (M == 0 or K == 0): # Null system
return array([],float), array([], float), array([], float), \
array([], float)
    # pad numerator to have same number of columns as denominator
num = r_['-1',zeros((num.shape[0],K-M), num.dtype), num]
if num.shape[-1] > 0:
D = num[:,0]
else:
D = array([],float)
if K == 1:
return array([], float), array([], float), array([], float), D
frow = -array([den[1:]])
A = r_[frow, eye(K-2, K-1)]
B = eye(K-1, 1)
C = num[:,1:] - num[:,0] * den[1:]
return A, B, C, D
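# Editor's hedged example (not part of the original module): converting
# H(s) = 1 / (s**2 + 2*s + 1) with the function above,
#     A, B, C, D = tf2ss([1.0], [1.0, 2.0, 1.0])
# yields the controller canonical form
#     A = [[-2., -1.], [1., 0.]],  B = [[1.], [0.]],  C = [[0., 1.]],  D = [0.]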
def _none_to_empty(arg):
if arg is None:
return []
else:
return arg
def abcd_normalize(A=None, B=None, C=None, D=None):
"""Check state-space matrices and ensure they are rank-2.
"""
A, B, C, D = map(_none_to_empty, (A, B, C, D))
A, B, C, D = map(atleast_2d, (A, B, C, D))
if ((len(A.shape) > 2) or (len(B.shape) > 2) or \
(len(C.shape) > 2) or (len(D.shape) > 2)):
raise ValueError("A, B, C, D arrays can be no larger than rank-2.")
MA, NA = A.shape
MB, NB = B.shape
MC, NC = C.shape
MD, ND = D.shape
if (MC == 0) and (NC == 0) and (MD != 0) and (NA != 0):
MC, NC = MD, NA
C = zeros((MC, NC))
if (MB == 0) and (NB == 0) and (MA != 0) and (ND != 0):
MB, NB = MA, ND
        B = zeros((MB, NB))
if (MD == 0) and (ND == 0) and (MC != 0) and (NB != 0):
MD, ND = MC, NB
        D = zeros((MD, ND))
if (MA == 0) and (NA == 0) and (MB != 0) and (NC != 0):
MA, NA = MB, NC
        A = zeros((MA, NA))
if MA != NA:
raise ValueError("A must be square.")
if MA != MB:
raise ValueError("A and B must have the same number of rows.")
if NA != NC:
raise ValueError("A and C must have the same number of columns.")
if MD != MC:
raise ValueError("C and D must have the same number of rows.")
if ND != NB:
raise ValueError("B and D must have the same number of columns.")
return A, B, C, D
def ss2tf(A, B, C, D, input=0):
"""State-space to transfer function.
Parameters
----------
A, B, C, D : ndarray
State-space representation of linear system.
input : int, optional
For multiple-input systems, the input to use.
Returns
-------
num, den : 1D ndarray
Numerator and denominator polynomials (as sequences)
respectively.
"""
# transfer function is C (sI - A)**(-1) B + D
A, B, C, D = map(asarray, (A, B, C, D))
# Check consistency and
# make them all rank-2 arrays
A, B, C, D = abcd_normalize(A, B, C, D)
nout, nin = D.shape
if input >= nin:
raise ValueError("System does not have the input specified.")
# make MOSI from possibly MOMI system.
if B.shape[-1] != 0:
B = B[:,input]
B.shape = (B.shape[0],1)
if D.shape[-1] != 0:
D = D[:,input]
try:
den = poly(A)
except ValueError:
den = 1
if (product(B.shape,axis=0) == 0) and (product(C.shape,axis=0) == 0):
num = numpy.ravel(D)
if (product(D.shape,axis=0) == 0) and (product(A.shape,axis=0) == 0):
den = []
return num, den
num_states = A.shape[0]
type_test = A[:,0] + B[:,0] + C[0,:] + D
num = numpy.zeros((nout, num_states+1), type_test.dtype)
for k in range(nout):
Ck = atleast_2d(C[k,:])
num[k] = poly(A - dot(B,Ck)) + (D[k]-1)*den
return num, den
def zpk2ss(z, p, k):
"""Zero-pole-gain representation to state-space representation
Parameters
----------
z, p : sequence
Zeros and poles.
k : float
System gain.
Returns
-------
A, B, C, D : ndarray
State-space matrices.
"""
return tf2ss(*zpk2tf(z,p,k))
def ss2zpk(A, B, C, D, input=0):
"""State-space representation to zero-pole-gain representation.
Parameters
----------
A, B, C, D : ndarray
State-space representation of linear system.
input : int, optional
For multiple-input systems, the input to use.
Returns
-------
z, p : sequence
Zeros and poles.
k : float
System gain.
"""
return tf2zpk(*ss2tf(A,B,C,D,input=input))
class lti(object):
"""Linear Time Invariant class which simplifies representation.
"""
def __init__(self,*args,**kwords):
"""Initialize the LTI system using either:
(numerator, denominator)
(zeros, poles, gain)
(A, B, C, D) -- state-space.
"""
N = len(args)
if N == 2: # Numerator denominator transfer function input
self.__dict__['num'], self.__dict__['den'] = normalize(*args)
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = tf2zpk(*args)
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = tf2ss(*args)
self.inputs = 1
if len(self.num.shape) > 1:
self.outputs = self.num.shape[0]
else:
self.outputs = 1
elif N == 3: # Zero-pole-gain form
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = args
self.__dict__['num'], self.__dict__['den'] = zpk2tf(*args)
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = zpk2ss(*args)
self.inputs = 1
if len(self.zeros.shape) > 1:
self.outputs = self.zeros.shape[0]
else:
self.outputs = 1
elif N == 4: # State-space form
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = abcd_normalize(*args)
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = ss2zpk(*args)
self.__dict__['num'], self.__dict__['den'] = ss2tf(*args)
self.inputs = self.B.shape[-1]
self.outputs = self.C.shape[0]
else:
raise ValueError("Needs 2, 3, or 4 arguments.")
def __setattr__(self, attr, val):
if attr in ['num','den']:
self.__dict__[attr] = val
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = \
tf2zpk(self.num, self.den)
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = \
tf2ss(self.num, self.den)
elif attr in ['zeros', 'poles', 'gain']:
self.__dict__[attr] = val
self.__dict__['num'], self.__dict__['den'] = \
zpk2tf(self.zeros,
self.poles, self.gain)
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = \
zpk2ss(self.zeros,
self.poles, self.gain)
elif attr in ['A', 'B', 'C', 'D']:
self.__dict__[attr] = val
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = \
ss2zpk(self.A, self.B,
self.C, self.D)
self.__dict__['num'], self.__dict__['den'] = \
ss2tf(self.A, self.B,
self.C, self.D)
else:
self.__dict__[attr] = val
def impulse(self, X0=None, T=None, N=None):
return impulse(self, X0=X0, T=T, N=N)
def step(self, X0=None, T=None, N=None):
return step(self, X0=X0, T=T, N=N)
def output(self, U, T, X0=None):
return lsim(self, U, T, X0=X0)
def lsim2(system, U=None, T=None, X0=None, **kwargs):
"""
Simulate output of a continuous-time linear system, by using
the ODE solver `scipy.integrate.odeint`.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like (1D or 2D), optional
An input array describing the input at each time T. Linear
interpolation is used between given times. If there are
multiple inputs, then each column of the rank-2 array
represents an input. If U is not given, the input is assumed
to be zero.
T : array_like (1D or 2D), optional
The time steps at which the input is defined and at which the
output is desired. The default is 101 evenly spaced points on
the interval [0,10.0].
X0 : array_like (1D), optional
The initial condition of the state vector. If `X0` is not
given, the initial conditions are assumed to be 0.
kwargs : dict
Additional keyword arguments are passed on to the function
odeint. See the notes below for more details.
Returns
-------
T : 1D ndarray
The time values for the output.
yout : ndarray
The response of the system.
xout : ndarray
The time-evolution of the state-vector.
Notes
-----
This function uses :func:`scipy.integrate.odeint` to solve the
system's differential equations. Additional keyword arguments
given to `lsim2` are passed on to `odeint`. See the documentation
for :func:`scipy.integrate.odeint` for the full list of arguments.
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if X0 is None:
X0 = zeros(sys.B.shape[0],sys.A.dtype)
if T is None:
# XXX T should really be a required argument, but U was
# changed from a required positional argument to a keyword,
# and T is after U in the argument list. So we either: change
# the API and move T in front of U; check here for T being
        # None and raise an exception; or assign a default value to T
# here. This code implements the latter.
T = linspace(0, 10.0, 101)
T = atleast_1d(T)
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
if U is not None:
U = atleast_1d(U)
if len(U.shape) == 1:
U = U.reshape(-1,1)
sU = U.shape
if sU[0] != len(T):
raise ValueError("U must have the same number of rows "
"as elements in T.")
if sU[1] != sys.inputs:
raise ValueError("The number of inputs in U (%d) is not "
"compatible with the number of system "
"inputs (%d)" % (sU[1], sys.inputs))
# Create a callable that uses linear interpolation to
# calculate the input at any time.
ufunc = interpolate.interp1d(T, U, kind='linear',
axis=0, bounds_error=False)
def fprime(x, t, sys, ufunc):
"""The vector field of the linear system."""
return dot(sys.A,x) + squeeze(dot(sys.B,nan_to_num(ufunc([t]))))
xout = integrate.odeint(fprime, X0, T, args=(sys, ufunc), **kwargs)
yout = dot(sys.C,transpose(xout)) + dot(sys.D,transpose(U))
else:
def fprime(x, t, sys):
"""The vector field of the linear system."""
return dot(sys.A,x)
xout = integrate.odeint(fprime, X0, T, args=(sys,), **kwargs)
yout = dot(sys.C,transpose(xout))
return T, squeeze(transpose(yout)), xout
def lsim(system, U, T, X0=None, interp=1):
"""
Simulate output of a continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like
An input array describing the input at each time `T`
(interpolation is assumed between given times). If there are
multiple inputs, then each column of the rank-2 array
represents an input.
T : array_like
The time steps at which the input is defined and at which the
output is desired.
X0 :
The initial conditions on the state vector (zero by default).
interp : {1, 0}
Whether to use linear (1) or zero-order hold (0) interpolation.
Returns
-------
T : 1D ndarray
Time values for the output.
yout : 1D ndarray
System response.
xout : ndarray
Time-evolution of the state-vector.
"""
# system is an lti system or a sequence
# with 2 (num, den)
# 3 (zeros, poles, gain)
# 4 (A, B, C, D)
# describing the system
# U is an input vector at times T
# if system describes multiple inputs
# then U can be a rank-2 array with the number of columns
# being the number of inputs
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
U = atleast_1d(U)
T = atleast_1d(T)
if len(U.shape) == 1:
U = U.reshape((U.shape[0],1))
sU = U.shape
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
if sU[0] != len(T):
raise ValueError("U must have the same number of rows "
"as elements in T.")
if sU[1] != sys.inputs:
raise ValueError("System does not define that many inputs.")
if X0 is None:
X0 = zeros(sys.B.shape[0], sys.A.dtype)
xout = zeros((len(T),sys.B.shape[0]), sys.A.dtype)
xout[0] = X0
A = sys.A
AT, BT = transpose(sys.A), transpose(sys.B)
dt = T[1]-T[0]
lam, v = linalg.eig(A)
vt = transpose(v)
vti = linalg.inv(vt)
GT = dot(dot(vti,diag(numpy.exp(dt*lam))),vt).astype(xout.dtype)
ATm1 = linalg.inv(AT)
ATm2 = dot(ATm1,ATm1)
I = eye(A.shape[0],dtype=A.dtype)
GTmI = GT-I
F1T = dot(dot(BT,GTmI),ATm1)
if interp:
F2T = dot(BT,dot(GTmI,ATm2)/dt - ATm1)
for k in xrange(1,len(T)):
dt1 = T[k] - T[k-1]
if dt1 != dt:
dt = dt1
GT = dot(dot(vti,diag(numpy.exp(dt*lam))),vt).astype(xout.dtype)
GTmI = GT-I
F1T = dot(dot(BT,GTmI),ATm1)
if interp:
F2T = dot(BT,dot(GTmI,ATm2)/dt - ATm1)
xout[k] = dot(xout[k-1],GT) + dot(U[k-1],F1T)
if interp:
xout[k] = xout[k] + dot((U[k]-U[k-1]),F2T)
yout = squeeze(dot(U,transpose(sys.D))) + squeeze(dot(xout,transpose(sys.C)))
return T, squeeze(yout), squeeze(xout)
def _default_response_times(A, n):
"""Compute a reasonable set of time samples for the response time.
This function is used by `impulse`, `impulse2`, `step` and `step2`
to compute the response time when the `T` argument to the function
is None.
Parameters
----------
A : ndarray
The system matrix, which is square.
n : int
The number of time samples to generate.
Returns
-------
t : ndarray
The 1-D array of length `n` of time samples at which the response
is to be computed.
"""
# Create a reasonable time interval. This could use some more work.
# For example, what is expected when the system is unstable?
vals = linalg.eigvals(A)
r = min(abs(real(vals)))
if r == 0.0:
r = 1.0
tc = 1.0 / r
t = linspace(0.0, 7*tc, n)
return t
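# Editor's hedged example (not part of the original module): the heuristic above
# picks the slowest pole, takes its time constant tc = 1 / min|Re(lambda)| and
# samples seven time constants. For A = [[-0.5]] (a single pole at s = -0.5),
# tc = 2.0 and the returned grid is linspace(0.0, 14.0, n).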
def impulse(system, X0=None, T=None, N=None):
"""Impulse response of continuous-time system.
Parameters
----------
system : LTI class or tuple
If specified as a tuple, the system is described as
``(num, den)``, ``(zero, pole, gain)``, or ``(A, B, C, D)``.
X0 : array_like, optional
Initial state-vector. Defaults to zero.
T : array_like, optional
Time points. Computed if not given.
N : int, optional
The number of time points to compute (if `T` is not given).
Returns
-------
T : ndarray
A 1-D array of time points.
yout : ndarray
A 1-D array containing the impulse response of the system (except for
singularities at zero).
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if X0 is None:
B = sys.B
else:
B = sys.B + X0
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
h = zeros(T.shape, sys.A.dtype)
s,v = linalg.eig(sys.A)
vi = linalg.inv(v)
C = sys.C
for k in range(len(h)):
es = diag(numpy.exp(s*T[k]))
eA = (dot(dot(v,es),vi)).astype(h.dtype)
h[k] = squeeze(dot(dot(C,eA),B))
return T, h
def impulse2(system, X0=None, T=None, N=None, **kwargs):
"""
Impulse response of a single-input, continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
2 (num, den)
3 (zeros, poles, gain)
4 (A, B, C, D)
T : 1-D array_like, optional
The time steps at which the input is defined and at which the
output is desired. If `T` is not given, the function will
generate a set of time samples automatically.
X0 : 1-D array_like, optional
The initial condition of the state vector. Default: 0 (the
zero vector).
N : int, optional
Number of time points to compute. Default: 100.
kwargs : various types
Additional keyword arguments are passed on to the function
`scipy.signal.lsim2`, which in turn passes them on to
`scipy.integrate.odeint`; see the latter's documentation for
information about these arguments.
Returns
-------
T : ndarray
The time values for the output.
yout : ndarray
The output response of the system.
See Also
--------
impulse, lsim2, integrate.odeint
Notes
-----
The solution is generated by calling `scipy.signal.lsim2`, which uses
the differential equation solver `scipy.integrate.odeint`.
.. versionadded:: 0.8.0
Examples
--------
    Second order system with a repeated root: x''(t) + 2*x'(t) + x(t) = u(t)
    >>> from scipy import signal
    >>> system = ([1.0], [1.0, 2.0, 1.0])
    >>> t, y = signal.impulse2(system)
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, y)
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
B = sys.B
if B.shape[-1] != 1:
raise ValueError("impulse2() requires a single-input system.")
B = B.squeeze()
if X0 is None:
X0 = zeros_like(B)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
# Move the impulse in the input to the initial conditions, and then
# solve using lsim2().
U = zeros_like(T)
ic = B + X0
Tr, Yr, Xr = lsim2(sys, U, T, ic, **kwargs)
return Tr, Yr
def step(system, X0=None, T=None, N=None):
"""Step response of continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation.
2 (num, den)
3 (zeros, poles, gain)
4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int
Number of time points to compute if `T` is not given.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step2
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
U = ones(T.shape, sys.A.dtype)
vals = lsim(sys, U, T, X0=X0)
return vals[0], vals[1]
def step2(system, X0=None, T=None, N=None, **kwargs):
"""Step response of continuous-time system.
This function is functionally the same as `scipy.signal.step`, but
it uses the function `scipy.signal.lsim2` to compute the step
response.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation.
2 (num, den)
3 (zeros, poles, gain)
4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int
Number of time points to compute if `T` is not given.
**kwargs :
Additional keyword arguments are passed on the function
`scipy.signal.lsim2`, which in turn passes them on to
:func:`scipy.integrate.odeint`. See the documentation for
:func:`scipy.integrate.odeint` for information about these
arguments.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step
Notes
-----
.. versionadded:: 0.8.0
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
U = ones(T.shape, sys.A.dtype)
vals = lsim2(sys, U, T, X0=X0, **kwargs)
return vals[0], vals[1]
|
bsd-3-clause
|
astropy/astropy
|
examples/template/example-template.py
|
11
|
3356
|
# -*- coding: utf-8 -*-
"""
========================
Title of Example
========================
This example <verb> <active tense> <does something>.
The example uses <packages> to <do something> and <other package> to <do other
thing>. Include links to referenced packages like this: `astropy.io.fits` to
show the astropy.io.fits or like this `~astropy.io.fits` to show just 'fits'.
*By: <names>*
*License: BSD*
"""
##############################################################################
# Make print work the same in all versions of Python, set up numpy,
# matplotlib, and use a nicer set of plot parameters:
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
# uncomment if including figures:
# import matplotlib.pyplot as plt
# from astropy.visualization import astropy_mpl_style
# plt.style.use(astropy_mpl_style)
##############################################################################
# This code block is executed, although it produces no output. Lines starting
# with a simple hash are code comments and get treated as part of the code
# block. To include this new comment string we started the new block with a
# long line of hashes.
#
# The sphinx-gallery parser will assume everything after this splitter and that
# continues to start with a **comment hash and space** (respecting code style)
# is text that has to be rendered in
# html format. Keep in mind to always keep your comments together by
# comment hashes. That means to break a paragraph you still need to comment
# that line break.
#
# In this example the next block of code produces some plottable data. Code is
# executed, figure is saved and then code is presented next, followed by the
# inlined figure.
x = np.linspace(-np.pi, np.pi, 300)
xx, yy = np.meshgrid(x, x)
z = np.cos(xx) + np.cos(yy)
plt.figure()
plt.imshow(z)
plt.colorbar()
plt.xlabel('$x$')
plt.ylabel('$y$')
###########################################################################
# Again it is possible to continue the discussion with a new Python string. This
# time it introduces the next code block, which generates 2 separate figures.
plt.figure()
plt.imshow(z, cmap=plt.cm.get_cmap('hot'))
plt.figure()
plt.imshow(z, cmap=plt.cm.get_cmap('Spectral'), interpolation='none')
##########################################################################
# There's some subtle differences between rendered html rendered comment
# strings and code comment strings which I'll demonstrate below. (Some of this
# only makes sense if you look at the
# :download:`raw Python script <plot_notebook.py>`)
#
# Comments in comment blocks remain nested in the text.
def dummy():
"""Dummy function to make sure docstrings don't get rendered as text"""
pass
# Code comments not preceded by the hash splitter are left in code blocks.
string = """
Triple-quoted string which tries to break parser but doesn't.
"""
############################################################################
# Output of the script is captured:
print('Some output from Python')
############################################################################
# Finally, I'll call ``show`` at the end just so someone running the Python
# code directly will see the plots; this is not necessary for creating the docs
plt.show()
|
bsd-3-clause
|
cauchycui/scikit-learn
|
sklearn/cluster/birch.py
|
207
|
22706
|
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import NotFittedError, check_is_fitted
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
    matrix, instead of constructing a sparse matrix for every row, which is
    expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
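# Editor's hedged example (not part of the original module): for a 2x3 CSR
# matrix with rows [1, 0, 2] and [0, 0, 3], the generator above yields the
# dense rows array([1., 0., 2.]) and array([0., 0., 3.]) one at a time,
# without ever materialising the full dense matrix.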
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
3. The properties of the empty subclusters and nodes are updated
according to the nearest distance between the subclusters to the
pair of distant subclusters.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_node2 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(
dist.argmax(), (n_clusters, n_clusters))
node1_dist, node2_dist = dist[[farthest_idx]]
node1_closer = node1_dist < node2_dist
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
class _CFNode(object):
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : array-like
list of subclusters for a particular CFNode.
prev_leaf_ : _CFNode
prev_leaf. Useful only if is_leaf is True.
next_leaf_ : _CFNode
        next_leaf. Useful only if is_leaf is True.
init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
manipulate ``init_centroids_`` throughout rather than centroids_ since
the centroids are just a view of the ``init_centroids_`` .
init_sq_norm_ : ndarray, shape (branching_factor + 1,)
manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.
centroids_ : ndarray
view of ``init_centroids_``.
squared_norm_ : ndarray
view of ``init_sq_norm_``.
"""
def __init__(self, threshold, branching_factor, is_leaf, n_features):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
self.init_sq_norm_ = np.zeros((branching_factor + 1))
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
        # Keep centroids and squared norm as views. In this way it is
        # sufficient to update init_centroids_ and init_sq_norm_; the views
        # reflect the change automatically.
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
def update_split_subclusters(self, subcluster,
new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.
dist_matrix += self.squared_norm_
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(
subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = \
self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = \
self.subclusters_[closest_index].sq_norm_
return False
            # Things are not too good. We need to redistribute the
            # subclusters in our child node, and add a new subcluster in the
            # parent subcluster to accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_, threshold, branching_factor)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(
subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = \
closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = \
closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
# We do not have enough space nor is it closer to an
# other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
    A CFSubcluster can have a CFNode as its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
Child Node of the subcluster. Once a given _CFNode is set as the child
of the _CFNode, it is set to ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
sq_radius = (new_ss + dot_product) / new_n + new_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
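# Editor's hedged note (not part of the original module): the radius property
# above evaluates to sqrt(squared_sum_ / n_samples_ - ||centroid_||**2), i.e.
# the RMS distance of the subcluster's samples from its centroid. For two 1-D
# samples 0 and 2: squared_sum_ = 4, n_samples_ = 2, centroid_ = 1, radius = 1.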
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
Every new sample is inserted into the root of the Clustering Feature
Tree. It is then clubbed together with the subcluster that has the
centroid closest to the new sample. This is done recursively till it
    ends up at the leaf subcluster of the tree that has the closest centroid.
Read more in the :ref:`User Guide <birch>`.
Parameters
----------
threshold : float, default 0.5
The radius of the subcluster obtained by merging a new sample and the
        closest subcluster should be less than the threshold. Otherwise a new
subcluster is started.
branching_factor : int, default 50
        Maximum number of CF subclusters in each node. If a new sample enters
        such that the number of subclusters exceeds the branching_factor then
the node has to be split. The corresponding parent also has to be
split and if the number of subclusters in the parent is greater than
the branching factor, then it has to be split recursively.
n_clusters : int, instance of sklearn.cluster model, default None
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples. By default, this final
clustering step is not performed and the subclusters are returned
as they are. If a model is provided, the model is fit treating
the subclusters as new samples and the initial data is mapped to the
label of the closest subcluster. If an int is provided, the model
fit is AgglomerativeClustering with n_clusters set to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
        If partial_fit is used instead of fit, they are assigned to the
last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
    * Tian Zhang, Raghu Ramakrishnan, Miron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/p/jbirch/
"""
def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
def _fit(self, X):
X = check_array(X, accept_sparse='csr', copy=self.copy)
threshold = self.threshold
branching_factor = self.branching_factor
if branching_factor <= 1:
raise ValueError("Branching_factor should be greater than one.")
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
partial_fit = getattr(self, 'partial_fit_')
has_root = getattr(self, 'root_', None)
if getattr(self, 'fit_') or (partial_fit and not has_root):
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
n_features=n_features)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(threshold, branching_factor,
is_leaf=True, n_features=n_features)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
        # Cannot vectorize. Enough to convince us to use Cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor)
del self.root_
self.root_ = _CFNode(threshold, branching_factor,
is_leaf=False,
n_features=n_features)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([
leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves: array-like
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
def _check_fit(self, X):
is_fitted = hasattr(self, 'subcluster_centers_')
# Called by partial_fit, before fitting.
has_partial_fit = hasattr(self, 'partial_fit_')
# Should raise an error if one does not fit before predicting.
if not (is_fitted or has_partial_fit):
raise NotFittedError("Fit training data before predicting")
if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
labels: ndarray, shape(n_samples)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
reduced_distance *= -2
reduced_distance += self._subcluster_norms
return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
def transform(self, X, y=None):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self, 'subcluster_centers_')
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, int):
clusterer = AgglomerativeClustering(
n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
elif (clusterer is not None and not
hasattr(clusterer, 'fit_predict')):
raise ValueError("n_clusters should be an instance of "
"ClusterMixin or an int")
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(
self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by Birch is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters))
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(
self.subcluster_centers_)
if compute_labels:
self.labels_ = self.predict(X)
|
bsd-3-clause
|
Kongsea/tensorflow
|
tensorflow/python/estimator/inputs/inputs.py
|
94
|
1290
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility methods to create simple input_fns."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long
from tensorflow.python.estimator.inputs.numpy_io import numpy_input_fn
from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn
from tensorflow.python.util.all_util import remove_undocumented
# pylint: enable=unused-import,line-too-long
_allowed_symbols = [
'numpy_input_fn',
'pandas_input_fn'
]
remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
|
apache-2.0
|
ZENGXH/scikit-learn
|
sklearn/metrics/__init__.py
|
52
|
3394
|
"""
The :mod:`sklearn.metrics` module includes score functions, performance metrics
and pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
__all__ = [
'accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'average_precision_score',
'classification_report',
'cluster',
'completeness_score',
'confusion_matrix',
'consensus_score',
'coverage_error',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'get_scorer',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'jaccard_similarity_score',
'label_ranking_average_precision_score',
'label_ranking_loss',
'log_loss',
'make_scorer',
'matthews_corrcoef',
'mean_absolute_error',
'mean_squared_error',
'median_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_distances_argmin',
    'pairwise_distances_argmin_min',
'pairwise_kernels',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'roc_curve',
'SCORERS',
'silhouette_samples',
'silhouette_score',
'v_measure_score',
'zero_one_loss',
'brier_score_loss',
]
|
bsd-3-clause
|
mgsergio/omim
|
tools/python/transit/transit_graph_generator.py
|
1
|
17616
|
#!/usr/bin/env python2.7
# Generates a transit graph for the MWM transit section generator.
# Also shows a preview of the transit scheme lines.
import argparse
import copy
import json
import math
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
import os.path
import sys, io
import bezier_curves
import transit_color_palette
class OsmIdCode:
NODE = 0x4000000000000000
WAY = 0x8000000000000000
RELATION = 0xC000000000000000
RESET = ~(NODE | WAY | RELATION)
TYPE2CODE = {
'n': NODE,
'r': RELATION,
'w': WAY
}
def get_extended_osm_id(osm_id, osm_type):
try:
return str(osm_id | OsmIdCode.TYPE2CODE[osm_type[0]])
except KeyError:
raise ValueError('Unknown OSM type: ' + osm_type)
def get_line_id(road_id, line_index):
return road_id << 4 | line_index
def get_interchange_node_id(min_stop_id):
return 1 << 62 | min_stop_id
def clamp(value, min_value, max_value):
return max(min(value, max_value), min_value)
def get_mercator_point(lat, lon):
lat = clamp(lat, -86.0, 86.0)
sin_x = math.sin(math.radians(lat))
y = math.degrees(0.5 * math.log((1.0 + sin_x) / (1.0 - sin_x)))
y = clamp(y, -180, 180)
return {'x': lon, 'y': y}
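# Illustrative notes (added for clarity; not part of the original tool, values approximate):
# get_extended_osm_id() packs the OSM object type into the two high bits of a 64-bit id,
# get_line_id() packs a route id together with a 4-bit itinerary index, and
# get_mercator_point() projects WGS84 coordinates onto the Mercator plane used below, e.g.
#   get_line_id(7, 2)                 # -> 7 << 4 | 2 == 114
#   get_mercator_point(55.75, 37.62)  # -> {'x': 37.62, 'y': ~67.4}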
class TransitGraphBuilder:
def __init__(self, input_data, transit_colors, points_per_curve=100, alpha=0.5):
self.palette = transit_color_palette.Palette(transit_colors)
self.input_data = input_data
self.points_per_curve = points_per_curve
self.alpha = alpha
self.networks = []
self.lines = []
self.stops = {}
self.interchange_nodes = set()
self.transfers = {}
self.gates = {}
self.edges = []
self.segments = {}
self.shapes = []
self.transit_graph = None
self.matched_colors = {}
def __get_average_stops_point(self, stop_ids):
"""Returns an average position of the stops."""
count = len(stop_ids)
if count == 0:
            raise ValueError('Average stops point calculation failed: the list of stop ids is empty.')
average_point = [0, 0]
for stop_id in stop_ids:
point = self.__get_stop(stop_id)['point']
average_point[0] += point['x']
average_point[1] += point['y']
return [average_point[0] / count, average_point[1] / count]
def __add_gate(self, osm_id, is_entrance, is_exit, point, weight, stop_id):
"""Creates a new gate or adds information to the existing with the same weight."""
if (osm_id, weight) in self.gates:
gate_ref = self.gates[(osm_id, weight)]
if stop_id not in gate_ref['stop_ids']:
gate_ref['stop_ids'].append(stop_id)
gate_ref['entrance'] |= is_entrance
gate_ref['exit'] |= is_exit
return
gate = {'osm_id': osm_id,
'point': point,
'weight': weight,
'stop_ids': [stop_id],
'entrance': is_entrance,
'exit': is_exit
}
self.gates[(osm_id, weight)] = gate
def __get_interchange_node(self, stop_id):
"""Returns the existing interchange node or creates a new one."""
for node_stops in self.interchange_nodes:
if stop_id in node_stops:
return node_stops
return (stop_id,)
def __get_stop(self, stop_id):
"""Returns the stop or the interchange node."""
if stop_id in self.stops:
return self.stops[stop_id]
return self.transfers[stop_id]
def __read_stops(self):
"""Reads stops, their exits and entrances."""
for stop_item in self.input_data['stops']:
stop = {}
stop['id'] = stop_item['id']
stop['osm_id'] = get_extended_osm_id(stop_item['osm_id'], stop_item['osm_type'])
if 'zone_id' in stop_item:
stop['zone_id'] = stop_item['zone_id']
stop['point'] = get_mercator_point(stop_item['lat'], stop_item['lon'])
stop['line_ids'] = []
# TODO: Save stop names stop_item['name'] and stop_item['int_name'] for text anchors calculation.
stop['title_anchors'] = []
self.stops[stop['id']] = stop
for entrance_item in stop_item['entrances']:
ex_id = get_extended_osm_id(entrance_item['osm_id'], entrance_item['osm_type'])
point = get_mercator_point(entrance_item['lat'], entrance_item['lon'])
self.__add_gate(ex_id, True, False, point, entrance_item['distance'], stop['id'])
for exit_item in stop_item['exits']:
ex_id = get_extended_osm_id(exit_item['osm_id'], exit_item['osm_type'])
point = get_mercator_point(exit_item['lat'], exit_item['lon'])
self.__add_gate(ex_id, False, True, point, exit_item['distance'], stop['id'])
def __read_transfers(self):
"""Reads transfers between stops."""
for transfer_item in self.input_data['transfers']:
edge = {'stop1_id': transfer_item[0],
'stop2_id': transfer_item[1],
'weight': transfer_item[2],
'transfer': True
}
self.edges.append(copy.deepcopy(edge))
edge['stop1_id'], edge['stop2_id'] = edge['stop2_id'], edge['stop1_id']
self.edges.append(edge)
def __read_networks(self):
"""Reads networks and routes."""
for network_item in self.input_data['networks']:
network_id = network_item['agency_id']
network = {'id': network_id,
'title': network_item['network']}
self.networks.append(network)
for route_item in network_item['routes']:
line_index = 0
# Create a line for each itinerary.
for line_item in route_item['itineraries']:
if 'name' in line_item:
                        line_name = '{0} ({1})'.format(route_item['name'], line_item['name'])
else:
line_name = route_item['name']
line_stops = line_item['stops']
line_id = get_line_id(route_item['route_id'], line_index)
line = {'id': line_id,
'type': route_item['type'],
'network_id': network_id,
'title': line_name,
'number': route_item['ref'],
'interval': line_item['interval'],
'stop_ids': []
}
line['color'] = self.__match_color(route_item.get('colour', ''), route_item.get('casing', ''))
                    # TODO: Add processing of line_item['shape'] when this data becomes available.
                    # TODO: Add processing of line_item['trip_ids'] when this data becomes available.
# Create an edge for each connection of stops.
for i in range(len(line_stops)):
stop1 = line_stops[i]
line['stop_ids'].append(stop1[0])
self.stops[stop1[0]]['line_ids'].append(line_id)
if i + 1 < len(line_stops):
stop2 = line_stops[i + 1]
edge = {'stop1_id': stop1[0],
'stop2_id': stop2[0],
'weight': stop2[1] - stop1[1],
'transfer': False,
'line_id': line_id,
'shape_ids': []
}
self.edges.append(edge)
self.lines.append(line)
line_index += 1
def __match_color(self, color_str, casing_str):
if color_str is None or len(color_str) == 0:
return self.palette.get_default_color()
if casing_str is None:
casing_str = ''
matched_colors_key = color_str + "/" + casing_str
if matched_colors_key in self.matched_colors:
return self.matched_colors[matched_colors_key]
c = self.palette.get_nearest_color(color_str, casing_str, self.matched_colors.values())
if c != self.palette.get_default_color():
self.matched_colors[matched_colors_key] = c
return c
def __generate_transfer_nodes(self):
"""Merges stops into transfer nodes."""
for edge in self.edges:
if edge['transfer']:
node1 = self.__get_interchange_node(edge['stop1_id'])
node2 = self.__get_interchange_node(edge['stop2_id'])
merged_node = tuple(sorted(set(node1 + node2)))
self.interchange_nodes.discard(node1)
self.interchange_nodes.discard(node2)
self.interchange_nodes.add(merged_node)
for node_stop_ids in self.interchange_nodes:
point = self.__get_average_stops_point(node_stop_ids)
transfer = {'id': get_interchange_node_id(self.stops[node_stop_ids[0]]['id']),
'stop_ids': list(node_stop_ids),
'point': {'x': point[0], 'y': point[1]},
'title_anchors': []
}
for stop_id in node_stop_ids:
self.stops[stop_id]['transfer_id'] = transfer['id']
self.transfers[transfer['id']] = transfer
def __collect_segments(self):
"""Prepares collection of segments for shapes generation."""
        # Each line is divided into segments by its stops and transfer nodes.
        # Equal segments from different lines are merged into a single one, and the adjacent stops of that segment are collected.
        # The average positions of these stops are used as guide points for curve generation.
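        # Illustration with hypothetical stop ids (added for clarity): if line A visits
        # stops 1-2-3 and line B visits stops 2-3-4, the shared segment (2, 3) is stored
        # once; stop 1 becomes a guide point on the 2-side and stop 4 on the 3-side, so
        # both lines are drawn with the same smooth curve between stops 2 and 3.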
for line in self.lines:
prev_seg = None
prev_id1 = None
for i in range(len(line['stop_ids']) - 1):
node1 = self.stops[line['stop_ids'][i]]
node2 = self.stops[line['stop_ids'][i + 1]]
id1 = node1.get('transfer_id', node1['id'])
id2 = node2.get('transfer_id', node2['id'])
seg = tuple(sorted([id1, id2]))
if seg not in self.segments:
self.segments[seg] = {'guide_points': {id1: set(), id2: set()}}
if prev_seg is not None:
self.segments[seg]['guide_points'][id1].add(prev_id1)
self.segments[prev_seg]['guide_points'][id1].add(id2)
prev_seg = seg
prev_id1 = id1
def __generate_shapes_for_segments(self):
"""Generates a curve for each connection of two stops / transfer nodes."""
for (id1, id2), info in self.segments.items():
point1 = [self.__get_stop(id1)['point']['x'], self.__get_stop(id1)['point']['y']]
point2 = [self.__get_stop(id2)['point']['x'], self.__get_stop(id2)['point']['y']]
if info['guide_points'][id1]:
guide1 = self.__get_average_stops_point(info['guide_points'][id1])
else:
guide1 = [2 * point1[0] - point2[0], 2 * point1[1] - point2[1]]
if info['guide_points'][id2]:
guide2 = self.__get_average_stops_point(info['guide_points'][id2])
else:
guide2 = [2 * point2[0] - point1[0], 2 * point2[1] - point1[1]]
curve_points = bezier_curves.segment_to_Catmull_Rom_curve(guide1, point1, point2, guide2,
self.points_per_curve, self.alpha)
info['curve'] = np.array(curve_points)
polyline = []
for point in curve_points:
polyline.append({'x': point[0], 'y': point[1]})
shape = {'id': {'stop1_id': id1, 'stop2_id': id2},
'polyline': polyline}
self.shapes.append(shape)
def __assign_shapes_to_edges(self):
"""Assigns a shape to each non-transfer edge."""
for edge in self.edges:
if not edge['transfer']:
stop1 = self.stops[edge['stop1_id']]
stop2 = self.stops[edge['stop2_id']]
id1 = stop1.get('transfer_id', stop1['id'])
id2 = stop2.get('transfer_id', stop2['id'])
seg = tuple(sorted([id1, id2]))
if seg in self.segments:
edge['shape_ids'].append({'stop1_id': seg[0], 'stop2_id': seg[1]})
def __create_scheme_shapes(self):
self.__collect_segments()
self.__generate_shapes_for_segments()
self.__assign_shapes_to_edges()
def build(self):
if self.transit_graph is not None:
return self.transit_graph
self.__read_stops()
self.__read_transfers()
self.__read_networks()
self.__generate_transfer_nodes()
self.__create_scheme_shapes()
self.transit_graph = {'networks': self.networks,
'lines': self.lines,
'gates': self.gates.values(),
'stops': self.stops.values(),
'transfers': self.transfers.values(),
'shapes': self.shapes,
'edges': self.edges}
return self.transit_graph
def show_preview(self):
for (s1, s2), info in self.segments.items():
plt.plot(info['curve'][:, 0], info['curve'][:, 1], 'g')
for stop in self.stops.values():
if 'transfer_id' in stop:
point = self.transfers[stop['transfer_id']]['point']
size = 60
color = 'r'
else:
point = stop['point']
if len(stop['line_ids']) > 2:
size = 40
color = 'b'
else:
size = 20
color = 'g'
plt.scatter([point['x']], [point['y']], size, color)
plt.show()
def show_color_maching_table(self, title, colors_ref_table):
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal')
plt.title(title)
sz = 1.0 / (2.0 * len(self.matched_colors))
delta_y = sz * 0.5
for c in self.matched_colors:
tokens = c.split('/')
if len(tokens[1]) == 0:
tokens[1] = tokens[0]
ax.add_patch(patches.Rectangle((sz, delta_y), sz, sz, facecolor="#" + tokens[0], edgecolor="#" + tokens[1]))
rect_title = tokens[0]
if tokens[0] != tokens[1]:
rect_title += "/" + tokens[1]
ax.text(2.5 * sz, delta_y, rect_title + " -> ")
ref_color = colors_ref_table[self.matched_colors[c]]
ax.add_patch(patches.Rectangle((0.3 + sz, delta_y), sz, sz, facecolor="#" + ref_color))
ax.text(0.3 + 2.5 * sz, delta_y, ref_color + " (" + self.matched_colors[c] + ")")
delta_y += sz * 2.0
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_file', help='input file name of transit data')
parser.add_argument('output_file', nargs='?', help='output file name of generated graph')
default_colors_path = os.path.dirname(os.path.abspath(__file__)) + '/../../../data/transit_colors.txt'
parser.add_argument('-c', '--colors', type=str, default=default_colors_path,
help='transit colors file COLORS_FILE_PATH', metavar='COLORS_FILE_PATH')
parser.add_argument('-p', '--preview', action="store_true", default=False,
help="show preview of the transit scheme")
parser.add_argument('-m', '--matched_colors', action="store_true", default=False,
help="show the matched colors table")
parser.add_argument('-a', '--alpha', type=float, default=0.5, help='the curves generator parameter value ALPHA',
metavar='ALPHA')
parser.add_argument('-n', '--num', type=int, default=100, help='the number NUM of points in a generated curve',
metavar='NUM')
args = parser.parse_args()
with open(args.input_file, 'r') as input_file:
data = json.load(input_file)
with open(args.colors, 'r') as colors_file:
colors = json.load(colors_file)
transit = TransitGraphBuilder(data, colors, args.num, args.alpha)
result = transit.build()
output_file = args.output_file
head, tail = os.path.split(os.path.abspath(args.input_file))
name, extension = os.path.splitext(tail)
if output_file is None:
output_file = os.path.join(head, name + '.transit' + extension)
with io.open(output_file, 'w', encoding='utf8') as json_file:
result_data = json.dumps(result, ensure_ascii=False, indent=4, sort_keys=True)
json_file.write(unicode(result_data))
print 'Transit graph generated:', output_file
if args.preview:
transit.show_preview()
if args.matched_colors:
colors_ref_table = {}
for color_name, color_info in colors['colors'].iteritems():
colors_ref_table[color_name] = color_info['clear']
transit.show_color_maching_table(name, colors_ref_table)
|
apache-2.0
|
mutaphore/ML-CTR
|
nb.py
|
1
|
1153
|
#!/usr/local/bin/python
import os
import sys
import gc
import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn.cross_validation import train_test_split
from clean import parse_data
if __name__ == '__main__':
f_train = 'train10k'
f_test = 'test'
print "Parsing data..."
YX = parse_data(f_train, combine=True)
YX_train, YX_test = train_test_split(YX, test_size=0.1,
random_state=42)
# random_state=np.random.random_integers(100000))
X_train = np.array(YX_train)[:,1:]
Y_train = np.array(YX_train)[:,0]
X_test = np.array(YX_test)[:,1:]
Y_test = np.array(YX_test)[:,0]
print "Training NB classifier..."
clf = GaussianNB()
clf.fit(X_train, Y_train)
print "Cross validating..."
score = clf.score(X_test, Y_test)
print "CV score on test data %r " % score
# Now predict real test data
print "Parsing test data..."
X_test = parse_data(f_test)[0]
print "Predicting..."
prob = clf.predict_proba(X_test)
print "Writing probs..."
f_out = open("nb_prob", 'w')
for row in prob:
f_out.write(str(row[0]) + '\n')
f_out.close()
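    # Hedged follow-up sketch (added; not part of the original script): the file written
    # above holds one probability per line, so it can be read back with, e.g.
    #   with open("nb_prob") as f:
    #       probs = [float(line) for line in f]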
|
gpl-2.0
|
harshaneelhg/scikit-learn
|
sklearn/linear_model/coordinate_descent.py
|
12
|
75078
|
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Gael Varoquaux <[email protected]>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..utils import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
        For ``l1_ratio = 0`` the penalty is an L2 penalty. For
        ``l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio < 1``,
        the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = center_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
normalize)
mean_dot = X_mean * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_std[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
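# Note (added for illustration; a sketch, not part of the library API surface):
# the grid above is returned in decreasing order and spans
# [eps * alpha_max, alpha_max] on a log scale, where alpha_max is (roughly) the
# smallest penalty at which all coefficients are zero for the given l1_ratio, e.g.
#   rng = np.random.RandomState(0)
#   X, y = rng.randn(20, 5), rng.randn(20)
#   alphas = _alpha_grid(X, y, n_alphas=5)   # alphas[0] == alpha_max, length 5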
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of the norms of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
    Note that in certain cases, the Lars solver may be significantly
    faster at implementing this functionality. In particular, linear
    interpolation can be used to retrieve model coefficients between the
    values output by lars_path.
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of the norms of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
See examples/plot_lasso_coordinate_descent_path.py for an example.
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
"""
# We expect X and y to be already float64 Fortran ordered when bypassing
# checks
check_input = 'check_input' not in params or params['check_input']
pre_fit = 'check_input' not in params or params['pre_fit']
if check_input:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
y = check_array(y, 'csc', dtype=np.float64, order='F', copy=False,
ensure_2d=False)
if Xy is not None:
Xy = check_array(Xy, 'csc', dtype=np.float64, order='F',
copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_mean' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_mean'] / params['X_std']
else:
X_sparse_scaling = np.zeros(n_features)
# X should be normalized and fit already if function is called
# from ElasticNet.fit
if pre_fit:
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False,
fit_intercept=False,
copy=False, Xy_precompute_order='F')
if alphas is None:
        # No need to normalize or fit_intercept: it has been done
        # above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=np.float64)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=np.float64)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
else:
coef_ = np.asfortranarray(coef_init)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
# We expect precompute to be already Fortran ordered when bypassing
# checks
if check_input:
precompute = check_array(precompute, 'csc', dtype=np.float64,
order='F')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like")
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
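# Hedged usage sketch (added for illustration; mirrors the doctest in lasso_path):
#   X = np.array([[1., 2., 3.1], [2.3, 5.4, 4.3]]).T
#   y = np.array([1., 2., 3.1])
#   alphas, coefs, gaps = enet_path(X, y, l1_ratio=0.5, n_alphas=5)
# alphas is decreasing, coefs has shape (n_features, n_alphas) for mono-output y,
# and gaps holds the dual gap reached at the end of the optimization for each alpha.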
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
self.random_state = random_state
self.selection = selection
def fit(self, X, y, check_input=True):
"""Fit model with coordinate descent.
Parameters
-----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if self.precompute == 'auto':
warnings.warn("Setting precompute to 'auto', was found to be "
"slower even when n_samples > n_features. Hence "
"it will be removed in 0.18.",
DeprecationWarning, stacklevel=2)
# We expect X and y to be already float64 Fortran ordered arrays
# when bypassing checks
if check_input:
X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
order='F',
copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=False, Xy_precompute_order='F')
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_mean=X_mean, X_std=X_std, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection,
check_input=False,
pre_fit=False)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self)._decision_function(X)
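# Worked example (added for illustration, using the a/b equivalence stated in the
# ElasticNet docstring): to weight the L1 term by a = 0.1 and the L2 term by b = 0.3,
# set alpha = a + b = 0.4 and l1_ratio = a / (a + b) = 0.25, i.e. a hedged sketch:
#   enet = ElasticNet(alpha=0.4, l1_ratio=0.25)
#   enet.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])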
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_mean'] = X_mean
path_params['X_std'] = X_std
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_mean = np.atleast_1d(y_mean)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_std)
coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]
intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
        # Workaround for sparse matrices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
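# Note (added for clarity, not part of the original code): for one train/test split this
# helper fits the whole regularization path once and returns a single test MSE per alpha
# (averaged over test samples and, for multi-output y, over targets); LinearModelCV.fit
# below averages these arrays over folds to pick the best alpha / l1_ratio combination.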
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as float64, Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = np.asarray(y, dtype=np.float64)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1 and y.shape[1] > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
y = column_or_1d(y, warn=True)
else:
if sparse.isspmatrix(X):
raise TypeError("X should be dense but a sparse matrix was"
"passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Dealing right with copy_X is important in the following:
# Multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering or float64 so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if not np.may_share_memory(reference_to_old_X.data, X.data):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv, X)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv)
best_mse = np.inf
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=np.float64)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
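# Hedged usage sketch (added for illustration; data and settings are arbitrary):
#   from sklearn.linear_model import LassoCV
#   clf = LassoCV(cv=3)
#   clf.fit([[0, 0], [1, 1], [2, 2], [3, 3]], [0, 1, 2, 3])
#   clf.alpha_   # penalty strength selected by 3-fold cross-validation
#   clf.coef_    # coefficients of the model refit on the full data with alpha_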
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
Parameter vector (w in the cost function formula),
    intercept_ : float | array, shape (n_targets,)
Independent term in the decision function.
mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
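# --- Illustrative sketch (editorial addition, not part of scikit-learn) ------
# The ElasticNetCV docstring above notes that a penalty written as
# ``a * L1 + b * L2`` corresponds to ``alpha = a + b`` and
# ``l1_ratio = a / (a + b)``. The helper below only spells out that
# conversion; the function name is made up for illustration.
def _sketch_l1_l2_to_enet_params(a, b):
    """Convert separate L1/L2 strengths (a, b) into (alpha, l1_ratio)."""
    alpha = a + b
    l1_ratio = a / (a + b)
    return alpha, l1_ratio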
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
        (1 / (2 * n_samples)) * ||Y - XW||_Fro^2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
-----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = check_array(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = np.asarray(y, dtype=np.float64)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_mean, y_mean, X_std)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
parameter vector (W in the cost function formula)
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
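# --- Illustrative sketch (editorial addition, not part of scikit-learn) ------
# The multi-task docstrings above use the row-wise mixed norm
# ||W||_21 = sum_i sqrt(sum_j w_ij^2), i.e. the sum of the Euclidean norms of
# the rows of W. The helper below evaluates it with NumPy; it is provided for
# illustration only and is not used by the estimators in this module.
def _sketch_l21_norm(W):
    """Return the L2,1 mixed norm of a 2D coefficient array W."""
    W = np.asarray(W)
    return np.sum(np.sqrt(np.sum(W ** 2, axis=1)))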
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
        (1 / (2 * n_samples)) * ||Y - XW||_Fro^2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
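# --- Illustrative sketch (editorial addition, not part of scikit-learn) ------
# The objective quoted in the MultiTaskElasticNetCV docstring,
#   (1 / (2 * n_samples)) * ||Y - XW||_Fro^2
#   + alpha * l1_ratio * ||W||_21
#   + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2,
# can be evaluated directly for a candidate W. The helper below does that for
# W of shape (n_features, n_tasks), the convention used in the formula (the
# fitted ``coef_`` is its transpose). Illustration only, not the solver's
# internal cost function.
def _sketch_multitask_enet_objective(X, Y, W, alpha, l1_ratio):
    """Evaluate the multi-task elastic-net objective for coefficients W."""
    n_samples = X.shape[0]
    residual = Y - np.dot(X, W)
    data_term = np.sum(residual ** 2) / (2.0 * n_samples)
    l21 = np.sum(np.sqrt(np.sum(W ** 2, axis=1)))  # sum of row norms
    fro2 = np.sum(W ** 2)
    return data_term + alpha * l1_ratio * l21 + 0.5 * alpha * (1.0 - l1_ratio) * fro2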
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
        (1 / (2 * n_samples)) * ||Y - XW||_Fro^2 + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
        If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
|
bsd-3-clause
|
aabadie/scikit-learn
|
sklearn/preprocessing/__init__.py
|
268
|
1319
|
"""
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization and imputation methods.
"""
from ._function_transformer import FunctionTransformer
from .data import Binarizer
from .data import KernelCenterer
from .data import MinMaxScaler
from .data import MaxAbsScaler
from .data import Normalizer
from .data import RobustScaler
from .data import StandardScaler
from .data import add_dummy_feature
from .data import binarize
from .data import normalize
from .data import scale
from .data import robust_scale
from .data import maxabs_scale
from .data import minmax_scale
from .data import OneHotEncoder
from .data import PolynomialFeatures
from .label import label_binarize
from .label import LabelBinarizer
from .label import LabelEncoder
from .label import MultiLabelBinarizer
from .imputation import Imputer
__all__ = [
'Binarizer',
'FunctionTransformer',
'Imputer',
'KernelCenterer',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'PolynomialFeatures',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'label_binarize',
]
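# --- Illustrative usage sketch (editorial addition, not part of scikit-learn) -
# A minimal demonstration of two of the helpers re-exported above, guarded so
# that importing ``sklearn.preprocessing`` is unaffected.
if __name__ == '__main__':
    import numpy as np
    X = np.array([[1.0, -2.0], [3.0, 0.0], [5.0, 2.0]])
    # StandardScaler removes the per-column mean and scales to unit variance.
    print(StandardScaler().fit_transform(X))
    # The functional form does the same in a single call.
    print(scale(X))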
|
bsd-3-clause
|
IndraVikas/scikit-learn
|
sklearn/cross_decomposition/tests/test_pls.py
|
215
|
11427
|
import numpy as np
from sklearn.utils.testing import (assert_array_almost_equal,
assert_array_equal, assert_true, assert_raise_message)
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_
from nose.tools import assert_equal
def test_pls():
d = load_linnerud()
X = d.data
Y = d.target
# 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
# ===========================================================
# Compare 2 algo.: nipals vs. svd
# ------------------------------
pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
pls_bynipals.fit(X, Y)
pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
pls_bysvd.fit(X, Y)
# check equalities of loading (up to the sign of the second column)
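    # (an individual PLS component is only defined up to its sign, so the NIPALS
    # and SVD solutions may come back flipped; multiplying by [1, -1, 1] aligns
    # the second component before the comparison)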
assert_array_almost_equal(
pls_bynipals.x_loadings_,
np.multiply(pls_bysvd.x_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different x loadings")
assert_array_almost_equal(
pls_bynipals.y_loadings_,
np.multiply(pls_bysvd.y_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different y loadings")
# Check PLS properties (with n_components=X.shape[1])
# ---------------------------------------------------
plsca = pls_.PLSCanonical(n_components=X.shape[1])
plsca.fit(X, Y)
T = plsca.x_scores_
P = plsca.x_loadings_
Wx = plsca.x_weights_
U = plsca.y_scores_
Q = plsca.y_loadings_
Wy = plsca.y_weights_
def check_ortho(M, err_msg):
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(Wx, "x weights are not orthogonal")
check_ortho(Wy, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(T, "x scores are not orthogonal")
check_ortho(U, "y scores are not orthogonal")
# Check X = TP' and Y = UQ' (with (p == q) components)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# center scale X, Y
Xc, Yc, x_mean, y_mean, x_std, y_std =\
pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
# Check that rotations on training data lead to scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Xr = plsca.transform(X)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
Xr, Yr = plsca.transform(X, Y)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
assert_array_almost_equal(Yr, plsca.y_scores_,
err_msg="rotation on Y failed")
# "Non regression test" on canonical PLS
# --------------------------------------
# The results were checked against the R-package plspm
pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
pls_ca.fit(X, Y)
x_weights = np.array(
[[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_rotations = np.array(
[[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788]])
assert_array_almost_equal(pls_ca.x_rotations_, x_rotations)
y_weights = np.array(
[[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_rotations = np.array(
[[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826]])
assert_array_almost_equal(pls_ca.y_rotations_, y_rotations)
# 2) Regression PLS (PLS2): "Non regression test"
# ===============================================
# The results were checked against the R-packages plspm, misOmics and pls
pls_2 = pls_.PLSRegression(n_components=X.shape[1])
pls_2.fit(X, Y)
x_weights = np.array(
[[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983]])
assert_array_almost_equal(pls_2.x_weights_, x_weights)
x_loadings = np.array(
[[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983]])
assert_array_almost_equal(pls_2.x_loadings_, x_loadings)
y_weights = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_weights_, y_weights)
y_loadings = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_loadings_, y_loadings)
# 3) Another non-regression test of Canonical PLS on random dataset
# =================================================================
# The results were checked against the R-package plspm
n = 500
p_noise = 10
q_noise = 5
# 2 latents vars:
np.random.seed(11)
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X = np.concatenate(
(X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
Y = np.concatenate(
(Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
np.random.seed(None)
pls_ca = pls_.PLSCanonical(n_components=3)
pls_ca.fit(X, Y)
x_weights = np.array(
[[0.65803719, 0.19197924, 0.21769083],
[0.7009113, 0.13303969, -0.15376699],
[0.13528197, -0.68636408, 0.13856546],
[0.16854574, -0.66788088, -0.12485304],
[-0.03232333, -0.04189855, 0.40690153],
[0.1148816, -0.09643158, 0.1613305],
[0.04792138, -0.02384992, 0.17175319],
[-0.06781, -0.01666137, -0.18556747],
[-0.00266945, -0.00160224, 0.11893098],
[-0.00849528, -0.07706095, 0.1570547],
[-0.00949471, -0.02964127, 0.34657036],
[-0.03572177, 0.0945091, 0.3414855],
[0.05584937, -0.02028961, -0.57682568],
[0.05744254, -0.01482333, -0.17431274]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_loadings = np.array(
[[0.65649254, 0.1847647, 0.15270699],
[0.67554234, 0.15237508, -0.09182247],
[0.19219925, -0.67750975, 0.08673128],
[0.2133631, -0.67034809, -0.08835483],
[-0.03178912, -0.06668336, 0.43395268],
[0.15684588, -0.13350241, 0.20578984],
[0.03337736, -0.03807306, 0.09871553],
[-0.06199844, 0.01559854, -0.1881785],
[0.00406146, -0.00587025, 0.16413253],
[-0.00374239, -0.05848466, 0.19140336],
[0.00139214, -0.01033161, 0.32239136],
[-0.05292828, 0.0953533, 0.31916881],
[0.04031924, -0.01961045, -0.65174036],
[0.06172484, -0.06597366, -0.1244497]])
assert_array_almost_equal(pls_ca.x_loadings_, x_loadings)
y_weights = np.array(
[[0.66101097, 0.18672553, 0.22826092],
[0.69347861, 0.18463471, -0.23995597],
[0.14462724, -0.66504085, 0.17082434],
[0.22247955, -0.6932605, -0.09832993],
[0.07035859, 0.00714283, 0.67810124],
[0.07765351, -0.0105204, -0.44108074],
[-0.00917056, 0.04322147, 0.10062478],
[-0.01909512, 0.06182718, 0.28830475],
[0.01756709, 0.04797666, 0.32225745]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_loadings = np.array(
[[0.68568625, 0.1674376, 0.0969508],
[0.68782064, 0.20375837, -0.1164448],
[0.11712173, -0.68046903, 0.12001505],
[0.17860457, -0.6798319, -0.05089681],
[0.06265739, -0.0277703, 0.74729584],
[0.0914178, 0.00403751, -0.5135078],
[-0.02196918, -0.01377169, 0.09564505],
[-0.03288952, 0.09039729, 0.31858973],
[0.04287624, 0.05254676, 0.27836841]])
assert_array_almost_equal(pls_ca.y_loadings_, y_loadings)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
def test_PLSSVD():
    # Let's check the PLSSVD doesn't return all possible components but just
    # the specified number
d = load_linnerud()
X = d.data
Y = d.target
n_components = 2
for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]:
pls = clf(n_components=n_components)
pls.fit(X, Y)
assert_equal(n_components, pls.y_scores_.shape[1])
def test_univariate_pls_regression():
# Ensure 1d Y is correctly interpreted
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSRegression()
# Compare 1d to column vector
model1 = clf.fit(X, Y[:, 0]).coef_
model2 = clf.fit(X, Y[:, :1]).coef_
assert_array_almost_equal(model1, model2)
def test_predict_transform_copy():
# check that the "copy" keyword works
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSCanonical()
X_copy = X.copy()
Y_copy = Y.copy()
clf.fit(X, Y)
# check that results are identical with copy
assert_array_almost_equal(clf.predict(X), clf.predict(X.copy(), copy=False))
assert_array_almost_equal(clf.transform(X), clf.transform(X.copy(), copy=False))
# check also if passing Y
assert_array_almost_equal(clf.transform(X, Y),
clf.transform(X.copy(), Y.copy(), copy=False))
# check that copy doesn't destroy
# we do want to check exact equality here
assert_array_equal(X_copy, X)
assert_array_equal(Y_copy, Y)
# also check that mean wasn't zero before (to make sure we didn't touch it)
assert_true(np.all(X.mean(axis=0) != 0))
def test_scale():
d = load_linnerud()
X = d.data
Y = d.target
# causes X[:, -1].std() to be zero
X[:, -1] = 1.0
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.set_params(scale=True)
clf.fit(X, Y)
def test_pls_errors():
d = load_linnerud()
X = d.data
Y = d.target
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.n_components = 4
assert_raise_message(ValueError, "Invalid number of components", clf.fit, X, Y)
|
bsd-3-clause
|
tobiasgehring/qudi
|
logic/odmr_logic.py
|
1
|
39134
|
# -*- coding: utf-8 -*-
"""
This file contains the Qudi Logic module base class.
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""
from qtpy import QtCore
from collections import OrderedDict
from interface.microwave_interface import MicrowaveMode
from interface.microwave_interface import TriggerEdge
import numpy as np
import time
import datetime
import matplotlib.pyplot as plt
import lmfit
from logic.generic_logic import GenericLogic
from core.util.mutex import Mutex
class ODMRLogic(GenericLogic):
"""This is the Logic class for ODMR."""
_modclass = 'odmrlogic'
_modtype = 'logic'
# declare connectors
_connectors = {
'odmrcounter': 'ODMRCounterInterface',
'fitlogic': 'FitLogic',
'microwave1': 'mwsourceinterface',
'savelogic': 'SaveLogic',
'taskrunner': 'TaskRunner'
}
# Internal signals
sigNextLine = QtCore.Signal()
# Update signals, e.g. for GUI module
sigParameterUpdated = QtCore.Signal(dict)
sigOutputStateUpdated = QtCore.Signal(str, bool)
sigOdmrPlotsUpdated = QtCore.Signal(np.ndarray, np.ndarray, np.ndarray)
sigOdmrFitUpdated = QtCore.Signal(np.ndarray, np.ndarray, dict, str)
sigOdmrElapsedTimeUpdated = QtCore.Signal(float, int)
def __init__(self, config, **kwargs):
super().__init__(config=config, **kwargs)
self.log.info('The following configuration was found.')
# checking for the right configuration
for key in config.keys():
self.log.info('{0}: {1}'.format(key, config[key]))
self.threadlock = Mutex()
def on_activate(self):
"""
Initialisation performed during activation of the module.
"""
# Get configuration
config = self.getConfiguration()
# Get connectors
self._mw_device = self.get_connector('microwave1')
self._fit_logic = self.get_connector('fitlogic')
self._odmr_counter = self.get_connector('odmrcounter')
self._save_logic = self.get_connector('savelogic')
self._taskrunner = self.get_connector('taskrunner')
# Setup fit container
self.fc = self._fit_logic.make_fit_container('ODMR sum', '1d')
self.fc.set_units(['Hz', 'c/s'])
if 'fits' in self._statusVariables and isinstance(self._statusVariables['fits'], dict):
self.fc.load_from_dict(self._statusVariables['fits'])
else:
d1 = OrderedDict()
d1['Lorentzian dip'] = {
'fit_function': 'lorentzian',
'estimator': 'dip'
}
d1['Two Lorentzian dips'] = {
'fit_function': 'lorentziandouble',
'estimator': 'dip'
}
d1['N14'] = {
'fit_function': 'lorentziantriple',
'estimator': 'N14'
}
d1['N15'] = {
'fit_function': 'lorentziandouble',
'estimator': 'N15'
}
d1['Two Gaussian dips'] = {
'fit_function': 'gaussiandouble',
'estimator': 'dip'
}
default_fits = OrderedDict()
default_fits['1d'] = d1
self.fc.load_from_dict(default_fits)
# Set/recall clock frequency for ODMR counting device in Hz
if 'clock_frequency' in self._statusVariables:
self.clock_frequency = self._statusVariables['clock_frequency']
else:
self.clock_frequency = 200
# Get hardware constraints
limits = self.get_hw_constraints()
# Set/recall microwave source parameters
if 'cw_mw_frequency' in self._statusVariables:
self.cw_mw_frequency = limits.frequency_in_range(
self._statusVariables['cw_mw_frequency'])
else:
self.cw_mw_frequency = limits.frequency_in_range(2870e6)
if 'cw_mw_power' in self._statusVariables:
self.cw_mw_power = limits.power_in_range(self._statusVariables['cw_mw_power'])
else:
self.cw_mw_power = limits.power_in_range(-30)
if 'sweep_mw_power' in self._statusVariables:
self.sweep_mw_power = limits.power_in_range(self._statusVariables['sweep_mw_power'])
else:
self.sweep_mw_power = limits.power_in_range(-30)
if 'mw_start' in self._statusVariables:
self.mw_start = limits.frequency_in_range(self._statusVariables['mw_start'])
else:
self.mw_start = limits.frequency_in_range(2800e6)
if 'mw_stop' in self._statusVariables:
self.mw_stop = limits.frequency_in_range(self._statusVariables['mw_stop'])
else:
self.mw_stop = limits.frequency_in_range(2950e6)
if 'mw_step' in self._statusVariables:
self.mw_step = limits.list_step_in_range(self._statusVariables['mw_step'])
else:
self.mw_step = limits.list_step_in_range(2e6)
# Set the trigger polarity (RISING/FALLING) of the mw-source input trigger
# theoretically this can be changed, but the current counting scheme will not support that
self.mw_trigger_pol = TriggerEdge.RISING
self.set_trigger_pol(self.mw_trigger_pol)
        # Get scanmode from config. Currently only 'sweep' and 'list' are allowed
if 'scanmode' in config:
if ('sweep' in config['scanmode']) or ('SWEEP' in config['scanmode']):
self.mw_scanmode = MicrowaveMode.SWEEP
elif ('list' in config['scanmode']) or ('LIST' in config['scanmode']):
self.mw_scanmode = MicrowaveMode.LIST
else:
self.mw_scanmode = MicrowaveMode.LIST
self.log.error('Specified scanmode "{0}" not valid. Choose "list" or "sweep".\n'
'Falling back to list mode.'.format(config['scanmode']))
else:
self.mw_scanmode = MicrowaveMode.LIST
self.log.warning('No scanmode defined in config for odmr_logic module.\n'
'Falling back to list mode.')
# Set/recall ODMR runtime in seconds
if 'run_time' in self._statusVariables:
self.run_time = self._statusVariables['run_time']
else:
self.run_time = 60
# Elapsed measurement time and number of sweeps
self.elapsed_time = 0.0
self.elapsed_sweeps = 0
# Set flags
# for stopping a measurement
self._stopRequested = False
# for clearing the ODMR data during a measurement
self._clearOdmrData = False
# Set/recall number of lines in the raw data matrix
if 'number_of_lines' in self._statusVariables:
self.number_of_lines = self._statusVariables['number_of_lines']
else:
self.number_of_lines = 50
        # Initialize the ODMR data arrays (mean signal and sweep matrix)
self._initialize_odmr_plots()
# Raw data array
self.odmr_raw_data = np.zeros([self.number_of_lines, self.odmr_plot_x.size])
# Switch off microwave and set CW frequency and power
self.mw_off()
self.set_cw_parameters(self.cw_mw_frequency, self.cw_mw_power)
# Connect signals
self.sigNextLine.connect(self._scan_odmr_line, QtCore.Qt.QueuedConnection)
return
def on_deactivate(self):
""" Deinitialisation performed during deactivation of the module.
"""
# Stop measurement if it is still running
if self.getState() == 'locked':
self.stop_odmr_scan()
timeout = 30.0
start_time = time.time()
while self.getState() == 'locked':
time.sleep(0.5)
timeout -= (time.time() - start_time)
if timeout <= 0.0:
self.log.error('Failed to properly deactivate odmr logic. Odmr scan is still '
'running but can not be stopped after 30 sec.')
break
# Switch off microwave source for sure (also if CW mode is active or module is still locked)
self._mw_device.off()
# Disconnect signals
self.sigNextLine.disconnect()
# save parameters stored in app state store
self._statusVariables['clock_frequency'] = self.clock_frequency
self._statusVariables['cw_mw_frequency'] = self.cw_mw_frequency
self._statusVariables['cw_mw_power'] = self.cw_mw_power
self._statusVariables['sweep_mw_power'] = self.sweep_mw_power
self._statusVariables['mw_start'] = self.mw_start
self._statusVariables['mw_stop'] = self.mw_stop
self._statusVariables['mw_step'] = self.mw_step
self._statusVariables['run_time'] = self.run_time
self._statusVariables['number_of_lines'] = self.number_of_lines
if len(self.fc.fit_list) > 0:
self._statusVariables['fits'] = self.fc.save_to_dict()
def _initialize_odmr_plots(self):
""" Initializing the ODMR plots (line and matrix). """
self.odmr_plot_x = np.arange(self.mw_start, self.mw_stop + self.mw_step, self.mw_step)
self.odmr_plot_y = np.zeros(self.odmr_plot_x.size)
self.odmr_fit_x = np.arange(self.mw_start, self.mw_stop + self.mw_step, self.mw_step)
self.odmr_fit_y = np.zeros(self.odmr_fit_x.size)
self.odmr_plot_xy = np.zeros([self.number_of_lines, self.odmr_plot_x.size])
self.sigOdmrPlotsUpdated.emit(self.odmr_plot_x, self.odmr_plot_y, self.odmr_plot_xy)
current_fit = self.fc.current_fit
self.sigOdmrFitUpdated.emit(self.odmr_fit_x, self.odmr_fit_y, {}, current_fit)
return
def set_trigger_pol(self, trigger_pol):
"""
Set trigger polarity of external microwave trigger (for list and sweep mode).
@param object trigger_pol: one of [TriggerEdge.RISING, TriggerEdge.FALLING]
@return object: actually set trigger polarity returned from hardware
"""
if self.getState() != 'locked':
self.mw_trigger_pol = self._mw_device.set_ext_trigger(trigger_pol)
else:
self.log.warning('set_trigger_pol failed. Logic is locked.')
update_dict = {'trigger_pol': self.mw_trigger_pol}
self.sigParameterUpdated.emit(update_dict)
return self.mw_trigger_pol
def set_clock_frequency(self, clock_frequency):
"""
Sets the frequency of the counter clock
@param int clock_frequency: desired frequency of the clock
@return int: actually set clock frequency
"""
# checks if scanner is still running
if self.getState() != 'locked' and isinstance(clock_frequency, (int, float)):
self.clock_frequency = int(clock_frequency)
else:
            self.log.warning('set_clock_frequency failed. Logic is either locked or the '
                             'input value is not an int or float.')
update_dict = {'clock_frequency': self.clock_frequency}
self.sigParameterUpdated.emit(update_dict)
return self.clock_frequency
def set_matrix_line_number(self, number_of_lines):
"""
Sets the number of lines in the ODMR matrix
@param int number_of_lines: desired number of matrix lines
@return int: actually set number of matrix lines
"""
if isinstance(number_of_lines, int):
self.number_of_lines = number_of_lines
else:
            self.log.warning('set_matrix_line_number failed. '
                             'Input parameter number_of_lines is not an integer.')
update_dict = {'number_of_lines': self.number_of_lines}
self.sigParameterUpdated.emit(update_dict)
return self.number_of_lines
def set_runtime(self, runtime):
"""
Sets the runtime for ODMR measurement
@param float runtime: desired runtime in seconds
@return float: actually set runtime in seconds
"""
if isinstance(runtime, (int, float)):
self.run_time = runtime
else:
            self.log.warning('set_runtime failed. Input parameter runtime is not an int or float.')
update_dict = {'runtime': self.run_time}
self.sigParameterUpdated.emit(update_dict)
return self.run_time
def set_cw_parameters(self, frequency, power):
""" Set the desired new cw mode parameters.
@param float frequency: frequency to set in Hz
@param float power: power to set in dBm
@return (float, float): actually set frequency in Hz, actually set power in dBm
"""
if self.getState() != 'locked' and isinstance(frequency, (int, float)) and isinstance(power, (int, float)):
constraints = self.get_hw_constraints()
frequency_to_set = constraints.frequency_in_range(frequency)
power_to_set = constraints.power_in_range(power)
self.cw_mw_frequency, self.cw_mw_power, dummy = self._mw_device.set_cw(frequency_to_set,
power_to_set)
else:
            self.log.warning('set_cw_parameters failed. Logic is either locked or an '
                             'input value is not an int or float.')
param_dict = {'cw_mw_frequency': self.cw_mw_frequency, 'cw_mw_power': self.cw_mw_power}
self.sigParameterUpdated.emit(param_dict)
return self.cw_mw_frequency, self.cw_mw_power
def set_sweep_parameters(self, start, stop, step, power):
""" Set the desired frequency parameters for list and sweep mode
@param float start: start frequency to set in Hz
@param float stop: stop frequency to set in Hz
@param float step: step frequency to set in Hz
@param float power: mw power to set in dBm
@return float, float, float, float: current start_freq, current stop_freq,
current freq_step, current power
"""
limits = self.get_hw_constraints()
if self.getState() != 'locked':
if isinstance(start, (int, float)):
self.mw_start = limits.frequency_in_range(start)
if isinstance(stop, (int, float)):
self.mw_stop = limits.frequency_in_range(stop)
if isinstance(step, (int, float)):
if self.mw_scanmode == MicrowaveMode.LIST:
self.mw_step = limits.list_step_in_range(step)
elif self.mw_scanmode == MicrowaveMode.SWEEP:
self.mw_step = limits.sweep_step_in_range(step)
if isinstance(power, (int, float)):
self.sweep_mw_power = limits.power_in_range(power)
else:
self.log.warning('set_sweep_parameters failed. Logic is locked.')
param_dict = {'mw_start': self.mw_start, 'mw_stop': self.mw_stop, 'mw_step': self.mw_step,
'sweep_mw_power': self.sweep_mw_power}
self.sigParameterUpdated.emit(param_dict)
return self.mw_start, self.mw_stop, self.mw_step, self.sweep_mw_power
def mw_cw_on(self):
"""
Switching on the mw source in cw mode.
@return str, bool: active mode ['cw', 'list', 'sweep'], is_running
"""
if self.getState() == 'locked':
self.log.error('Can not start microwave in CW mode. ODMRLogic is already locked.')
else:
self.cw_mw_frequency, \
self.cw_mw_power, \
mode = self._mw_device.set_cw(self.cw_mw_frequency, self.cw_mw_power)
param_dict = {'cw_mw_frequency': self.cw_mw_frequency, 'cw_mw_power': self.cw_mw_power}
self.sigParameterUpdated.emit(param_dict)
if mode != 'cw':
self.log.error('Switching to CW microwave output mode failed.')
else:
err_code = self._mw_device.cw_on()
if err_code < 0:
self.log.error('Activation of microwave output failed.')
mode, is_running = self._mw_device.get_status()
self.sigOutputStateUpdated.emit(mode, is_running)
return mode, is_running
def mw_sweep_on(self):
"""
Switching on the mw source in list/sweep mode.
@return str, bool: active mode ['cw', 'list', 'sweep'], is_running
"""
limits = self.get_hw_constraints()
if self.mw_scanmode == MicrowaveMode.LIST:
if np.abs(self.mw_stop - self.mw_start) / self.mw_step >= limits.list_maxentries:
self.log.warning('Number of frequency steps too large for microwave device. '
'Lowering resolution to fit the maximum length.')
self.mw_step = np.abs(self.mw_stop - self.mw_start) / (limits.list_maxentries - 1)
self.sigParameterUpdated.emit({'mw_step': self.mw_step})
elif self.mw_scanmode == MicrowaveMode.SWEEP:
if np.abs(self.mw_stop - self.mw_start) / self.mw_step >= limits.sweep_maxentries:
self.log.warning('Number of frequency steps too large for microwave device. '
'Lowering resolution to fit the maximum length.')
                self.mw_step = np.abs(self.mw_stop - self.mw_start) / (limits.sweep_maxentries - 1)
self.sigParameterUpdated.emit({'mw_step': self.mw_step})
if self.mw_scanmode == MicrowaveMode.SWEEP:
self.mw_start, \
self.mw_stop, \
self.mw_step, \
self.sweep_mw_power, \
mode = self._mw_device.set_sweep(self.mw_start, self.mw_stop,
self.mw_step, self.sweep_mw_power)
param_dict = {'mw_start': self.mw_start, 'mw_stop': self.mw_stop,
'mw_step': self.mw_step, 'sweep_mw_power': self.sweep_mw_power}
else:
freq_list = np.arange(self.mw_start, self.mw_stop + self.mw_step, self.mw_step)
freq_list, self.sweep_mw_power, mode = self._mw_device.set_list(freq_list,
self.sweep_mw_power)
param_dict = {'sweep_mw_power': self.sweep_mw_power}
self.sigParameterUpdated.emit(param_dict)
if mode != 'list' and mode != 'sweep':
self.log.error('Switching to list/sweep microwave output mode failed.')
elif self.mw_scanmode == MicrowaveMode.SWEEP:
err_code = self._mw_device.sweep_on()
if err_code < 0:
self.log.error('Activation of microwave output failed.')
else:
err_code = self._mw_device.list_on()
if err_code < 0:
self.log.error('Activation of microwave output failed.')
mode, is_running = self._mw_device.get_status()
self.sigOutputStateUpdated.emit(mode, is_running)
return mode, is_running
def reset_sweep(self):
"""
Resets the list/sweep mode of the microwave source to the first frequency step.
"""
if self.mw_scanmode == MicrowaveMode.SWEEP:
self._mw_device.reset_sweeppos()
elif self.mw_scanmode == MicrowaveMode.LIST:
self._mw_device.reset_listpos()
return
def mw_off(self):
""" Switching off the MW source.
@return str, bool: active mode ['cw', 'list', 'sweep'], is_running
"""
error_code = self._mw_device.off()
if error_code < 0:
self.log.error('Switching off microwave source failed.')
mode, is_running = self._mw_device.get_status()
self.sigOutputStateUpdated.emit(mode, is_running)
return mode, is_running
def _start_odmr_counter(self):
"""
Starting the ODMR counter and set up the clock for it.
@return int: error code (0:OK, -1:error)
"""
clock_status = self._odmr_counter.set_up_odmr_clock(clock_frequency=self.clock_frequency)
if clock_status < 0:
return -1
counter_status = self._odmr_counter.set_up_odmr()
if counter_status < 0:
self._odmr_counter.close_odmr_clock()
return -1
return 0
def _stop_odmr_counter(self):
"""
Stopping the ODMR counter.
@return int: error code (0:OK, -1:error)
"""
ret_val1 = self._odmr_counter.close_odmr()
if ret_val1 != 0:
self.log.error('ODMR counter could not be stopped!')
ret_val2 = self._odmr_counter.close_odmr_clock()
if ret_val2 != 0:
self.log.error('ODMR clock could not be stopped!')
# Check with a bitwise or:
return ret_val1 | ret_val2
def start_odmr_scan(self):
""" Starting an ODMR scan.
@return int: error code (0:OK, -1:error)
"""
with self.threadlock:
if self.getState() == 'locked':
self.log.error('Can not start ODMR scan. Logic is already locked.')
return -1
self.lock()
self._clearOdmrData = False
self.stopRequested = False
self.fc.clear_result()
self.elapsed_sweeps = 0
self.elapsed_time = 0.0
self._startTime = time.time()
self.sigOdmrElapsedTimeUpdated.emit(self.elapsed_time, self.elapsed_sweeps)
odmr_status = self._start_odmr_counter()
if odmr_status < 0:
mode, is_running = self._mw_device.get_status()
self.sigOutputStateUpdated.emit(mode, is_running)
self.unlock()
return -1
mode, is_running = self.mw_sweep_on()
if not is_running:
self._stop_odmr_counter()
self.unlock()
return -1
self._initialize_odmr_plots()
# initialize raw_data array
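            # One sweep acquires len(odmr_plot_x) samples at clock_frequency Hz,
            # so roughly run_time * clock_frequency / len(odmr_plot_x) sweeps fit
            # into the requested runtime; the factor 1.5 below adds headroom.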
estimated_number_of_lines = self.run_time * self.clock_frequency / self.odmr_plot_x.size
estimated_number_of_lines = int(1.5 * estimated_number_of_lines) # Safety
if estimated_number_of_lines < self.number_of_lines:
estimated_number_of_lines = self.number_of_lines
self.log.debug('Estimated number of raw data lines: {0:d}'
''.format(estimated_number_of_lines))
self.odmr_raw_data = np.zeros([estimated_number_of_lines, self.odmr_plot_x.size])
self.sigNextLine.emit()
return 0
def continue_odmr_scan(self):
""" Continue ODMR scan.
@return int: error code (0:OK, -1:error)
"""
with self.threadlock:
if self.getState() == 'locked':
self.log.error('Can not start ODMR scan. Logic is already locked.')
return -1
self.lock()
self.stopRequested = False
self.fc.clear_result()
self._startTime = time.time() - self.elapsed_time
self.sigOdmrElapsedTimeUpdated.emit(self.elapsed_time, self.elapsed_sweeps)
odmr_status = self._start_odmr_counter()
if odmr_status < 0:
mode, is_running = self._mw_device.get_status()
self.sigOutputStateUpdated.emit(mode, is_running)
self.unlock()
return -1
mode, is_running = self.mw_sweep_on()
if not is_running:
self._stop_odmr_counter()
self.unlock()
return -1
self.sigNextLine.emit()
return 0
def stop_odmr_scan(self):
""" Stop the ODMR scan.
@return int: error code (0:OK, -1:error)
"""
with self.threadlock:
if self.getState() == 'locked':
self.stopRequested = True
return 0
def clear_odmr_data(self):
"""¨Set the option to clear the curret ODMR data.
The clear operation has to be performed within the method
_scan_odmr_line. This method just sets the flag for that. """
with self.threadlock:
if self.getState() == 'locked':
self._clearOdmrData = True
return
def _scan_odmr_line(self):
""" Scans one line in ODMR
(from mw_start to mw_stop in steps of mw_step)
"""
with self.threadlock:
# If the odmr measurement is not running do nothing
if self.getState() != 'locked':
return
# Stop measurement if stop has been requested
if self.stopRequested:
self.stopRequested = False
self.mw_off()
self._stop_odmr_counter()
self.unlock()
return
# if during the scan a clearing of the ODMR data is needed:
if self._clearOdmrData:
self.elapsed_sweeps = 0
self._startTime = time.time()
# reset position so every line starts from the same frequency
self.reset_sweep()
# Acquire count data
new_counts = self._odmr_counter.count_odmr(length=self.odmr_plot_x.size)
if new_counts[0] == -1:
self.stopRequested = True
self.sigNextLine.emit()
return
# Add new count data to mean signal
if self._clearOdmrData:
self.odmr_plot_y[:] = 0
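            # Incremental average: with k = elapsed_sweeps completed sweeps, the
            # updated mean is (k * previous_mean + new_counts) / (k + 1), i.e. the
            # mean over all sweeps without having to store them separately.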
self.odmr_plot_y = (self.elapsed_sweeps * self.odmr_plot_y + new_counts) / (
self.elapsed_sweeps + 1)
# Add new count data to raw_data array and append if array is too small
if self._clearOdmrData:
self.odmr_raw_data[:, :] = 0
self._clearOdmrData = False
if self.elapsed_sweeps == (self.odmr_raw_data.shape[0] - 1):
expanded_array = np.zeros([self.odmr_raw_data.shape[0] + self.number_of_lines,
self.odmr_raw_data.shape[1]])
expanded_array[:self.elapsed_sweeps, :] = self.odmr_raw_data[
:self.elapsed_sweeps, :]
self.odmr_raw_data = expanded_array
self.log.warning('raw data array in ODMRLogic was not big enough for the entire '
'measurement. Array will be expanded.\nOld array shape was '
'({0:d}, {1:d}), new shape is ({2:d}, {3:d}).'
''.format(self.odmr_raw_data.shape[0]-self.number_of_lines,
self.odmr_raw_data.shape[1],
self.odmr_raw_data.shape[0],
self.odmr_raw_data.shape[1]))
# shift data in the array "up" and add new data at the "bottom"
self.odmr_raw_data[1:self.elapsed_sweeps + 1, :] = self.odmr_raw_data[
:self.elapsed_sweeps, :]
self.odmr_raw_data[0, :] = new_counts
# Set plot slice of matrix
self.odmr_plot_xy = self.odmr_raw_data[:self.number_of_lines, :]
# Update elapsed time/sweeps
self.elapsed_sweeps += 1
self.elapsed_time = time.time() - self._startTime
if self.elapsed_time >= self.run_time:
self.stopRequested = True
# Fire update signals
self.sigOdmrElapsedTimeUpdated.emit(self.elapsed_time, self.elapsed_sweeps)
self.sigOdmrPlotsUpdated.emit(self.odmr_plot_x, self.odmr_plot_y, self.odmr_plot_xy)
self.sigNextLine.emit()
return
def get_hw_constraints(self):
""" Return the names of all ocnfigured fit functions.
@return object: Hardware constraints object
"""
constraints = self._mw_device.get_limits()
return constraints
def get_fit_functions(self):
""" Return the hardware constraints/limits
@return list(str): list of fit function names
"""
return list(self.fc.fit_list)
def do_fit(self, fit_function=None, x_data=None, y_data=None):
"""
Execute the currently configured fit on the measurement data. Optionally on passed data
"""
if (x_data is None) or (y_data is None):
x_data = self.odmr_plot_x
y_data = self.odmr_plot_y
if fit_function is not None and isinstance(fit_function, str):
if fit_function in self.get_fit_functions():
self.fc.set_current_fit(fit_function)
else:
self.fc.set_current_fit('No Fit')
if fit_function != 'No Fit':
self.log.warning('Fit function "{0}" not available in ODMRLogic fit container.'
''.format(fit_function))
self.odmr_fit_x, self.odmr_fit_y, result = self.fc.do_fit(x_data, y_data)
if result is None:
result_str_dict = {}
else:
result_str_dict = result.result_str_dict
self.sigOdmrFitUpdated.emit(self.odmr_fit_x, self.odmr_fit_y,
result_str_dict, self.fc.current_fit)
return
def save_odmr_data(self, tag=None, colorscale_range=None, percentile_range=None):
""" Saves the current ODMR data to a file."""
if tag is None:
tag = ''
# two paths to save the raw data and the odmr scan data.
filepath = self._save_logic.get_path_for_module(module_name='ODMR')
filepath2 = self._save_logic.get_path_for_module(module_name='ODMR')
timestamp = datetime.datetime.now()
if len(tag) > 0:
filelabel = tag + '_ODMR_data'
filelabel2 = tag + '_ODMR_data_raw'
else:
filelabel = 'ODMR_data'
filelabel2 = 'ODMR_data_raw'
# prepare the data in a dict or in an OrderedDict:
data = OrderedDict()
data2 = OrderedDict()
data['frequency (Hz)'] = self.odmr_plot_x
data['count data (counts/s)'] = self.odmr_plot_y
data2['count data (counts/s)'] = self.odmr_raw_data[:self.elapsed_sweeps, :]
parameters = OrderedDict()
        parameters['Microwave Power (dBm)'] = self.sweep_mw_power
parameters['Run Time (s)'] = self.run_time
parameters['Number of frequency sweeps (#)'] = self.elapsed_sweeps
parameters['Start Frequency (Hz)'] = self.mw_start
parameters['Stop Frequency (Hz)'] = self.mw_stop
parameters['Step size (Hz)'] = self.mw_step
parameters['Clock Frequency (Hz)'] = self.clock_frequency
if self.fc.current_fit != 'No Fit':
parameters['Fit function'] = self.fc.current_fit
# add all fit parameter to the saved data:
for name, param in self.fc.current_fit_param.items():
parameters[name] = str(param)
fig = self.draw_figure(cbar_range=colorscale_range, percentile_range=percentile_range)
self._save_logic.save_data(data,
filepath=filepath,
parameters=parameters,
filelabel=filelabel,
fmt='%.6e',
delimiter='\t',
timestamp=timestamp,
plotfig=fig)
self._save_logic.save_data(data2,
filepath=filepath2,
parameters=parameters,
filelabel=filelabel2,
fmt='%.6e',
delimiter='\t',
timestamp=timestamp)
self.log.info('ODMR data saved to:\n{0}'.format(filepath))
return
def draw_figure(self, cbar_range=None, percentile_range=None):
""" Draw the summary figure to save with the data.
@param: list cbar_range: (optional) [color_scale_min, color_scale_max].
If not supplied then a default of data_min to data_max
will be used.
@param: list percentile_range: (optional) Percentile range of the chosen cbar_range.
@return: fig fig: a matplotlib figure object to be saved to file.
"""
freq_data = self.odmr_plot_x
count_data = self.odmr_plot_y
fit_freq_vals = self.odmr_fit_x
fit_count_vals = self.odmr_fit_y
matrix_data = self.odmr_plot_xy
# If no colorbar range was given, take full range of data
if cbar_range is None:
cbar_range = np.array([np.min(matrix_data), np.max(matrix_data)])
else:
cbar_range = np.array(cbar_range)
prefix = ['', 'k', 'M', 'G', 'T']
prefix_index = 0
# Rescale counts data with SI prefix
while np.max(count_data) > 1000:
count_data = count_data / 1000
fit_count_vals = fit_count_vals / 1000
prefix_index = prefix_index + 1
counts_prefix = prefix[prefix_index]
# Rescale frequency data with SI prefix
prefix_index = 0
while np.max(freq_data) > 1000:
freq_data = freq_data / 1000
fit_freq_vals = fit_freq_vals / 1000
prefix_index = prefix_index + 1
mw_prefix = prefix[prefix_index]
# Rescale matrix counts data with SI prefix
prefix_index = 0
while np.max(matrix_data) > 1000:
matrix_data = matrix_data / 1000
cbar_range = cbar_range / 1000
prefix_index = prefix_index + 1
cbar_prefix = prefix[prefix_index]
# Use qudi style
plt.style.use(self._save_logic.mpl_qd_style)
# Create figure
fig, (ax_mean, ax_matrix) = plt.subplots(nrows=2, ncols=1)
ax_mean.plot(freq_data, count_data, linestyle=':', linewidth=0.5)
# Do not include fit curve if there is no fit calculated.
if max(fit_count_vals) > 0:
ax_mean.plot(fit_freq_vals, fit_count_vals, marker='None')
ax_mean.set_ylabel('Fluorescence (' + counts_prefix + 'c/s)')
ax_mean.set_xlim(np.min(freq_data), np.max(freq_data))
matrixplot = ax_matrix.imshow(matrix_data,
cmap=plt.get_cmap('inferno'), # reference the right place in qd
origin='lower',
vmin=cbar_range[0],
vmax=cbar_range[1],
extent=[np.min(freq_data),
np.max(freq_data),
0,
self.number_of_lines
],
aspect='auto',
interpolation='nearest')
ax_matrix.set_xlabel('Frequency (' + mw_prefix + 'Hz)')
ax_matrix.set_ylabel('Scan #')
# Adjust subplots to make room for colorbar
fig.subplots_adjust(right=0.8)
# Add colorbar axis to figure
cbar_ax = fig.add_axes([0.85, 0.15, 0.02, 0.7])
# Draw colorbar
cbar = fig.colorbar(matrixplot, cax=cbar_ax)
cbar.set_label('Fluorescence (' + cbar_prefix + 'c/s)')
# remove ticks from colorbar for cleaner image
cbar.ax.tick_params(which=u'both', length=0)
# If we have percentile information, draw that to the figure
if percentile_range is not None:
cbar.ax.annotate(str(percentile_range[0]),
xy=(-0.3, 0.0),
xycoords='axes fraction',
horizontalalignment='right',
verticalalignment='center',
rotation=90
)
cbar.ax.annotate(str(percentile_range[1]),
xy=(-0.3, 1.0),
xycoords='axes fraction',
horizontalalignment='right',
verticalalignment='center',
rotation=90
)
cbar.ax.annotate('(percentile)',
xy=(-0.3, 0.5),
xycoords='axes fraction',
horizontalalignment='right',
verticalalignment='center',
rotation=90
)
return fig
def perform_odmr_measurement(self, freq_start, freq_step, freq_stop, power, runtime,
fit_function='No Fit', save_after_meas=True, name_tag=''):
""" An independant method, which can be called by a task with the proper input values
to perform an odmr measurement.
@return
"""
timeout = 30
start_time = time.time()
while self.getState() != 'idle':
time.sleep(0.5)
timeout -= (time.time() - start_time)
if timeout <= 0:
self.log.error('perform_odmr_measurement failed. Logic module was still locked '
'and 30 sec timeout has been reached.')
return {}
# set all relevant parameter:
self.set_power(power)
self.set_sweep_frequencies(freq_start, freq_stop, freq_step)
self.set_runtime(runtime)
# start the scan
self.start_odmr_scan()
# wait until the scan has started
while self.getState() != 'locked':
time.sleep(1)
# wait until the scan has finished
while self.getState() == 'locked':
time.sleep(1)
# Perform fit if requested
if fit_function != 'No Fit':
self.do_fit(fit_function)
fit_params = self.fc.current_fit_param
else:
fit_params = None
# Save data if requested
if save_after_meas:
self.save_odmr_data(tag=name_tag)
return self.odmr_plot_x, self.odmr_plot_y, fit_params
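# Usage sketch (not part of the original module, added for illustration). In qudi the logic
# instance would normally be obtained through a Connector, so the `odmr` handle below is a
# hypothetical stand-in for that connection, and the sweep parameters are assumed values.
def run_example_odmr_scan(odmr):
    """Drive one ODMR sweep with assumed parameters and return (freqs, counts, fit_params)."""
    return odmr.perform_odmr_measurement(freq_start=2.82e9, freq_step=2e6, freq_stop=2.92e9,
                                         power=-20, runtime=60, fit_function='No Fit',
                                         save_after_meas=True, name_tag='example')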
|
gpl-3.0
|
arjoly/scikit-learn
|
examples/linear_model/plot_ols_3d.py
|
350
|
2040
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
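# Hedged aside (not part of the original example): the docstring's claim can be checked
# numerically by fitting each feature on its own and comparing test-set R^2 scores; the exact
# numbers depend on the train/test split defined above.
for single_feature in (0, 1):
    single_model = linear_model.LinearRegression()
    single_model.fit(diabetes.data[:-20, [single_feature]], y_train)
    print("R^2 using only feature %d: %.3f"
          % (single_feature + 1,
             single_model.score(diabetes.data[-20:, [single_feature]], y_test)))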
|
bsd-3-clause
|
HIPS/autograd
|
examples/fluidsim/wing.py
|
3
|
6134
|
from __future__ import absolute_import
from __future__ import print_function
from builtins import range
import autograd.numpy as np
from autograd import value_and_grad
from scipy.optimize import minimize
import matplotlib.pyplot as plt
import os
rows, cols = 40, 60
# Fluid simulation code based on
# "Real-Time Fluid Dynamics for Games" by Jos Stam
# http://www.intpowertechcorp.com/GDC03.pdf
def occlude(f, occlusion):
return f * (1 - occlusion)
def project(vx, vy, occlusion):
"""Project the velocity field to be approximately mass-conserving,
using a few iterations of Gauss-Seidel."""
p = np.zeros(vx.shape)
div = -0.5 * (np.roll(vx, -1, axis=1) - np.roll(vx, 1, axis=1)
+ np.roll(vy, -1, axis=0) - np.roll(vy, 1, axis=0))
div = make_continuous(div, occlusion)
for k in range(50):
p = (div + np.roll(p, 1, axis=1) + np.roll(p, -1, axis=1)
+ np.roll(p, 1, axis=0) + np.roll(p, -1, axis=0))/4.0
p = make_continuous(p, occlusion)
vx = vx - 0.5*(np.roll(p, -1, axis=1) - np.roll(p, 1, axis=1))
vy = vy - 0.5*(np.roll(p, -1, axis=0) - np.roll(p, 1, axis=0))
vx = occlude(vx, occlusion)
vy = occlude(vy, occlusion)
return vx, vy
def advect(f, vx, vy):
"""Move field f according to x and y velocities (u and v)
using an implicit Euler integrator."""
rows, cols = f.shape
cell_xs, cell_ys = np.meshgrid(np.arange(cols), np.arange(rows))
center_xs = (cell_xs - vx).ravel()
center_ys = (cell_ys - vy).ravel()
# Compute indices of source cells.
left_ix = np.floor(center_ys).astype(int)
top_ix = np.floor(center_xs).astype(int)
rw = center_ys - left_ix # Relative weight of right-hand cells.
bw = center_xs - top_ix # Relative weight of bottom cells.
left_ix = np.mod(left_ix, rows) # Wrap around edges of simulation.
right_ix = np.mod(left_ix + 1, rows)
top_ix = np.mod(top_ix, cols)
bot_ix = np.mod(top_ix + 1, cols)
# A linearly-weighted sum of the 4 surrounding cells.
flat_f = (1 - rw) * ((1 - bw)*f[left_ix, top_ix] + bw*f[left_ix, bot_ix]) \
+ rw * ((1 - bw)*f[right_ix, top_ix] + bw*f[right_ix, bot_ix])
return np.reshape(flat_f, (rows, cols))
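# Illustrative check (not part of the original file): with a uniform velocity vx = 1, vy = 0 the
# backward trace lands exactly one cell upstream, so advection reduces to a periodic shift, i.e.
# np.allclose(advect(f, np.ones(f.shape), np.zeros(f.shape)), np.roll(f, 1, axis=1))
# holds for any 2D field f.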
def make_continuous(f, occlusion):
non_occluded = 1 - occlusion
num = np.roll(f, 1, axis=0) * np.roll(non_occluded, 1, axis=0)\
+ np.roll(f, -1, axis=0) * np.roll(non_occluded, -1, axis=0)\
+ np.roll(f, 1, axis=1) * np.roll(non_occluded, 1, axis=1)\
+ np.roll(f, -1, axis=1) * np.roll(non_occluded, -1, axis=1)
den = np.roll(non_occluded, 1, axis=0)\
+ np.roll(non_occluded, -1, axis=0)\
+ np.roll(non_occluded, 1, axis=1)\
+ np.roll(non_occluded, -1, axis=1)
return f * non_occluded + (1 - non_occluded) * num / ( den + 0.001)
def sigmoid(x):
return 0.5*(np.tanh(x) + 1.0) # Output ranges from 0 to 1.
def simulate(vx, vy, num_time_steps, occlusion, ax=None, render=False):
occlusion = sigmoid(occlusion)
# Disallow occlusion outside a certain area.
mask = np.zeros((rows, cols))
mask[10:30, 10:30] = 1.0
occlusion = occlusion * mask
# Initialize smoke bands.
red_smoke = np.zeros((rows, cols))
red_smoke[rows//4:rows//2] = 1
blue_smoke = np.zeros((rows, cols))
blue_smoke[rows//2:3*rows//4] = 1
print("Running simulation...")
vx, vy = project(vx, vy, occlusion)
for t in range(num_time_steps):
plot_matrix(ax, red_smoke, occlusion, blue_smoke, t, render)
vx_updated = advect(vx, vx, vy)
vy_updated = advect(vy, vx, vy)
vx, vy = project(vx_updated, vy_updated, occlusion)
red_smoke = advect(red_smoke, vx, vy)
red_smoke = occlude(red_smoke, occlusion)
blue_smoke = advect(blue_smoke, vx, vy)
blue_smoke = occlude(blue_smoke, occlusion)
plot_matrix(ax, red_smoke, occlusion, blue_smoke, num_time_steps, render)
return vx, vy
def plot_matrix(ax, r, g, b, t, render=False):
if ax:
plt.cla()
ax.imshow(np.concatenate((r[...,np.newaxis], g[...,np.newaxis], b[...,np.newaxis]), axis=2))
ax.set_xticks([])
ax.set_yticks([])
plt.draw()
if render:
plt.savefig('step{0:03d}.png'.format(t), bbox_inches='tight')
plt.pause(0.001)
if __name__ == '__main__':
simulation_timesteps = 20
print("Loading initial and target states...")
init_vx = np.ones((rows, cols))
init_vy = np.zeros((rows, cols))
# Initialize the occlusion to be a block.
init_occlusion = -np.ones((rows, cols))
init_occlusion[15:25, 15:25] = 0.0
init_occlusion = init_occlusion.ravel()
def drag(vx): return np.mean(init_vx - vx)
def lift(vy): return np.mean(vy - init_vy)
def objective(params):
cur_occlusion = np.reshape(params, (rows, cols))
final_vx, final_vy = simulate(init_vx, init_vy, simulation_timesteps, cur_occlusion)
return -lift(final_vy) / drag(final_vx)
# Specify gradient of objective function using autograd.
objective_with_grad = value_and_grad(objective)
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111, frameon=False)
def callback(weights):
cur_occlusion = np.reshape(weights, (rows, cols))
simulate(init_vx, init_vy, simulation_timesteps, cur_occlusion, ax)
print("Rendering initial flow...")
callback(init_occlusion)
print("Optimizing initial conditions...")
result = minimize(objective_with_grad, init_occlusion, jac=True, method='CG',
options={'maxiter':50, 'disp':True}, callback=callback)
print("Rendering optimized flow...")
final_occlusion = np.reshape(result.x, (rows, cols))
simulate(init_vx, init_vy, simulation_timesteps, final_occlusion, ax, render=True)
print("Converting frames to an animated GIF...") # Using imagemagick.
os.system("convert -delay 5 -loop 0 step*.png "
"-delay 250 step{0:03d}.png wing.gif".format(simulation_timesteps))
os.system("rm step*.png")
|
mit
|
USCLiquidPropulsionLaboratory/Engine-sizing-snake
|
GOX_kero.py
|
1
|
36017
|
## GOX-kerosene sim
#@ Author Juha Nieminen
#import sys
#sys.path.insert(0, '/Users/juhanieminen/Documents/adamrocket')
import RocketComponents as rc
from physical_constants import poise, inches, Runiv, gallons, lbm, \
gearth, atm, psi, lbf
from numpy import pi, linspace, cos, radians, sqrt, exp, log, array, full, ceil
from scipy import optimize as opt
import matplotlib.pyplot as plt
from matplotlib import collections as mc
import Flows1D as flows
#DESIGN VARIABLES____________________________________________________________________________________
# nominal parameters
Preg_ox = 1100*psi # regulated GOX outlet pressure [Pa]
Preg_N2 = 1060*psi # regulated N2 outlet pressure [Pa]
mdot_fuel_nom = 0.2 # This is only for cooling jacket pressure drop purposes [kg/s]
Pdrop_jacket_nom= 1*psi # Cooling jacket pressure drop at mdot_nominal [Pa]
OF_nom = 2.25 # Oxidizer-to-fuel ratio. This has only effect on initial guesses during solving
# Pressurant tank dimensions
Vprestank = 0.050 # N2 pressurant tank volume [m3]
# Propellant tank dimensions
Vfueltank = 0.006 # fuel tank volume
Voxtank = 0.050 # ox tank volume [m3]
# Tubing
d_presfuel_tube = 1.0*inches # pressurant tank -> fuel tank tube diameter [m]
L_presfuel_tube = 0.5 # pressurant tank -> fuel tank tube length [m]
d_oxtube = 0.87*inches # ox tank -> manifold tube diameter [m]
L_oxtube = 2.4 # ox tank -> manifold tube length [m]
d_fueltube = 0.87*inches # fuel tank -> manifold tube diameter [m]
L_fueltube = 3.0 # fuel tank -> manifold tube length [m]
roughness = 0.005 # epsilon/diameter, dimensionless
# Valves
Cv_ox_check = 4.7 # oxidizer check valve flow coefficient, dimensionless
Pcrack_ox_check = 10*psi # oxidizer check valve opening pressure [Pa]
Cv_pres_check = 4.7 # nitrogen check valve flow coefficient, dimensionless
Pcrack_pres_check = 10*psi # nitrogen check valve opening pressure [Pa]
Cv_pres_valve = 9 # nitrogen solenoid valve flow coefficient, dimensionless
Cv_ox_valve = 9 # oxidizer solenoid valve flow coefficient, dimensionless
Cv_fuel_valve = 9 # fuel solenoid valve flow coefficient, dimensionless
# Injector
cd_oxInjector = 0.767 # orifice discharge coefficient
diameter_oxInjectorHoles = 2.54e-3 #number xx drill # ox orifice diameter [m]
#length_oxHole = 0.005 # ox orifice length [m]
numOxInjectorHoles = 24 # number of ox orifices in the injector
area_oxInjector = numOxInjectorHoles*pi*diameter_oxInjectorHoles**2/4 # total ox flow area [m2]
cd_fuelInjector = 0.767 # orifice discharge coefficient
diameter_fuelInjectorHoles = 0.508e-3 #number xx drill # fuel orifice diameter [m]
numFuelHoles = 64 # number of fuel orifices in the injector
area_fuelInjector = numFuelHoles*pi*diameter_fuelInjectorHoles**2/4 # total fuel flow area [m2]
# Define initial/nominal conditions in the chamber (obtained from CEA code assuming OFratio = 2.25)
TfireInit = 293 # initial flame temperature [K]
Pfire = 1*atm # initial chamber pressure [Pa]
gammaFireInit = 1.148 # dimensionless
ga = gammaFireInit
mbarFireInit = 21.87 # combustion products' initial molecular mass [kg/kmol]
RfireInit = Runiv/mbarFireInit # combustion products' initial specific gas constant [J/kgK]
Pambient = atm # ambient pressure [Pa]
# Nozzle and chamber
d_nozzleThroat = 1.0*inches # throat diameter [m]
A_nozzleThroat = pi*d_nozzleThroat**2/4 # throat area [m2]
area_ratio = 7.46 # nozzle exit-to-throat area ratio
A_nozzleExit = area_ratio*A_nozzleThroat # nozzle exit area [m2]
d_nozzleExit = sqrt(4*A_nozzleExit/pi) # nozzle exit diameter [m]
Dchamber = 0.08 # chamber diameter [m]
Achamber = pi*Dchamber**2/4 # chamber cross sectional area [m2]
Lchamber = 0.14 # chamber length [m]
Vchamber = Achamber*Lchamber # chamber volume [m3]
Lstar = Vchamber/A_nozzleThroat # chamber characteristic length [m]
Mc_nom = flows.getIsentropicMs(A_nozzleThroat, Achamber, gammaFireInit)[0] # nominal chamber Mach number
print("throat diameter is", '%.1f'%(d_nozzleThroat*1000), 'mm')
print("exit diameter is", '%.1f'%(d_nozzleExit*1000), 'mm')
print("chamber volume is", '%.5f'%Vchamber, "m3")
print("chamber Lstar is", '%.2f'%Lstar, "m")
print("chamber Mach_nom is", '%.2f'%Mc_nom)
# INITIAL CONDITIONS____________________________________________________________________________________________
#Define initial conditions in the tanks
TfuelPresStart = 293 # Fuel pressurant (=nitrogen) temp [K]
FFfueltankStart = 0.5 # Fuel tank fill fraction (Vfuel/Vtank)
PfuelPrestankStart = 2216*psi - Preg_N2*Vfueltank*FFfueltankStart/Vprestank # Fuel pressurant tank pressure once fueltank has been pressurized [Pa]
ToxStart = 293 # Oxidizer (GOX) temp [K]
PoxtankStart = 2216*psi # Oxidizer tank pressure [Pa]
TfuelStart = 293 # Fuel temp [K]
PfueltankStart = Preg_N2 -10*psi # Fuel tank pressure [Pa] (-10psi helps convergence on first timestep)
# initialize propellants
nitrogen = rc.NitrogenFluid()
GOX = rc.GOXFluid()
kerosene = rc.Kerosene()
#initialize nozzle and chamber
nozzle = rc.ConvergingDivergingNozzle(A_nozzleExit, A_nozzleThroat)
mdot_init_noz = nozzle.getmdot(gammaFireInit, GOX.R, Pfire, TfireInit, atm)
chamber = rc.GOXKeroCombustionChamber(nozzle, Vchamber, TfireInit, ga, mbarFireInit, Pfire, atm, mdot_init_noz)
#initialize injector orifices
ox_orifice = rc.GasOrifice(area_oxInjector, cd_oxInjector, GOX.gamma, GOX.R)
fuel_orifice = rc.LiquidOrifice(area_fuelInjector, cd_fuelInjector )
#initialize pressurant tanks
fuelprestank = rc.IdealgasTank(nitrogen, Vprestank, TfuelPresStart, PfuelPrestankStart)
#initialize propellant tanks
oxtank = rc.IdealgasTank(GOX, Voxtank, ToxStart, PoxtankStart)
fueltank = rc.LiquidPropellantTank(nitrogen, kerosene, Vfueltank, TfuelStart, TfuelPresStart,\
PfueltankStart, FFfueltankStart, Preg_N2)
#initialize pressure regulators
N2_regu = rc.PressureRegulator(Preg_N2, nitrogen)
ox_regu = rc.PressureRegulator(Preg_ox, GOX)
#initialize solenoids
fuelSole = rc.IncompressibleFlowSolenoid( Cv_fuel_valve)
oxSole = rc.CompressibleFlowSolenoid( Cv_ox_valve, GOX)
presSole = rc.CompressibleFlowSolenoid( Cv_pres_valve, nitrogen)
#initialize check valves
ox_check = rc.CompressibleFlowCheckValve( Cv_ox_check, Pcrack_ox_check, GOX)
pres_check = rc.CompressibleFlowCheckValve( Cv_pres_check, Pcrack_pres_check, nitrogen)
#initialize tubing
ox_tube = rc.RoughStraightCylindricalTube(d_oxtube, L_oxtube, roughness, True)
fuel_tube = rc.RoughStraightCylindricalTube(d_fueltube, L_fueltube, roughness, True)
presfuel_tube = rc.RoughStraightCylindricalTube(d_presfuel_tube, L_presfuel_tube, roughness, True)
#initialize cooling jacket
jacket = rc.CoolingJacket(mdot_fuel_nom, Pdrop_jacket_nom)
#initialize arrays for various data time histories
T_chamber = [chamber.T] # combustion chamber temperature [K]
Pchamber = [chamber.get_P_inlet()] # combustion chamber pressure [Pa]
Pexit = [nozzle.getPe(Pchamber[0], gammaFireInit, Pambient)] # nozzle exit pressure [Pa]
Mexit = [nozzle.getMe(Pchamber[0], gammaFireInit, Pambient)] # nozzle exit Mach number
cmass = [chamber.m] # resident propellant mass in combustion chamber [kg]
mdot_nozzle = [nozzle.getmdot(gammaFireInit, RfireInit, chamber.get_P_inlet(), chamber.T, chamber.Pa)] # mass flow out of the nozzle [kg/s]
Poxtank = [oxtank.getPtank()] # ox tank pressure [Pa]
Toxtank = [oxtank.getTtank()] # ox tank temperature [K]
mox = [oxtank.getM()] # oxidizer mass in tank [kg]
Pfueltank = [fueltank.getPtank()] # fuel tank pressure [Pa]
Tfueltank = [fueltank.getTpres()] # pressurant temperature in fuel tank[K]
mPresFueltank = [fueltank.getMpres()] # pressurant mass in fuel tank [kg]
mfuel = [fueltank.getMprop()] # fuel mass in tank [kg]
FFfueltank = [fueltank.getFF()] # fuel tank fill fraction defined as Vfuel/(Vfueltank)
TfuelPres = [fuelprestank.getTtank()] # temperature in fuel pressurant tank [K]
PfuelPres = [fuelprestank.getPtank()] # pressure in fuel pressurant tank [Pa]
mfuelPres = [fuelprestank.getM()] # pressurant mass in fuel pressurant tank [Pa]
time = [0] # time array [s]
mdot_ox = [0] # ox mass flow out of the tank [kg/s]
P1ox = [0] # ox tank pressure [Pa]
P2ox = [0] # ox regulator outlet pressure [Pa]
P3ox = [0] # ox check valve outlet pressure [Pa]
P4ox = [0] # ox flow solenoid outlet pressure [Pa]
P5ox = [0] # ox injector inlet pressure [Pa]
T1ox = [0] # ox tank temp [K]
T2ox = [0] # ox regulator output temp [K]
T3ox = [0] # ox check valve outlet temp [K]
T4ox = [0] # ox flow solenoid outlet temp [K]
T5ox = [0] # ox injector inlet temp [K]
mdot_fuel = [0] # fuel mass flow out of the tank [kg/s]
rooFuel = fueltank.propellant.density # fuel density, assumed constant [kg/m3]
P1fuel = [0] # fuel tank pressure [Pa]
P2fuel = [0] # fuel solenoid outlet pressure [Pa]
P3fuel = [0] # fuel cooling jacket inlet pressure [Pa]
P4fuel = [0] # fuel injector inlet pressure [Pa]
mdot_fuel_pres = [0] # fuel pressurant mass flow rate [kg/s]
P3pres = [0] # pressurant pressure at check valve outlet [Pa]
P4pres = [0] # pressurant pressure at solenoid valve outlet [Pa]
mTotal = [0] # propellant mass in the system [kg]
mprs = [mfuelPres[0]+mPresFueltank[0]] # pressurant mass in the system [kg]
OFratio = [0] # oxidizer to fuel mass flow ratio
Isp = [0] # specific impulse [s]
Thrust = [nozzle.getThrust(chamber.get_P_inlet(), Pambient, gammaFireInit) ] # rocket thrust [N]
#SIMULATE_______________________________________________________________________________________________________
# using orifices as follows: ejecting GOX from manifold to chamber, fuel liq-to-liq from manifold to chamber
print("")
print("STARTING SIM...")
print("")
print("mOxStart is", '%.2f'%mox[0], "kg")
print("mKerostart is", mfuel[0], "kg")
print("mN2start in N2 tank is", '%.2f'%mfuelPres[0], "kg")
print("mN2start in fuel tank is", '%.2f'%(fueltank.getMpres()), "kg")
# The first step is to solve oxidizer and fuel mass flow rates from the tank to combustion chamber.
# definitions:
# P1ox = GOX tank pressure
# P2ox = regulation pressure
# P3ox = check valve outlet pressure
# P4ox = ox valve outlet pressure
# P5ox = injector inlet, pressure
# (P1ox-P2ox) = regulator pressure drop, known constant
# (P2ox-P3ox) = ox check valve pressure drop, eq 1
# (P3ox-P4ox) = ox flow solenoid pressure drop, eq 2
# (P4ox-P5ox) = ox tubing pressure drop, eq 3
# (P5ox-Pchamber) = ox injector pressure drop, eq 4
# P1pres = Nitrogen tank pressure
# P2pres = Regulation pressure
# P3pres = Check valve outlet pressure
# P4pres = Nitrogen solenoid outlet
# P5pres = Nitrogen tubing outlet = fuel tank pressure
# (P2pres-P3pres) = Nitrogen check valve pressure drop
# (P3pres-P4pres) = Nitrogen solenoid valve pressure drop
# (P4pres-P5pres) = Nitrogen tubing pressure drop
# P1fuel = fuel tank pressure
# P2fuel = fuel valve outlet pressure
# P3fuel = cooling jacket inlet pressure
# P4fuel = injector inlet pressure
# (P1fuel-P2fuel) = fuel valve pressure drop, eq1
# (P2fuel-P3fuel) = fuel tubing pressure drop, eq2
# (P3fuel-P4fuel) = cooling jacket pressure drop, eq3
# (P4fuel-Pchamber) = injector pressure drop, eq4
# In the case of the oxidizer, P2 and Pchamber are known, so one must solve for P3, P4 and P5.
# The fourth unknown is the mass flow rate. The four equations are the check valve, solenoid,
# tubing and injector pressure drops. These equations are defined in the oxfunks method below,
# and the underlying physics are in RocketComponents.py under the respective classes.
# With the pressurant, P2 (regulation pressure) and P5 (fuel tank pressure) are known, so one
# must solve for P3 and P4. The third unknown is the pressurant mass flow rate. The equations to
# be solved are the pressure drops over the check valve, the solenoid valve and the tubing.
# With the fuel, P1 and Pchamber are known, so one must solve for P2, P3 and P4. The fourth
# unknown is the mass flow rate.
# fsolve requires sensible initial guesses for all unknowns. They are established by guessing
# the mass flow rate, because all other pressures trickle down from that.
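# Minimal sketch of the solving pattern used below (illustrative only, not part of the original
# script): every unknown pressure and the mass flow rate enter residuals that vanish at the
# consistent operating point, and fsolve drives all residuals to zero simultaneously. The loss
# coefficients k1 and k2 are made up for demonstration.
def _example_pressure_chain(P_in=1000*psi, P_out=300*psi):
    k1, k2 = 2.0e9, 3.0e9                       # hypothetical dP = k*mdot**2 loss coefficients
    def residuals(U):
        P_mid, mdot = U
        return [P_in - P_mid - k1*mdot**2,      # pressure drop over the first element
                P_mid - P_out - k2*mdot**2]     # pressure drop over the second element
    return opt.fsolve(residuals, [0.5*(P_in + P_out), 0.1])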
timestep_small = 5e-6 # seconds, used during initial transient
timestep_nom = 0.0001 # seconds, used after 0.01 seconds of simulation time
t_transient = 0.01 # seconds, estimated time of initial transient
t_simulation = 3 # seconds
if t_simulation <= t_transient:
simsteps = int(ceil(t_simulation/timestep_small))
else:
simsteps = int(ceil( t_transient/timestep_small + (t_simulation-t_transient)/timestep_nom ))
print("Sim time is", t_simulation, "s, number of simsteps is", simsteps)
i=0
for i in range(0, simsteps):
if time[i] < 0.01:
timestep = 1e-5 # use shorter timestep during initial transient
else: timestep = timestep_nom # proceed with nominal timestep
#while True:
#print("i=", i)
P1ox = Poxtank[i]
P2ox = Preg_ox
P1fuel = Pfueltank[i]
Pchamb = Pchamber[i]
mu_ox = GOX.getViscosity(Preg_ox, Toxtank[i])
roo_ox = GOX.getDensity(Preg_ox, Toxtank[i])
Tox = Toxtank[i]
Tpres = TfuelPres[i]
mu_fuel = kerosene.mu
mu_N2_fuel = nitrogen.getViscosity(Preg_N2, TfuelPres[i])
roo_N2_fuel = nitrogen.getDensity(Preg_N2, TfuelPres[i])
    if i==0: # First guesses. Based on choked flow at the ox injector (multiplied by 0.7 to improve convergence)
mdot_ox_guess = ox_orifice.getMdot(Preg_ox, Pfire, Tox)*0.7
P3ox_guess = P2ox - ox_check.getPressureDrop(mdot_ox_guess, P2ox, GOX.roo_std, roo_ox, Tox)
P4ox_guess = P3ox_guess - oxSole.getPressureDrop(mdot_ox_guess, P3ox_guess, roo_ox)
P5ox_guess = P4ox_guess - ox_tube.getPressureDrop(mdot_ox_guess, mu_ox, roo_ox)
#print("mdot_ox_guess is", mdot_ox_guess)
#print("P2ox is", P2ox/psi, "psi")
#print("P3ox_guess is", P3ox_guess/psi, "psi")
#print("P4ox_guess is", P4ox_guess/psi, "psi")
#print("P5ox_guess is", P5ox_guess/psi, "psi")
#print("P_chamber is", Pchamber[i]/psi, "psi")
mdot_fuel_guess = mdot_ox_guess/OF_nom
P2fuel_guess = P1fuel - fuelSole.getPressureDrop(mdot_fuel_guess, rooFuel)
P3fuel_guess = P2fuel_guess - fuel_tube.getPressureDrop(mdot_fuel_guess, mu_fuel, rooFuel)
P4fuel_guess = P3fuel_guess - jacket.getPressureDrop(mdot_fuel_guess)
mdot_pres_guess = mdot_fuel_guess*roo_N2_fuel/rooFuel #volumetric flowrates of fuel and pressurant are the same
P3pres_guess = Preg_N2 - pres_check.getPressureDrop(mdot_pres_guess, Preg_N2, nitrogen.roo_std, roo_N2_fuel, Tpres)
P4pres_guess = P3pres_guess - presSole.getPressureDrop(mdot_pres_guess, P3pres_guess, roo_N2_fuel)
P5pres_guess = P4pres_guess - presfuel_tube.getPressureDrop(mdot_pres_guess, mu_N2_fuel, roo_N2_fuel)
#print("mdot_pres_guess is is", mdot_pres_guess, "kg/s")
#print("P3pres_guess is is", P3pres_guess/psi, "psi")
#print("P4pres_guess is is", P4pres_guess/psi, "psi")
#print("P5pres_guess is is", P5pres_guess/psi, "psi")
#print("mdot_fuel_guess is", mdot_fuel_guess)
#print("P2fuel is", P2fuel/psi, "psi")
#print("P3fuel_guess is is", P3fuel_guess/psi, "psi")
#print("P4fuel_guess is is", P4fuel_guess/psi, "psi")
#print("P5fuel_guess is is", P5fuel_guess/psi, "psi")
#print("P_chamber is", Pchamber[i]/psi, "psi")
else : # guesses for further steps. Use values from previous timestep
mdot_ox_guess = mdot_ox[i-1] #ox_orifice.getMdot(Preg_ox, Pchamb, Tox)
#P3ox_guess = P2ox - oxSole.getPressureDrop(mdot_ox_guess, P2ox,roo_ox)
#P4ox_guess = P3ox_guess - ox_tube.getPressureDrop(mdot_ox_guess, mu_ox, roo_ox)
P3ox_guess = P3ox[i-1]
P4ox_guess = P4ox[i-1]
P5ox_guess = P5ox[i-1]
#print("mdot_ox_guess is", mdot_ox_guess)
#print("P2ox is", P2ox/psi, "psi")
#print("P3ox_guess is", P3ox_guess/psi, "psi")
#print("P4ox_guess is", P4ox_guess/psi, "psi")
#print("P_chamber is", Pchamber[i]/psi, "psi")
mdot_fuel_guess = mdot_fuel[i-1] #mdot_ox_guess/OF_nom*1
P2fuel_guess = P2fuel[i-1]
P3fuel_guess = P3fuel[i-1]
P4fuel_guess = P4fuel[i-1]
#print("P2fuel is", P2fuel/psi, "psi")
#print("P3fuel_guess is is", P3fuel_guess/psi, "psi")
#print("P4fuel_guess is is", P4fuel_guess/psi, "psi")
#print("P_chamber is", Pchamber[i]/psi, "psi")
mdot_pres_guess = mdot_fuel_pres[i-1]
P3pres_guess = P3pres[i-1]
P4pres_guess = P4pres[i-1]
initial_ox_guesses = [P3ox_guess, P4ox_guess, P5ox_guess, mdot_ox_guess]
initial_fuel_guesses= [P2fuel_guess, P3fuel_guess, P4fuel_guess, mdot_fuel_guess]
initial_pres_guesses= [P3pres_guess, P4pres_guess, mdot_pres_guess]
def oxfunks(U): # defines the system of equations and unknowns U to be solved
P3 = U[0]
P4 = U[1]
P5 = U[2]
mdot = U[3]
#print("nyt TAALLA")
#print("P3 as U0 is", P3/psi, "psi")
#print("P4 as U1 is", P4/psi, "psi")
#print("P5 as U2 is", P5/psi, "psi")
#print("mdot as U3 is", mdot, "kg/s")
#print("mdot is", mdot, "kg/s")
#print("P4ox is", P4/psi, "psi")
#print("Pchamb is", Pchamb/psi, "psi")
#out = [ P2ox - P3 - ox_check.getPressureDrop(mdot, P2ox, GOX.roo_std, roo_ox, Tox) ]
out = [ mdot - ox_check.getMdot(P2ox, P3, GOX.roo_std, roo_ox, Tox) ]
out.append( P3 - P4 - oxSole.getPressureDrop( mdot, P3, roo_ox) )
out.append( P4 - P5 - ox_tube.getPressureDrop(mdot, mu_ox, roo_ox) )
out.append( mdot - ox_orifice.getMdot(P5, Pchamb, Tox) )
#print("oxoutti", out)
return out
ox_solution = opt.fsolve(oxfunks, initial_ox_guesses) # iterates until finds a solution or goes bust
#print("ox solution is", ox_solution)
mdot_ox_new = ox_solution[3]
#print("mdot_ox_nyyy is", mdot_ox_new, "kg/s")
def fuelfunks(U): # defines the system of equations and unknowns U to be solved
P2 = U[0]
P3 = U[1]
P4 = U[2]
mdot = U[3]
#print("U is", U)
#print("fuelmdot is", mdot)
out = [ mdot - fuelSole.getMdot(P1fuel, P2, rooFuel, kerosene.P_crit, kerosene.P_vapor) ]
out.append( P2 - P3 - fuel_tube.getPressureDrop(mdot, mu_fuel, rooFuel) )
out.append( P3 - P4 - jacket.getPressureDrop(mdot) )
out.append( P4 - Pchamb - fuel_orifice.getPressureDrop(mdot, rooFuel) )
#print("fueloutti", out)
return out
fuel_solution = opt.fsolve(fuelfunks, initial_fuel_guesses)
#print("fuel solution is", fuel_solution)
mdot_fuel_new = fuel_solution[3]
# Now that fuel mass flow rate out has been solved, intermediate state (=no N2 inflow yet) of the fuel tank can be established:
fueltank.update(TfuelPres[i], 0, mdot_fuel_new, timestep)
Pfuel_intermediate = fueltank.getPtank()
Pfuel_eff = (Pfuel_intermediate + P1fuel)/2 # average of pressures before and after ejection of fuel from tank; incoming nitrogen will see this 'effective' pressure in the tank
# Next, nitrogen flow into the void created by ejected fuel is calculated
def presfunks(U): # defines the system of equations and unknowns U to be solved
P3 = U[0]
P4 = U[1]
mdot = U[2]
out = [mdot - pres_check.getMdot(Preg_N2, P3, nitrogen.roo_std, roo_N2_fuel, Tpres) ]
#out.append( P3 - P4 - presSole.getPressureDrop(mdot, P3, roo_N2_fuel) )
out.append( mdot - presSole.getMdot(P3, P4, roo_N2_fuel) )
#out.append( P4 - Pfuel_eff - presfuel_tube.getPressureDrop(mdot, mu_N2_fuel, roo_N2_fuel) )
out.append( mdot - presfuel_tube.getMdot(P4, Pfuel_eff, mu_N2_fuel, roo_N2_fuel) )
#print("presoutti", out)
return out
pres_solution = opt.fsolve(presfunks, initial_pres_guesses)
#print("pres solution is", pres_solution)
mdot_pres_new = pres_solution[2]
#print("mdot_pres_new is", mdot_pres_new, "kg/s")
# Determine final conditions in prop tanks now that N2 inflow has been determined
oxtank.update(mdot_ox_new, timestep)
fueltank.update(TfuelPres[i], mdot_pres_new, 0, timestep)
# ...and fuel pressurant tank
fuelprestank.update(mdot_pres_new, timestep)
# Check if OFratio is within limits. If not, stop simulation (no CEA data beyond OFratio 0.5-3.0)
if (mdot_ox_new/mdot_fuel_new) < 0.5 or (mdot_ox_new/mdot_fuel_new) > 3.0:
print("OF ratio out of range, terminate")
print("mdot_ox_new is", mdot_ox_new, "kg/s")
print("mdot_fuel_new is", mdot_fuel_new, "kg/s")
break
# Update chamber parameters:
chamber.update(mdot_ox_new, mdot_fuel_new, Pambient, timestep) # mdot_ox_in, mdot_fuel_in, Pambient, timestep
# Check if ox or fuel tank will empty during this timestep. If so, stop simulation.
if oxtank.getPtank() < Preg_ox:
print("Ox tank reached regulation pressure (=empty) after", i, " iterations, ie", i*timestep, "seconds")
print("remaining fuel", mfuel[i], "kg")
print("remaining fuel prs", mfuelPres[i], "kg,", "i.e.", mfuelPres[i]/mfuelPres[0]*100, " % of initial amount")
break
if fueltank.getMprop() < 0:
print("Fuel tank empty after", i, " iterations, ie", i*timestep, "seconds")
print("remaining GOX", mox[i], "kg")
print("remaining fuel prs", mfuelPres[i], "kg,", "i.e.", mfuelPres[i]/mfuelPres[0]*100, " % of initial amount")
break
if fuelprestank.getPtank() < Preg_N2:
print("Out of fuel pressurant after", i, " iterations, ie", i*timestep, "seconds")
print("remaining fuel", mfuel[i], "kg")
print("remaining GOX", mox[i], "kg")
break
#update mass flow time histories. These are values during the CURRENT time step.
if i==0:
P3ox = [ox_solution[0]]
P4ox = [ox_solution[1]]
P5ox = [ox_solution[2]]
mdot_ox = [ox_solution[3]]
P2fuel = [fuel_solution[0]]
P3fuel = [fuel_solution[1]]
P4fuel = [fuel_solution[2]]
mdot_fuel = [fuel_solution[3]]
P3pres = [pres_solution[0]]
P4pres = [pres_solution[1]]
mdot_fuel_pres = [pres_solution[2]]
OFratio = [ mdot_ox[0]/mdot_fuel[0] ]
else:
P3ox.append( ox_solution[0])
P4ox.append( ox_solution[1])
P5ox.append( ox_solution[2])
mdot_ox.append( ox_solution[3])
P2fuel.append( fuel_solution[0])
P3fuel.append( fuel_solution[1])
P4fuel.append( fuel_solution[2])
mdot_fuel.append( fuel_solution[3])
P3pres.append( pres_solution[0])
P4pres.append( pres_solution[1])
mdot_fuel_pres.append( pres_solution[2])
#print("i is= ", i)
OFratio.append( mdot_ox[i]/mdot_fuel[i])
#update the rest of the time histories. System will have these values during the NEXT time step.
Poxtank.append( oxtank.getPtank())
Toxtank.append( oxtank.getTtank())
mox.append( oxtank.getM())
Pfueltank.append( fueltank.getPtank())
Tfueltank.append( fueltank.getTpres())
mPresFueltank.append( fueltank.getMpres())
mfuel.append( fueltank.getMprop())
FFfueltank.append( fueltank.getFF())
TfuelPres.append( fuelprestank.getTtank())
PfuelPres.append( fuelprestank.getPtank())
mfuelPres.append( fuelprestank.getM())
#mdot_fuel_pres.append( mdot_pres_new)
Pchamber.append( chamber.get_P_inlet() )
Pexit.append( nozzle.getPe(Pchamber[i+1], chamber.gamma, Pambient) )
Mexit.append( nozzle.getMe(Pchamber[i+1], chamber.gamma, Pambient) )
cmass.append( chamber.m)
mdot_nozzle.append( nozzle.getmdot(chamber.gamma, Runiv/chamber.mbar, chamber.get_P_inlet(),\
chamber.T, chamber.Pa) )
Thrust.append( nozzle.getThrust(chamber.get_P_inlet(), Pambient, chamber.gamma) )
T_chamber.append( chamber.T)
Isp.append( Thrust[i+1]/(mdot_ox[i] + mdot_fuel[i])/9.81 )
mTotal.append(mox[i+1] + mfuel[i+1] + cmass[i+1] + mdot_nozzle[i]*timestep )
mprs.append( mPresFueltank[i+1] + mfuelPres[i+1] )
time.append( time[i]+timestep )
i+=1
# Print some values
print("")
print("mdot_nozzle steady state (end of sim) is", '%.3f'%mdot_nozzle[-1], "kg/s")
print("SS thrust is", '%.1f'%Thrust[-1], "N")
print("SS Isp is", '%.1f'%Isp[-1], "s")
print("SS T_chamber is",'%.1f'%T_chamber[-1], "K")
print("SS P_chamber is", '%.1f'%(Pchamber[-1]/psi), "psi")
print("SS P_exit is", '%.3f'%(Pexit[-1]/atm), "atm")
print("SS thrust coeff is", '%.3f'%nozzle.getCf(Pchamber[-1], atm, chamber.get_gamma(OFratio[-1])) )
print("SS mdot_N2 is", '%.3f'%mdot_fuel_pres[-1], "kg/s")
print("SS N2 flow rate is", '%.3f'%(mdot_fuel_pres[-1]/roo_N2_fuel*1000/3.78*60), "GPM")
print("SS mdot_ox is", '%.3f'%mdot_ox[-1], "kg/s")
print("SS mdot_fuel is", '%.3f'%mdot_fuel[-1], "kg/s")
print("SS O/F ratio is", '%.3f'%OFratio[-1])
print("SS ox tube velocity is", '%.1f'%(mdot_ox[-1]/(roo_ox*pi*d_oxtube**2/4)), "m/s")
print("SS fuel tube velocity is", '%.1f'%(mdot_fuel[-1]/(rooFuel*pi*d_fueltube**2/4)), "m/s")
print("SS ox injection velocity is", '%.1f'%(mdot_ox[-1]/(roo_ox*pi*diameter_oxInjectorHoles**2/4*numOxInjectorHoles)), "m/s")
print("SS fuel injection velocity is", '%.1f'%(mdot_fuel[-1]/(rooFuel*pi*diameter_fuelInjectorHoles**2/4*numFuelHoles)), "m/s")
print("SS ox injector P_drop", '%.1f'%((P4ox[-1]-Pchamber[-1])/Pchamber[-1]*100), "% of Pchamber")
print("SS fuel injector P_drop", '%.1f'%((P4fuel[-1]-Pchamber[-1])/Pchamber[-1]*100), "% of Pchamber")
print("")
# See what check valves are doing
dP_ox_check = (Preg_ox - P3ox[-1])
dP_N2_check = (Preg_N2 - P3pres[-1])
if dP_ox_check < ox_check.Pcrack:
print("Warning: Pressure drop over ox check valve (",'%.1f'%(dP_ox_check/psi),"psi) is less than its cracking pressure (",ox_check.Pcrack/psi,"psi) and will remain shut")
else:
print("Ox check valve pressure drop is", '%.1f'%(dP_ox_check/psi), "psi, enough to keep it flowing")
if dP_N2_check < pres_check.Pcrack:
print("Warning: Pressure drop over N2 check valve(",'%.1f'%(dP_N2_check/psi),"psi) is less than its cracking pressure (",pres_check.Pcrack/psi,"psi) and will remain shut")
else:
print("N2 check valve pressure drop is", '%.1f'%(dP_N2_check/psi), "psi, enough to keep it flowing")
# following time histories are one element shorter than the rest, so the last calculated value will be duplicated to match the length of other time histories.
P3ox.append( ox_solution[0])
P4ox.append( ox_solution[1])
P5ox.append( ox_solution[2])
mdot_ox.append( ox_solution[3])
P2fuel.append( fuel_solution[0])
P3fuel.append( fuel_solution[1])
P4fuel.append( fuel_solution[2])
mdot_fuel.append( fuel_solution[3])
P3pres.append( pres_solution[0])
P4pres.append( pres_solution[1])
mdot_fuel_pres.append( pres_solution[2])
OFratio.append( mdot_ox[i]/mdot_fuel[i])
# plot time histories
plt.ion()
Preg_ox_array = full((1, len(time)), Preg_ox/psi)
plt.figure(1)
plt.plot(time, array(Poxtank)/psi, label='ox tank')
plt.figure(1)
plt.plot(time, Preg_ox_array.T, label="P_regulation")
plt.figure(1)
plt.plot(time,array(P3ox)/psi, label='Pcheck_out')
plt.figure(1)
plt.plot(time,array(P4ox)/psi, label='Psolenoid_out')
plt.figure(1)
plt.plot(time,array(P5ox)/psi, label='Pinj_in')
plt.figure(1)
plt.plot(time,array(Pchamber)/psi, label='Pchamber')
plt.figure(1)
plt.plot(time,array(Pexit)/psi, label='Pexit')
plt.title('Ox pressures')
plt.legend( loc='upper right')
plt.xlabel('Time [s]')
plt.ylabel('psia')
plt.show()
Preg_N2_array = full((1, len(time)), Preg_N2/psi)
plt.figure(2)
plt.plot(time, array(PfuelPres)/psi, label='fuelpres tank')
plt.figure(2)
plt.plot(time, Preg_N2_array.T, label="P_regulation")
plt.figure(2)
plt.plot(time,array(P3pres)/psi, label='N2 check valve out')
plt.figure(2)
plt.plot(time,array(P4pres)/psi, label='N2 solenoid valve out')
plt.figure(2)
plt.plot(time,array(Pfueltank)/psi, label='fuel tank')
plt.figure(2)
plt.plot(time,array(P2fuel)/psi, label='Pvalve_out')
plt.figure(2)
plt.plot(time,array(P3fuel)/psi, label='Pjacket_in')
plt.figure(2)
plt.plot(time,array(P4fuel)/psi, label='Pinj_in')
plt.figure(2)
plt.plot(time,array(Pchamber)/psi, label='Pchamber')
plt.figure(2)
plt.plot(time,array(Pexit)/psi, label='Pexit')
plt.title('Fuel pressures')
plt.legend( loc='upper right')
plt.xlabel('Time [s]')
plt.ylabel('Psia')
plt.show()
plt.figure(3)
plt.plot(time,Toxtank, label='Ox tank')
plt.figure(3)
plt.plot(time,Tfueltank, label='Fuel tank')
plt.figure(3)
plt.plot(time,TfuelPres, label='fuel pressurant tank')
plt.title('Tank temperatures')
plt.legend( loc='lower left')
plt.xlabel('Time [s]')
plt.ylabel('K')
plt.show()
plt.figure(4)
plt.plot(time,mdot_ox, label='mdot_ox')
plt.figure(4)
plt.plot(time,mdot_fuel, label='mdot_fuel')
plt.figure(4)
plt.plot(time,mdot_nozzle, label='mdot_nozzle')
plt.figure(4)
plt.plot(time,mdot_fuel_pres, label='mdot_fuel_pres')
plt.title('Mass flows')
plt.xlabel('Time [s]')
plt.ylabel('kg/s')
plt.legend( loc='upper right')
plt.show()
plt.figure(5)
plt.plot(time,FFfueltank, label='fuel tank')
plt.title('Fill fraction in fuel tank (Vfuel/Vtank)')
plt.xlabel('Time [s]')
plt.ylabel('')
plt.legend( loc='upper right')
plt.show()
plt.figure(6)
plt.plot(time, OFratio)
plt.title('O/F ratio')
plt.xlabel('Time [s]')
plt.ylabel('')
plt.show()
plt.figure(7)
plt.plot(time,mox, label='GOX')
plt.figure(7)
plt.plot(time,mfuel, label='fuel')
plt.figure(7)
plt.plot(time,mfuelPres, label='fuel pressurant')
plt.figure(7)
plt.plot(time,mPresFueltank, label='pressurant in fuel tank')
plt.figure(7)
plt.plot(time,mprs, label='total pressurant')
plt.title('Fluid masses')
plt.xlabel('Time [s]')
plt.ylabel('kg')
plt.legend( loc='upper right')
plt.show()
plt.figure(8)
plt.plot(time, cmass)
plt.title('Resident mass in chamber')
plt.xlabel('Time [s]')
plt.ylabel('kg')
plt.show()
plt.figure(9)
plt.plot(time, Thrust)
plt.title('Thrust')
plt.xlabel('Time [s]')
plt.ylabel('N')
plt.show()
plt.figure(10)
plt.plot(time, Isp)
plt.title('Isp')
plt.xlabel('Time [s]')
plt.ylabel('s')
plt.show()
plt.figure(11)
plt.plot(time, T_chamber)
plt.title('T chamber')
plt.xlabel('Time [s]')
plt.ylabel('K')
plt.show()
plt.figure(12)
plt.plot(time, Mexit)
plt.title('Exit Mach number')
plt.xlabel('Time [s]')
plt.ylabel('-')
plt.show()
plt.figure(13)
y1 = PfuelPres[-1]/psi
y2 = Preg_N2/psi
y3 = P3pres[-1]/psi
y4 = P4pres[-1]/psi
y5 = Pfueltank[-1]/psi
y6 = P2fuel[-1]/psi
y7 = P3fuel[-1]/psi
y8 = P4fuel[-1]/psi
y9 = Pchamber[-1]/psi
plt.plot( [0, 1], [y1, y1], linewidth=2, label="Pressurant tank")
plt.plot( [1, 2], [y1, y2], linewidth=2, label="Regulator")
plt.plot( [2, 3], [y2, y3], linewidth=2, label="Check valve")
plt.plot( [3, 4], [y3, y4], linewidth=2, label="Pressurant solenoid")
plt.plot( [4, 5], [y4, y5], linewidth=2, label="Pressurant tubing")
plt.plot( [5, 6], [y5, y5], linewidth=2, label="Fuel tank")
plt.plot( [6, 7], [y5, y6], linewidth=2, label="Fuel solenoid")
plt.plot( [7, 8], [y6, y7], linewidth=2, label="Piping")
plt.plot( [8, 9], [y7, y8], linewidth=2, label="Cooling jacket")
plt.plot( [9, 10], [y8, y9], linewidth=2, label="Fuel injector")
plt.plot( [10, 11], [y9, y9], linewidth=2, label="Chamber")
plt.title('Fuel line pressures at end of burn')
plt.ylabel('psi')
plt.legend( loc='upper right')
plt.figure(14)
y1 = Poxtank[-1]/psi
y2 = Preg_ox/psi
y3 = P3ox[-1]/psi
y4 = P4ox[-1]/psi
y5 = P5ox[-1]/psi
y6 = Pchamber[-1]/psi
plt.plot( [0, 1], [y1, y1], linewidth=2, label="Ox tank")
plt.plot( [1, 2], [y1, y2], linewidth=2, label="Regulator")
plt.plot( [2, 3], [y2, y3], linewidth=2, label="Check valve")
plt.plot( [3, 4], [y3, y4], linewidth=2, label="Ox solenoid")
plt.plot( [4, 5], [y4, y5], linewidth=2, label="Tubing")
plt.plot( [5, 6], [y5, y6], linewidth=2, label="Ox injector")
plt.plot( [6, 7], [y6, y6], linewidth=2, label="Chamber")
plt.title('Ox line pressures at end of burn')
plt.ylabel('psi')
plt.legend( loc='upper right')
|
mit
|
victorbergelin/scikit-learn
|
sklearn/metrics/cluster/supervised.py
|
207
|
27395
|
"""Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
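# Worked example (illustrative, not part of the original module):
# >>> contingency_matrix([0, 0, 1, 1], [0, 1, 1, 1])
# array([[1, 1],
#        [0, 2]])
# i.e. true class 0 has one sample in predicted cluster 0 and one in cluster 1,
# while both samples of true class 1 fall in predicted cluster 1.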
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1 even when the labels are permuted::
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Labelings that assign all class members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
    If class members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
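# Worked example (illustrative, not part of the original module) matching the 0.57... doctest
# above: for labels_true=[0, 0, 1, 2] and labels_pred=[0, 0, 1, 1] the contingency matrix is
# [[2, 0], [0, 1], [0, 1]], giving sum_comb = 1, sum_comb_c = 1, sum_comb_k = 2,
# prod_comb = 1*2/comb(4, 2) = 1/3, mean_comb = 1.5 and ARI = (1 - 1/3)/(1.5 - 1/3) ~= 0.571.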
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
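# Worked example (illustrative, not part of the original module): for labels_true=[0, 0, 1, 1]
# and labels_pred=[0, 0, 0, 0] the mutual information is 0, so homogeneity = 0, while the
# predicted labeling has zero entropy and the fallback gives completeness = 1.0; the resulting
# V-measure is 0, consistent with the doctests in the functions below.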
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Non-perfect labelings that assign all class members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
    If class members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Labelings that assign all class members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
    classes are homogeneous, but unnecessary splits harm completeness
    and thus penalize the V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
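# A minimal sketch (assuming only the public sklearn.metrics.cluster API)
# checking the harmonic-mean relation documented above:
#     v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
def _demo_v_measure_harmonic_mean():
    from sklearn.metrics.cluster import (homogeneity_score,
                                         completeness_score,
                                         v_measure_score)
    labels_true = [0, 0, 1, 2]
    labels_pred = [0, 0, 1, 1]
    h = homogeneity_score(labels_true, labels_pred)
    c = completeness_score(labels_true, labels_pred)
    v = v_measure_score(labels_true, labels_pred)
    # The harmonic mean of homogeneity and completeness reproduces the V-measure.
    assert abs(v - 2 * h * c / (h + c)) < 1e-10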
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
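# A minimal sketch (using only numpy and the contingency_matrix helper already
# available in this module) that evaluates the documented formula
#     MI(U, V) = sum_ij P(i, j) * log(P(i, j) / (P(i) * P'(j)))
# directly and compares it with mutual_info_score.
def _demo_mutual_info_from_contingency():
    labels_a = [0, 0, 1, 1, 2, 2]
    labels_b = [0, 0, 1, 2, 2, 2]
    c = np.array(contingency_matrix(labels_a, labels_b), dtype='float')
    p_ij = c / c.sum()
    p_i = p_ij.sum(axis=1, keepdims=True)
    p_j = p_ij.sum(axis=0, keepdims=True)
    nonzero = p_ij > 0
    mi_manual = np.sum(p_ij[nonzero] *
                       np.log(p_ij[nonzero] / (p_i * p_j)[nonzero]))
    assert abs(mi_manual - mutual_info_score(labels_a, labels_b)) < 1e-10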
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
ami: float (upper-bounded by 1.0)
The AMI returns a value of 1 when the two partitions are identical
(i.e. perfectly matched). Random partitions (independent labellings) have
an expected AMI of around 0 on average and can therefore be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
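# A minimal sketch (using only numpy and functions defined in this module)
# illustrating the chance adjustment described above: for two independent
# random labelings the raw MI picks up spurious structure and is positive,
# while the AMI stays close to zero on average (the seed is an arbitrary choice).
def _demo_ami_chance_adjustment(seed=0):
    rng = np.random.RandomState(seed)
    labels_a = rng.randint(0, 10, size=1000)
    labels_b = rng.randint(0, 10, size=1000)
    mi = mutual_info_score(labels_a, labels_b)
    ami = adjusted_mutual_info_score(labels_a, labels_b)
    # AMI subtracts the expected chance overlap that inflates the raw MI.
    print('MI  for random labelings:', mi)
    print('AMI for random labelings:', ami)
    return mi, ami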
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
:func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
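# A minimal sketch checking the normalization documented above,
#     NMI = MI / sqrt(H(labels_true) * H(labels_pred)),
# using mutual_info_score and the entropy helper defined just below
# (both are part of this module).
def _demo_nmi_normalization():
    labels_true = [0, 0, 1, 1, 2, 2]
    labels_pred = [0, 0, 1, 1, 1, 2]
    mi = mutual_info_score(labels_true, labels_pred)
    h_true, h_pred = entropy(labels_true), entropy(labels_pred)
    nmi_manual = mi / max(np.sqrt(h_true * h_pred), 1e-10)
    assert abs(nmi_manual -
               normalized_mutual_info_score(labels_true, labels_pred)) < 1e-10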
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
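# A minimal sketch of the helper above (numpy only): two equally likely labels
# carry log(2) nats of entropy.
def _demo_entropy_two_classes():
    assert abs(entropy([0, 0, 1, 1]) - np.log(2)) < 1e-12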
|
bsd-3-clause
|
Unidata/MetPy
|
v0.8/_downloads/Inverse_Distance_Verification.py
|
1
|
7114
|
# Copyright (c) 2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Inverse Distance Verification: Cressman and Barnes
==================================================
Compare inverse distance interpolation methods
Two popular interpolation schemes that use inverse distance weighting of observations are the
Barnes and Cressman analyses. The Cressman analysis is relatively straightforward and uses
the ratio between distance of an observation from a grid cell and the maximum allowable
distance to calculate the relative importance of an observation for calculating an
interpolation value. Barnes uses the inverse exponential ratio of each distance between
an observation and a grid cell and the average spacing of the observations over the domain.
Algorithmically:
1. A KDTree data structure is built using the locations of each observation.
2. All observations within a maximum allowable distance of a particular grid cell are found in
O(log n) time.
3. Using the weighting rules for Cressman or Barnes analyses, the observations are given a
proportional value, primarily based on their distance from the grid cell.
4. The sum of these proportional values is calculated and this value is used as the
interpolated value.
5. Steps 2 through 4 are repeated for each grid cell.
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial import cKDTree
from scipy.spatial.distance import cdist
from metpy.gridding.gridding_functions import calc_kappa
from metpy.gridding.interpolation import barnes_point, cressman_point
from metpy.gridding.triangles import dist_2
def draw_circle(ax, x, y, r, m, label):
th = np.linspace(0, 2 * np.pi, 100)
nx = x + r * np.cos(th)
ny = y + r * np.sin(th)
ax.plot(nx, ny, m, label=label)
###########################################
# Generate random x and y coordinates, and observation values proportional to x * x.
#
# Set up two test grid locations at (30, 30) and (60, 60).
np.random.seed(100)
pts = np.random.randint(0, 100, (10, 2))
xp = pts[:, 0]
yp = pts[:, 1]
zp = xp * xp / 1000
sim_gridx = [30, 60]
sim_gridy = [30, 60]
###########################################
# Set up a cKDTree object and query all of the observations within "radius" of each grid point.
#
# The variable ``indices`` represents the index of each matched coordinate within the
# cKDTree's ``data`` list.
grid_points = np.array(list(zip(sim_gridx, sim_gridy)))
radius = 40
obs_tree = cKDTree(list(zip(xp, yp)))
indices = obs_tree.query_ball_point(grid_points, r=radius)
###########################################
# For grid 0, we will use Cressman to interpolate its value.
x1, y1 = obs_tree.data[indices[0]].T
cress_dist = dist_2(sim_gridx[0], sim_gridy[0], x1, y1)
cress_obs = zp[indices[0]]
cress_val = cressman_point(cress_dist, cress_obs, radius)
###########################################
# For grid 1, we will use barnes to interpolate its value.
#
# We need to calculate kappa--the average distance between observations over the domain.
x2, y2 = obs_tree.data[indices[1]].T
barnes_dist = dist_2(sim_gridx[1], sim_gridy[1], x2, y2)
barnes_obs = zp[indices[1]]
ave_spacing = np.mean((cdist(list(zip(xp, yp)), list(zip(xp, yp)))))
kappa = calc_kappa(ave_spacing)
barnes_val = barnes_point(barnes_dist, barnes_obs, kappa)
###########################################
# Plot all of the affiliated information and interpolation values.
fig, ax = plt.subplots(1, 1, figsize=(15, 10))
for i, zval in enumerate(zp):
ax.plot(pts[i, 0], pts[i, 1], '.')
ax.annotate(str(zval) + ' F', xy=(pts[i, 0] + 2, pts[i, 1]))
ax.plot(sim_gridx, sim_gridy, '+', markersize=10)
ax.plot(x1, y1, 'ko', fillstyle='none', markersize=10, label='grid 0 matches')
ax.plot(x2, y2, 'ks', fillstyle='none', markersize=10, label='grid 1 matches')
draw_circle(ax, sim_gridx[0], sim_gridy[0], m='k-', r=radius, label='grid 0 radius')
draw_circle(ax, sim_gridx[1], sim_gridy[1], m='b-', r=radius, label='grid 1 radius')
ax.annotate('grid 0: cressman {:.3f}'.format(cress_val), xy=(sim_gridx[0] + 2, sim_gridy[0]))
ax.annotate('grid 1: barnes {:.3f}'.format(barnes_val), xy=(sim_gridx[1] + 2, sim_gridy[1]))
ax.set_aspect('equal', 'datalim')
ax.legend()
###########################################
# For each point, we will do a manual check of the interpolation values by doing a step by
# step and visual breakdown.
#
# Plot the grid point, observations within radius of the grid point, their locations, and
# their distances from the grid point.
fig, ax = plt.subplots(1, 1, figsize=(15, 10))
ax.annotate('grid 0: ({}, {})'.format(sim_gridx[0], sim_gridy[0]),
xy=(sim_gridx[0] + 2, sim_gridy[0]))
ax.plot(sim_gridx[0], sim_gridy[0], '+', markersize=10)
mx, my = obs_tree.data[indices[0]].T
mz = zp[indices[0]]
for x, y, z in zip(mx, my, mz):
d = np.sqrt((sim_gridx[0] - x)**2 + (y - sim_gridy[0])**2)
ax.plot([sim_gridx[0], x], [sim_gridy[0], y], '--')
xave = np.mean([sim_gridx[0], x])
yave = np.mean([sim_gridy[0], y])
ax.annotate('distance: {}'.format(d), xy=(xave, yave))
ax.annotate('({}, {}) : {} F'.format(x, y, z), xy=(x, y))
ax.set_xlim(0, 80)
ax.set_ylim(0, 80)
ax.set_aspect('equal', 'datalim')
###########################################
# Step through the cressman calculations.
dists = np.array([22.803508502, 7.21110255093, 31.304951685, 33.5410196625])
values = np.array([0.064, 1.156, 3.364, 0.225])
cres_weights = (radius * radius - dists * dists) / (radius * radius + dists * dists)
total_weights = np.sum(cres_weights)
proportion = cres_weights / total_weights
value = values * proportion
val = cressman_point(cress_dist, cress_obs, radius)
print('Manual cressman value for grid 0:\t', np.sum(value))
print('Metpy cressman value for grid 0:\t', val)
###########################################
# Now repeat for grid 1, except use barnes interpolation.
fig, ax = plt.subplots(1, 1, figsize=(15, 10))
ax.annotate('grid 1: ({}, {})'.format(sim_gridx[1], sim_gridy[1]),
xy=(sim_gridx[1] + 2, sim_gridy[1]))
ax.plot(sim_gridx[1], sim_gridy[1], '+', markersize=10)
mx, my = obs_tree.data[indices[1]].T
mz = zp[indices[1]]
for x, y, z in zip(mx, my, mz):
d = np.sqrt((sim_gridx[1] - x)**2 + (y - sim_gridy[1])**2)
ax.plot([sim_gridx[1], x], [sim_gridy[1], y], '--')
xave = np.mean([sim_gridx[1], x])
yave = np.mean([sim_gridy[1], y])
ax.annotate('distance: {}'.format(d), xy=(xave, yave))
ax.annotate('({}, {}) : {} F'.format(x, y, z), xy=(x, y))
ax.set_xlim(40, 80)
ax.set_ylim(40, 100)
ax.set_aspect('equal', 'datalim')
###########################################
# Step through barnes calculations.
dists = np.array([9.21954445729, 22.4722050542, 27.892651362, 38.8329756779])
values = np.array([2.809, 6.241, 4.489, 2.704])
weights = np.exp(-dists**2 / kappa)
total_weights = np.sum(weights)
value = np.sum(values * (weights / total_weights))
print('Manual barnes value:\t', value)
print('Metpy barnes value:\t', barnes_point(barnes_dist, barnes_obs, kappa))
plt.show()
|
bsd-3-clause
|
Djabbz/scikit-learn
|
benchmarks/bench_mnist.py
|
76
|
6136
|
"""
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 - from their raw images. In contrast to the
covertype dataset, the feature space is homogeneous.
Example of output :
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
------------------------------------------------------------
Nystroem-SVM 105.07s 0.91s 0.0227
ExtraTrees 48.20s 1.22s 0.0288
RandomForest 47.17s 1.21s 0.0304
SampledRBF-SVM 140.45s 0.84s 0.0486
CART 22.84s 0.16s 0.1214
dummy 0.01s 0.02s 0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
from sklearn.linear_model import LogisticRegression
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_mldata('MNIST original')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
'RandomForest': RandomForestClassifier(n_estimators=100),
'Nystroem-SVM':
make_pipeline(Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM':
make_pipeline(RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'LogisticRegression-SAG': LogisticRegression(solver='sag', tol=1e-1, C=1e4)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
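# A hedged usage sketch: the flags mirror the argparse options defined above,
# but the exact command line shown here is an assumption, not part of the
# original benchmark.
#
#   python bench_mnist.py --classifiers RandomForest Nystroem-SVM \
#       --n-jobs 4 --order C --random-seed 0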
|
bsd-3-clause
|
giorgiop/scipy
|
scipy/signal/wavelets.py
|
20
|
10472
|
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.dual import eig
from scipy.special import comb
from scipy import linspace, pi, exp
from scipy.signal import convolve
__all__ = ['daub', 'qmf', 'cascade', 'morlet', 'ricker', 'cwt']
def daub(p):
"""
The coefficients for the FIR low-pass filter producing Daubechies wavelets.
p>=1 gives the order of the zero at f=1/2.
There are 2p filter coefficients.
Parameters
----------
p : int
Order of the zero at f=1/2, can have values from 1 to 34.
Returns
-------
daub : ndarray
The 2p filter coefficients of the low-pass FIR filter.
"""
sqrt = np.sqrt
if p < 1:
raise ValueError("p must be at least 1.")
if p == 1:
c = 1 / sqrt(2)
return np.array([c, c])
elif p == 2:
f = sqrt(2) / 8
c = sqrt(3)
return f * np.array([1 + c, 3 + c, 3 - c, 1 - c])
elif p == 3:
tmp = 12 * sqrt(10)
z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6
z1c = np.conj(z1)
f = sqrt(2) / 8
d0 = np.real((1 - z1) * (1 - z1c))
a0 = np.real(z1 * z1c)
a1 = 2 * np.real(z1)
return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1,
a0 - 3 * a1 + 3, 3 - a1, 1])
elif p < 35:
# construct polynomial and factor it
if p < 35:
P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1]
yj = np.roots(P)
else: # try different polynomial --- needs work
P = [comb(p - 1 + k, k, exact=1) / 4.0**k
for k in range(p)][::-1]
yj = np.roots(P) / 4
# for each root, compute two z roots, select the one with |z|>1
# Build up final polynomial
c = np.poly1d([1, 1])**p
q = np.poly1d([1])
for k in range(p - 1):
yval = yj[k]
part = 2 * sqrt(yval * (yval - 1))
const = 1 - 2 * yval
z1 = const + part
if (abs(z1)) < 1:
z1 = const - part
q = q * [1, -z1]
q = c * np.real(q)
# Normalize result
q = q / np.sum(q) * sqrt(2)
return q.c[::-1]
else:
raise ValueError("Polynomial factorization does not work "
"well for p too large.")
def qmf(hk):
"""
Return high-pass qmf filter from low-pass
Parameters
----------
hk : array_like
Coefficients of the low-pass filter.
"""
N = len(hk) - 1
asgn = [{0: 1, 1: -1}[k % 2] for k in range(N + 1)]
return hk[::-1] * np.array(asgn)
def cascade(hk, J=7):
"""
Return (x, phi, psi) at dyadic points ``K/2**J`` from filter coefficients.
Parameters
----------
hk : array_like
Coefficients of low-pass filter.
J : int, optional
Values will be computed at grid points ``K/2**J``. Default is 7.
Returns
-------
x : ndarray
The dyadic points ``K/2**J`` for ``K=0...N * (2**J)-1`` where
``len(hk) = len(gk) = N+1``.
phi : ndarray
The scaling function ``phi(x)`` at `x`:
``phi(x) = sum(hk * phi(2x-k))``, where k is from 0 to N.
psi : ndarray, optional
The wavelet function ``psi(x)`` at `x`:
``psi(x) = sum(gk * phi(2x-k))``, where k is from 0 to N.
`psi` is only returned if `gk` is not None.
Notes
-----
The algorithm uses the vector cascade algorithm described by Strang and
Nguyen in "Wavelets and Filter Banks". It builds a dictionary of values
and slices for quick reuse. Then inserts vectors into final vector at the
end.
"""
N = len(hk) - 1
if (J > 30 - np.log2(N + 1)):
raise ValueError("Too many levels.")
if (J < 1):
raise ValueError("Too few levels.")
# construct matrices needed
nn, kk = np.ogrid[:N, :N]
s2 = np.sqrt(2)
# append a zero so that take works
thk = np.r_[hk, 0]
gk = qmf(hk)
tgk = np.r_[gk, 0]
indx1 = np.clip(2 * nn - kk, -1, N + 1)
indx2 = np.clip(2 * nn - kk + 1, -1, N + 1)
m = np.zeros((2, 2, N, N), 'd')
m[0, 0] = np.take(thk, indx1, 0)
m[0, 1] = np.take(thk, indx2, 0)
m[1, 0] = np.take(tgk, indx1, 0)
m[1, 1] = np.take(tgk, indx2, 0)
m *= s2
# construct the grid of points
x = np.arange(0, N * (1 << J), dtype=float) / (1 << J)
phi = 0 * x
psi = 0 * x
# find phi0, and phi1
lam, v = eig(m[0, 0])
ind = np.argmin(np.absolute(lam - 1))
# a dictionary with a binary representation of the
# evaluation points x < 1 -- i.e. position is 0.xxxx
v = np.real(v[:, ind])
# need scaling function to integrate to 1 so find
# eigenvector normalized to sum(v,axis=0)=1
sm = np.sum(v)
if sm < 0: # need scaling function to integrate to 1
v = -v
sm = -sm
bitdic = {}
bitdic['0'] = v / sm
bitdic['1'] = np.dot(m[0, 1], bitdic['0'])
step = 1 << J
phi[::step] = bitdic['0']
phi[(1 << (J - 1))::step] = bitdic['1']
psi[::step] = np.dot(m[1, 0], bitdic['0'])
psi[(1 << (J - 1))::step] = np.dot(m[1, 1], bitdic['0'])
# descend down the levels inserting more and more values
# into bitdic -- store the values in the correct location once we
# have computed them -- stored in the dictionary
# for quicker use later.
prevkeys = ['1']
for level in range(2, J + 1):
newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys]
fac = 1 << (J - level)
for key in newkeys:
# convert key to number
num = 0
for pos in range(level):
if key[pos] == '1':
num += (1 << (level - 1 - pos))
pastphi = bitdic[key[1:]]
ii = int(key[0])
temp = np.dot(m[0, ii], pastphi)
bitdic[key] = temp
phi[num * fac::step] = temp
psi[num * fac::step] = np.dot(m[1, ii], pastphi)
prevkeys = newkeys
return x, phi, psi
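# A minimal usage sketch combining the two functions above: build the
# Daubechies-2 scaling and wavelet functions on a dyadic grid.
def _demo_cascade_daub2(J=7):
    x, phi, psi = cascade(daub(2), J=J)
    # daub(2) has N + 1 = 4 taps, so the grid holds N * 2**J = 3 * 2**J points.
    assert len(x) == len(phi) == len(psi) == 3 * (1 << J)
    return x, phi, psi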
def morlet(M, w=5.0, s=1.0, complete=True):
"""
Complex Morlet wavelet.
Parameters
----------
M : int
Length of the wavelet.
w : float, optional
Omega0. Default is 5
s : float, optional
Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1.
complete : bool, optional
Whether to use the complete or the standard version.
Returns
-------
morlet : (M,) ndarray
See Also
--------
scipy.signal.gausspulse
Notes
-----
The standard version::
pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2))
This commonly used wavelet is often referred to simply as the
Morlet wavelet. Note that this simplified version can cause
admissibility problems at low values of w.
The complete version::
pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2))
The complete version of the Morlet wavelet, with a correction
term to improve admissibility. For w greater than 5, the
correction term is negligible.
Note that the energy of the return wavelet is not normalised
according to s.
The fundamental frequency of this wavelet in Hz is given
by ``f = 2*s*w*r / M`` where r is the sampling rate.
"""
x = linspace(-s * 2 * pi, s * 2 * pi, M)
output = exp(1j * w * x)
if complete:
output -= exp(-0.5 * (w**2))
output *= exp(-0.5 * (x**2)) * pi**(-0.25)
return output
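# A minimal sketch (numpy only) of the admissibility note above: at the default
# w = 5 the correction term exp(-0.5 * w**2) is tiny, so the complete and
# standard versions of the wavelet are nearly identical.
def _demo_morlet_correction(M=256, w=5.0):
    diff = np.max(np.abs(morlet(M, w=w, complete=True) -
                         morlet(M, w=w, complete=False)))
    assert diff < 1e-5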
def ricker(points, a):
"""
Return a Ricker wavelet, also known as the "Mexican hat wavelet".
It models the function:
``A * (1 - x**2/a**2) * exp(-x**2/(2*a**2))``,
where ``A = 2/(sqrt(3*a)*pi**(1/4))``.
Parameters
----------
points : int
Number of points in `vector`.
Will be centered around 0.
a : scalar
Width parameter of the wavelet.
Returns
-------
vector : (N,) ndarray
Array of length `points` in shape of ricker curve.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> points = 100
>>> a = 4.0
>>> vec2 = signal.ricker(points, a)
>>> print(len(vec2))
100
>>> plt.plot(vec2)
>>> plt.show()
"""
A = 2 / (np.sqrt(3 * a) * (np.pi**0.25))
wsq = a**2
vec = np.arange(0, points) - (points - 1.0) / 2
xsq = vec**2
mod = (1 - xsq / wsq)
gauss = np.exp(-xsq / (2 * wsq))
total = A * mod * gauss
return total
def cwt(data, wavelet, widths):
"""
Continuous wavelet transform.
Performs a continuous wavelet transform on `data`,
using the `wavelet` function. A CWT performs a convolution
with `data` using the `wavelet` function, which is characterized
by a width parameter and length parameter.
Parameters
----------
data : (N,) ndarray
data on which to perform the transform.
wavelet : function
Wavelet function, which should take 2 arguments.
The first argument is the number of points that the returned vector
will have (len(wavelet(length, width)) == length).
The second is a width parameter, defining the size of the wavelet
(e.g. standard deviation of a gaussian). See `ricker`, which
satisfies these requirements.
widths : (M,) sequence
Widths to use for transform.
Returns
-------
cwt: (M, N) ndarray
Will have shape of (len(widths), len(data)).
Notes
-----
::
length = min(10 * width[ii], len(data))
cwt[ii,:] = signal.convolve(data, wavelet(length,
width[ii]), mode='same')
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 200, endpoint=False)
>>> sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2)
>>> widths = np.arange(1, 31)
>>> cwtmatr = signal.cwt(sig, signal.ricker, widths)
>>> plt.imshow(cwtmatr, extent=[-1, 1, 31, 1], cmap='PRGn', aspect='auto',
... vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
>>> plt.show()
"""
output = np.zeros([len(widths), len(data)])
for ind, width in enumerate(widths):
wavelet_data = wavelet(min(10 * width, len(data)), width)
output[ind, :] = convolve(data, wavelet_data,
mode='same')
return output
|
bsd-3-clause
|
Matterhorn-eth/pymh
|
pymh/examples/matterhorn_v1.py
|
1
|
16621
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 7 14:32:12 2016
@author: bfilippo
"""
# %%
import sys
import subprocess
import matplotlib.pyplot as plt
from matplotlib import gridspec
import os
import copy
import numpy as np
sys.dont_write_bytecode = True
pymhfolder = '/w04d2/bfilippo/pymh'
# pymhfolder = '/Users/filippo/work/pymh'
sys.path.insert(0, pymhfolder)
import pymh
from pymh.param.parameters import \
GridParam, DecompositionParam, TimeParam, ModelParam, SimulationParam, \
InputParam, OutputParam, BCParam, IBCParam, InjectionParam, \
DirParam, LocationsParam
from pymh.sim.simulations import \
BasicSim
from pymh.utils.utils import \
isPinRectangle
from pymh.utils import \
segyread_new
from pymh.utils import \
segyread
from pymh.io.model import \
truncate
reload(pymh.param.parameters)
# Abbreviations
# fn = filename
# bd = boundary
# vb = volume boundary
# %%
# if __name__ == '__main__':
# ----------------------------------------------------------------------
# Create directory structure
# -------------------------------------------------------------------------
Dir = DirParam()
Dir.makedirs()
# Add cluster directory
# %% ----------------------------------------------------------------------
# Initialize parameters
# -------------------------------------------------------------------------
# Common
Time = TimeParam()
BC = BCParam('pml')
inprefix = 'model_full'
ext = '.su'
model_full = {}
for attr in ('vp', 'rho'):
model_full[attr] = segyread_new.SEGYFile('/'.join([Dir.parameters['ref'], inprefix + '_' + attr + ext]), isSU=True)
nz = model_full['vp'].ns
nx = len(model_full['vp'])
# Full
FullGrid = GridParam()
FullDecomposition = DecompositionParam()
FullModel = ModelParam(filename_prefix=['model_full'])
FullSimulation = SimulationParam(freesurface=[True])
FullSimulation.parameters.update(BC.parameters)
FullInput = InputParam()
FullOutput = []
FullOutput.append(OutputParam('shot_gather',
receiver_origin=[0, 0, 40]))
FullOutput.append(OutputParam('slice',
timestep_increment=[50]))
# IBC
IBC = IBCParam('rigidboundary')
IBCGrid = GridParam(origin=IBC.extraparameters['inj']['origin'],
number_of_cells=IBC.extraparameters['inj']['ncells'])
IBCDecomposition = DecompositionParam(number_of_cells_per_node_x=[IBC.extraparameters['inj']['ncells'][0]],
number_of_cells_per_node_y=[IBC.extraparameters['inj']['ncells'][1]],
number_of_cells_per_node_z=[IBC.extraparameters['inj']['ncells'][2]])
IBCModel = ModelParam(filename_prefix=['model_ibc'])
IBCSimulation = SimulationParam()
IBCInjection = InjectionParam()
IBCOutput = []
IBCOutput.append(OutputParam('slice',
timestep_increment=[50]))
# Injection
Injection = InjectionParam('mps')
InjectionGrid = GridParam(origin=Injection.extraparameters['origin'],
number_of_cells=Injection.extraparameters['ncells'])
InjectionDecomposition = DecompositionParam(number_of_cells_per_node_x=[Injection.extraparameters['ncells'][0]],
number_of_cells_per_node_y=[Injection.extraparameters['ncells'][1]],
number_of_cells_per_node_z=[Injection.extraparameters['ncells'][2]])
InjectionModel = ModelParam(filename_prefix=['model_inj'])
InjectionOutput = []
InjectionOutput.append(OutputParam('slice',
timestep_increment=[50]))
# Green's functions
GFInput = InputParam(wavelet=['delta'],
spread=['single_point_staggered_velocity'])
GFOutput = []
GFOutput.append(OutputParam('sub_volume_boundary',
stagger_on_sub_volume=[True],
boundary_thickness=[1]))
# %% ----------------------------------------------------------------------
# Models
# -------------------------------------------------------------------------
# IBC model
truncate(SmallGrid=IBCGrid,
inprefix='model_full', inpath=Dir.parameters['ref'],
outprefix='model_ibc', outpath=Dir.parameters['ibc'],
ispadding=True,
ext='.su')
# Injection model
truncate(SmallGrid=InjectionGrid,
inprefix='model_full', inpath=Dir.parameters['ref'],
outprefix='model_inj', outpath=Dir.parameters['inj'],
ispadding=False,
ext='.su')
# %% ----------------------------------------------------------------------
# Locations
# -------------------------------------------------------------------------
Locations = {}
locations_fn = {}
for surf in ('inj', 'rec'):
Locations[surf] = LocationsParam()
Locations[surf].rectangle(origin=IBC.extraparameters[surf]['origin'],
number_of_cells=IBC.extraparameters[surf]['ncells'],
cell_size=IBC.extraparameters[surf]['cell_size'])
# print Locations[surf].locations
locations_fn[surf] = 's' + surf + '_locations.txt'
Locations[surf].write(locations_fn[surf], path=Dir.parameters['ref'])
Locations[surf].write(locations_fn[surf], path=Dir.parameters['ibc'])
Locations[surf].write(locations_fn[surf], path=Dir.parameters['ibc_gf'])
Locations['inj'].write(locations_fn['inj'], path=Dir.parameters['inj'])
# Temporary, eventually I need to fix this
# It is only needed when using gffu to reorder injection boundary data
Locations_for_inj_util = copy.deepcopy(Locations['rec'])
Locations_for_inj_util.locations[1:] = []
locations_inj_fn = 's' + 'rec' + '_locations_injection.txt'
Locations_for_inj_util.write(locations_inj_fn, path=Dir.parameters['ref'])
# %% ----------------------------------------------------------------------
# Is source outside or inside?
# -------------------------------------------------------------------------
# This also needs to be improved a lot
# Right now, it only checks whether the source is inside srec
corners = []
for loc in Locations['rec'].locations:
if loc[3]//6 == 1:
corners.append(loc[:3])
corners = [corners[i] for i in [0, 1, 3, 2]]
# print corners
IBC.extraparameters['source_inside'] = \
isPinRectangle(corners, FullInput.parameters['location'])
# %% ----------------------------------------------------------------------
# Extrapolation?
# -------------------------------------------------------------------------
# Does it really have to be included in IBC?
IBC.extraparameters['extrap'] = True
# %% ----------------------------------------------------------------------
# Create simulation files
# -------------------------------------------------------------------------
# Full
if not IBC.extraparameters['source_inside']:
injection_mono_fn = 'injection_sxx_x_{}_y_{}_z_{}'.format(*FullInput.parameters['location'])
FullOutput.append(OutputParam('sub_volume_boundary',
receiver_locations=[locations_fn['inj']],
filename_prefix=[injection_mono_fn],
boundary_thickness=[1],
attribute=['S00XX']))
injection_di_fn = 'injection_vn_x_{}_y_{}_z_{}'.format(*FullInput.parameters['location'])
FullOutput.append(OutputParam('sub_volume_boundary',
receiver_locations=[locations_fn['inj']],
filename_prefix=[injection_di_fn],
boundary_thickness=[1],
attribute=['normal_velocity'],
stagger_on_sub_volume=[True]))
if IBC.parameters['type'][0] == 'freesurface':
injection_mono_staggered_fn = 'injection_sxx_staggered_x_{}_y_{}_z_{}'.format(*FullInput.parameters['location'])
FullOutput.append(OutputParam('sub_volume_boundary',
receiver_locations=[locations_fn['inj']],
filename_prefix=[injection_mono_staggered_fn],
boundary_thickness=[1],
attribute=['S00XX'],
stagger_on_sub_volume=[True]))
injection_fn = {}
injection_fn['mono'] = [injection_mono_fn]
injection_fn['di'] = [injection_di_fn]
full_input_fn = 'full.txt'
FullSim = BasicSim(FullGrid,
FullDecomposition,
Time,
FullModel,
FullSimulation,
FullInput,
FullOutput)
FullSim.create(full_input_fn, path=Dir.parameters['ref'])
# IBC
if IBC.extraparameters['extrap']:
ibc_extrap_mono_fn = 'ebc_extrap_sxx'
IBCOutput.append(OutputParam('sub_volume_boundary',
receiver_locations=[locations_fn['rec']],
filename_prefix=[ibc_extrap_mono_fn],
boundary_thickness=[1],
attribute=['S00XX']))
ibc_extrap_di_fn = 'ebc_extrap_vn'
IBCOutput.append(OutputParam('sub_volume_boundary',
receiver_locations=[locations_fn['rec']],
filename_prefix=[ibc_extrap_di_fn],
boundary_thickness=[1],
attribute=['normal_velocity'],
stagger_on_sub_volume=[True]))
ibc_input_fn = 'ibc.txt'
if IBC.extraparameters['source_inside']:
InpOrInj = FullInput
else:
InpOrInj = IBCInjection
if IBC.parameters['type'][0] == 'freesurface':
InpOrInj.parameters.pop('injection_filelist_di')
else:
InpOrInj.parameters.pop('injection_filelist_mono')
IBCSim = BasicSim(IBCGrid,
IBCDecomposition,
Time,
IBCModel,
IBCSimulation,
IBC,
InpOrInj,
IBCOutput)
IBCSim.create(ibc_input_fn, path=Dir.parameters['ibc'])
# Injection
if IBC.extraparameters['extrap']:
inj_extrap_mono_fn = 'inj_extrap_sxx'
InjectionOutput.append(OutputParam('sub_volume_boundary',
receiver_locations=[locations_fn['rec']],
filename_prefix=[inj_extrap_mono_fn],
boundary_thickness=[1],
attribute=['S00XX']))
inj_extrap_di_fn = 'inj_extrap_vn'
InjectionOutput.append(OutputParam('sub_volume_boundary',
receiver_locations=[locations_fn['rec']],
filename_prefix=[inj_extrap_di_fn],
boundary_thickness=[1],
attribute=['normal_velocity'],
stagger_on_sub_volume=[True]))
injection_input_fn = 'injection.txt'
InjectionSim = BasicSim(InjectionGrid,
InjectionDecomposition,
Time,
InjectionModel,
FullSimulation,
Injection,
InjectionOutput)
InjectionSim.create(injection_input_fn, path=Dir.parameters['inj'])
# Green's functions
if IBC.parameters['type'][0] == 'freesurface':
GFOutput[0].parameters['attribute'] = ['S00XX']
else:
GFOutput[0].parameters['attribute'] = ['normal_velocity']
GFOutput[0].parameters['receiver_locations'] = [locations_fn['inj']]
if IBC.extraparameters['extrap']:
GFOutput.append(OutputParam('shot_gather',
receiver_origin=[0, 0, 20],
receiver_increment=[8, 0, 0],
number_of_receivers=[125]))
facedict = {0: (('mono', 'isotropic_stress_source', 'mono'), ('x_di', 'x_source', 'di')),
1: (('mono', 'isotropic_stress_source', 'mono'), ('x_di', 'x_source', 'di')),
2: (('mono', 'isotropic_stress_source', 'mono'), ('z_di', 'z_source', 'di')),
3: (('mono', 'isotropic_stress_source', 'mono'), ('z_di', 'z_source', 'di'))
}
gf_input_fn = {}
gf_fn = {}
gf_extrap_fn = {}
for i in ('mono', 'di'):
gf_input_fn[i] = []
gf_fn[i] = []
gf_extrap_fn[i] = []
for (i, loc) in enumerate(Locations['rec'].locations):
face = loc[3] % 6
for (j, k, l) in facedict[face]:
gf_input_fn[l].append('input_{}_x_{}_y_{}_z_{}.txt'.format(j, *loc[:3]))
GFInput.parameters['type'] = [k]
GFInput.parameters['location'] = list(loc[:3])
gf_fn[l].append('GF_{}_x_{}_y_{}_z_{}'.format(j, *loc[:3]))
gf_extrap_fn[l].append('GF_extrap_{}_x_{}_y_{}_z_{}'.format(j, *loc[:3]))
# gf_extrap_fn2 = 'extrap_GF_{}_x_{}_y_{}_z_{}'.format(j, *loc[:3])
GFOutput[0].parameters['filename_prefix'] = [gf_fn[l][i]]
if IBC.extraparameters['extrap']:
GFOutput[1].parameters['filename_prefix'] = [gf_extrap_fn[l][i]]
GFSim = BasicSim(FullGrid,
FullDecomposition,
Time,
FullModel,
FullSimulation,
GFInput,
GFOutput)
GFSim.create(gf_input_fn[l][i], path=Dir.parameters['ibc_gf'])
# %% ----------------------------------------------------------------------
# Create volume boundary list files
# -------------------------------------------------------------------------
gf_vb_fn = IBCSim.volume_boundary(prefix='GF', list_fn=gf_fn,
path=Dir.parameters['ibc_gf'])
inj_vb_fn = IBCSim.volume_boundary(prefix='injection', list_fn=injection_fn,
path=Dir.parameters['ref'])
# %% ----------------------------------------------------------------------
# Create utility files
# -------------------------------------------------------------------------
gf_util_fn = IBCSim.utility(prefix='GF',
nt=Time.parameters['number_of_timesteps'][0],
sinj='sinj_locations.txt',
srec='srec_locations.txt',
path=Dir.parameters['ibc_gf'])
inj_util_fn = InjectionSim.utility(prefix='injection',
nt=Time.parameters['number_of_timesteps'][0],
sinj='sinj_locations.txt',
srec='srec_locations_injection.txt',
path=Dir.parameters['ref'])
# %%
# for i in SrecLocations.locations:
#subprocess.call(['diff', ])
# diff_string = 'diff /w04d2/bfilippo/pymh/pymh/examples/IBC_simulation/GF_files/' + gf_extrap_fn + '.su /w04d2/bfilippo/matterhorn_filippo/tests/EBC/EBC_injection_v3/freesurface/EBC_simulation/GF_files/' + gf_extrap_fn2 + '.su'
# diff_string = 'diff /w04d2/bfilippo/pymh/pymh/examples/IBC_simulation/GF_files/' + gf_fn + '_volume_boundary /w04d2/bfilippo/matterhorn_filippo/tests/EBC/EBC_injection_v3/freesurface/EBC_simulation/GF_files/' + gf_fn + '_volume_boundary'
# diff_string = ['diff', 'IBC_simulation/GF_files/' + gf_fn + '_volume_boundary', 'IBC_simulation/GF_files/v3/EBC_simulation/GF_files/' + gf_fn + '_volume_boundary']
# print diff_string
# os.system(diff_string)
# subprocess.call(diff_string)
# %%
inprefix = 'model_full'
ext = '.su'
model_full = {}
for attr in ('vp', 'rho'):
model_full[attr] = segyread_new.SEGYFile('/'.join([Dir.parameters['ref'], inprefix + '_' + attr + ext]), isSU=True)
inprefix = 'model_ibc'
ext = '.su'
model_ibc = {}
for attr in ('vp', 'rho'):
model_ibc[attr] = segyread_new.SEGYFile('/'.join([Dir.parameters['ibc'], inprefix + '_' + attr + ext]), isSU=True)
inprefix = 'model_inj'
ext = '.su'
model_inj = {}
for attr in ('vp', 'rho'):
model_inj[attr] = segyread_new.SEGYFile('/'.join([Dir.parameters['inj'], inprefix + '_' + attr + ext]), isSU=True)
fig = plt.figure()
gs = gridspec.GridSpec(3, 3)
ax = fig.add_subplot(gs[0, 0])
im = ax.imshow(model_full['vp'][:].T, aspect='auto')
ax = fig.add_subplot(gs[0, 1])
im = ax.imshow(model_ibc['vp'][:].T, aspect='auto')
ax = fig.add_subplot(gs[0, 2])
im = ax.imshow(model_inj['vp'][:].T, aspect='auto')
ax = fig.add_subplot(gs[1, 0])
im = ax.imshow(model_full['rho'][:].T, aspect='auto')
ax = fig.add_subplot(gs[1, 1])
im = ax.imshow(model_ibc['rho'][:].T, aspect='auto')
ax = fig.add_subplot(gs[1, 2])
im = ax.imshow(model_inj['rho'][:].T, aspect='auto')
plt.show()
|
gpl-3.0
|
LiaoPan/scikit-learn
|
examples/linear_model/plot_lasso_lars.py
|
363
|
1080
|
#!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================
Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print("Computing regularization path using the LARS ...")
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
alexsavio/scikit-learn
|
examples/ensemble/plot_gradient_boosting_regression.py
|
87
|
2510
|
"""
============================
Gradient Boosting regression
============================
Demonstrate Gradient Boosting on the Boston housing dataset.
This example fits a Gradient Boosting model with least squares loss and
500 regression trees of depth 4.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
###############################################################################
# Load data
boston = datasets.load_boston()
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
###############################################################################
# Fit regression model
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2,
'learning_rate': 0.01, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
###############################################################################
# Plot training deviance
# compute test set deviance
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_predict(X_test)):
test_score[i] = clf.loss_(y_test, y_pred)
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, boston.feature_names[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
|
bsd-3-clause
|
davidgbe/scikit-learn
|
sklearn/tree/export.py
|
78
|
15814
|
"""
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Trevor Stephens <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from ..externals import six
from . import _criterion
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
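# A minimal sketch of the helper above: three evenly spaced hues, each an
# (R, G, B) triple of 8-bit values (no extra imports are needed).
def _demo_color_brew():
    palette = _color_brew(3)
    assert len(palette) == 3
    assert all(len(rgb) == 3 and all(0 <= channel <= 255 for channel in rgb)
               for rgb in palette)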
def export_graphviz(decision_tree, out_file="tree.dot", max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
alpha = int(255 * (sorted_values[0] - sorted_values[1]) /
(1 - sorted_values[1]))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] - colors['bounds'][0])))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
characters = ['#', '<SUB>', '</SUB>', '≤', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id], 4),
characters[4])
# Write impurity
if impurity:
if isinstance(criterion, _criterion.FriedmanMSE):
criterion = "friedman_mse"
elif not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], 4)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, 4)
elif proportion:
# Classification
value_text = np.around(value, 2)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, 4)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif tree.n_classes[0] == 1:
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
            out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
finally:
if own_file:
out_file.close()
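# Usage sketch: a minimal, hedged example of exercising the exporter above.
# It assumes the public sklearn.tree.export_graphviz wrapper and the iris
# loader from sklearn.datasets; the resulting DOT file can be rendered with
# the external Graphviz tool (dot -Tpng iris_tree.dot -o iris_tree.png).
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.tree import DecisionTreeClassifier, export_graphviz
    iris = load_iris()
    clf = DecisionTreeClassifier(max_depth=3, random_state=0)
    clf.fit(iris.data, iris.target)
    export_graphviz(clf, out_file="iris_tree.dot",
                    feature_names=iris.feature_names,
                    class_names=iris.target_names,
                    filled=True, rounded=True, special_characters=True)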
|
bsd-3-clause
|
ocefpaf/mplleaflet
|
examples/contour.py
|
4
|
1535
|
import os
import matplotlib.pyplot as plt
import geopandas as gpd
import mplleaflet
# Download a file from: http://water.weather.gov/precip/download.php
# and change the path below
filename = os.path.join(os.path.dirname(__file__),
'data',
'nws_precip_year2date_observed_shape_20140406',
'nws_precip_year2date_observed_20140406.shp')
df = gpd.read_file(filename)
# Negative values are missing data so just drop them
df.rename(columns=lambda x: x.lower(), inplace=True)
df = df[df['globvalue'] > 0]
# Setting the index, then calling unstack() creates the matrix of values
# indexed by Hrapx in the columns, Hrapy in the rows. Try to do that in
# MATLAB!
df.set_index(['hrapy', 'hrapx'], inplace=True)
df = df.unstack()
# Sorting the values here is unnecessary, but do it just in case
df.sort_index(axis=0, inplace=True)
df.sort_index(axis=1, inplace=True)
g = df['globvalue']
plt.contour(4762.5 * (g.columns.values - 401),
4762.5 * (g.index.values - 1601), g)
# See http://www.nws.noaa.gov/oh/hrl/distmodel/hrap.htm
# Note: The Proj.4 CRS definition below is gleaned from reading the NWS and
# Proj.4 docs. Reach out if it's not correct although the resulting map looks
# right.
crs = {'lon_0': -105.0,
'lat_ts': 60.0,
'R': 6371200,
'proj': 'stere',
'units': 'm',
'lat_0': 90.0}
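# Optional sanity check on the CRS above: a hedged sketch that assumes pyproj
# is installed (it is not required by mplleaflet itself). It projects a
# lon/lat pair into the HRAP stereographic plane and inverts it again, which
# should recover the original coordinates to within rounding error.
try:
    from pyproj import Proj
    hrap = Proj(crs)
    x, y = hrap(-100.0, 45.0)            # forward: lon/lat -> metres
    lon, lat = hrap(x, y, inverse=True)  # inverse: metres -> lon/lat
    assert abs(lon + 100.0) < 1e-6 and abs(lat - 45.0) < 1e-6
except ImportError:
    pass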
root, ext = os.path.splitext(__file__)
mapfile = root + '.html'
mplleaflet.show(crs=crs, path=mapfile, tiles='mapbox bright')
|
bsd-3-clause
|
msbeta/apollo
|
modules/tools/mapshow/libs/map.py
|
1
|
11041
|
#!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import random
import matplotlib.pyplot as plt
from matplotlib import cm as cmx
from matplotlib import colors as mcolors
import common.proto_utils as proto_utils
from modules.map.proto import map_pb2
class Map:
def __init__(self):
self.map_pb = map_pb2.Map()
self.colors = []
self.init_colors()
def init_colors(self):
color_num = 6
self.colors = []
values = range(color_num)
jet = plt.get_cmap('brg')
color_norm = mcolors.Normalize(vmin=0, vmax=values[-1])
scalar_map = cmx.ScalarMappable(norm=color_norm, cmap=jet)
for val in values:
color_val = scalar_map.to_rgba(val)
self.colors.append(color_val)
def load(self, map_file_name):
res = proto_utils.get_pb_from_file(map_file_name, self.map_pb)
return res != None
def draw_roads(self, ax):
cnt = 1
for road in self.map_pb.road:
color_val = self.colors[cnt % len(self.colors)]
self.draw_road(ax, road, color_val)
cnt += 1
def draw_road(self, ax, road, color_val):
for section in road.section:
for edge in section.boundary.outer_polygon.edge:
for segment in edge.curve.segment:
if segment.HasField('line_segment'):
px = []
py = []
for p in segment.line_segment.point:
px.append(float(p.x))
py.append(float(p.y))
ax.plot(px, py, ls='-', c=color_val, alpha=0.5)
def draw_lanes(self, ax, is_show_lane_ids, laneids, is_show_lane_details):
cnt = 1
for lane in self.map_pb.lane:
color_val = self.colors[cnt % len(self.colors)]
if len(laneids) == 0:
self._draw_lane_boundary(lane, ax, color_val)
self._draw_lane_central(lane, ax, color_val)
else:
if lane.id.id in laneids:
self._draw_lane_boundary(lane, ax, color_val)
self._draw_lane_central(lane, ax, color_val)
if is_show_lane_ids:
self._draw_lane_id(lane, ax, color_val)
elif is_show_lane_details:
self._draw_lane_details(lane, ax, color_val)
elif lane.id.id in laneids:
print str(lane)
self._draw_lane_id(lane, ax, color_val)
cnt += 1
def _draw_lane_id(self, lane, ax, color_val):
"""draw lane id"""
labelxys = []
labelxys.append((40, -40))
labelxys.append((-40, -40))
labelxys.append((40, 40))
labelxys.append((-40, 40))
has = ['right', 'left', 'right', 'left']
vas = ['bottom', 'bottom', 'top', 'top']
idx = random.randint(0, 3)
lxy = labelxys[idx]
x, y = self._find_lane_central_point(lane)
plt.annotate(
lane.id.id,
xy=(x, y), xytext=lxy,
textcoords='offset points', ha=has[idx], va=vas[idx],
bbox=dict(boxstyle='round,pad=0.5', fc=color_val, alpha=0.5),
arrowprops=dict(arrowstyle='-|>', connectionstyle='arc3,rad=-0.2',
fc=color_val, ec=color_val, alpha=0.5))
def _draw_lane_details(self, lane, ax, color_val):
"""draw lane id"""
labelxys = []
labelxys.append((40, -40))
labelxys.append((-40, -40))
labelxys.append((40, 40))
labelxys.append((-40, 40))
has = ['right', 'left', 'right', 'left']
vas = ['bottom', 'bottom', 'top', 'top']
idx = random.randint(0, 3)
lxy = labelxys[idx]
x, y = self._find_lane_central_point(lane)
details = str(lane.id.id)
for predecessor_id in lane.predecessor_id:
details += '\npre:' + str(predecessor_id.id)
for successor_id in lane.successor_id:
details += '\nsuc:' + str(successor_id.id)
for left_neighbor_forward_lane_id in lane.left_neighbor_forward_lane_id:
details += '\nlnf:' + str(left_neighbor_forward_lane_id.id)
for right_neighbor_forward_lane_id in lane.right_neighbor_forward_lane_id:
details += '\nrnf:' + str(right_neighbor_forward_lane_id.id)
for left_neighbor_reverse_lane_id in lane.left_neighbor_reverse_lane_id:
details += '\nlnr:' + str(left_neighbor_reverse_lane_id.id)
for right_neighbor_reverse_lane_id in lane.right_neighbor_reverse_lane_id:
details += '\nrnr:' + str(right_neighbor_reverse_lane_id.id)
plt.annotate(
details,
xy=(x, y), xytext=lxy,
textcoords='offset points', ha=has[idx], va=vas[idx],
bbox=dict(boxstyle='round,pad=0.5', fc=color_val, alpha=0.5),
arrowprops=dict(arrowstyle='-|>', connectionstyle='arc3,rad=-0.2',
fc=color_val, ec=color_val, alpha=0.5))
def draw_pnc_junctions(self, ax):
cnt = 1
for pnc_junction in self.map_pb.pnc_junction:
color_val = self.colors[cnt % len(self.colors)]
self._draw_pnc_boundary(pnc_junction, ax, color_val)
self._draw_pnc_junction_id(pnc_junction, ax, color_val)
cnt += 1
def _draw_pnc_junction_id(self, pnc_junction, ax, color_val):
"""draw pnc_junction id"""
labelxys = []
labelxys.append((40, -40))
labelxys.append((-40, -40))
labelxys.append((40, 40))
labelxys.append((-40, 40))
has = ['right', 'left', 'right', 'left']
vas = ['bottom', 'bottom', 'top', 'top']
idx = random.randint(0, 3)
lxy = labelxys[idx]
x = pnc_junction.polygon.point[0].x
y = pnc_junction.polygon.point[1].y
plt.annotate(
pnc_junction.id.id,
xy=(x, y), xytext=lxy,
textcoords='offset points', ha=has[idx], va=vas[idx],
bbox=dict(boxstyle='round,pad=0.5', fc=color_val, alpha=0.5),
arrowprops=dict(arrowstyle='-|>', connectionstyle='arc3,rad=-0.2',
fc=color_val, ec=color_val, alpha=0.5))
@staticmethod
def _find_lane_central_point(lane):
segment_idx = len(lane.left_boundary.curve.segment) / 2
median_segment = lane.left_boundary.curve.segment[segment_idx]
left_point_idx = len(median_segment.line_segment.point) / 2
left_median_point = median_segment.line_segment.point[left_point_idx]
segment_idx = len(lane.right_boundary.curve.segment) / 2
median_segment = lane.right_boundary.curve.segment[segment_idx]
right_point_idx = len(median_segment.line_segment.point) / 2
right_median_point = median_segment.line_segment.point[right_point_idx]
x = (left_median_point.x + right_median_point.x) / 2
y = (left_median_point.y + right_median_point.y) / 2
return x, y
@staticmethod
def _get_median_point(points):
"""get_median_point"""
if len(points) % 2 == 1:
point = points[len(points) / 2]
return point.x, point.y
else:
point1 = points[len(points) / 2 - 1]
point2 = points[len(points) / 2]
return (point1.x + point2.x) / 2.0, (point1.y + point2.y) / 2.0
@staticmethod
def _draw_lane_boundary(lane, ax, color_val):
"""draw boundary"""
for curve in lane.left_boundary.curve.segment:
if curve.HasField('line_segment'):
px = []
py = []
for p in curve.line_segment.point:
px.append(float(p.x))
py.append(float(p.y))
ax.plot(px, py, ls='-', c=color_val, alpha=0.5)
for curve in lane.right_boundary.curve.segment:
if curve.HasField('line_segment'):
px = []
py = []
for p in curve.line_segment.point:
px.append(float(p.x))
py.append(float(p.y))
ax.plot(px, py, ls='-', c=color_val, alpha=0.5)
@staticmethod
def _draw_lane_central(lane, ax, color_val):
"""draw boundary"""
for curve in lane.central_curve.segment:
if curve.HasField('line_segment'):
px = []
py = []
for p in curve.line_segment.point:
px.append(float(p.x))
py.append(float(p.y))
ax.plot(px, py, ls=':', c=color_val, alpha=0.5)
@staticmethod
def _draw_pnc_boundary(pnc_junction, ax, color_val):
"""draw boundary"""
px = []
py = []
for point in pnc_junction.polygon.point:
px.append(point.x)
py.append(point.y)
ax.plot(px, py, ls='-', c=color_val, alpha=0.5)
def draw_signal_lights(self, ax):
"""draw_signal_lights"""
for signal in self.map_pb.signal:
for stop_line in signal.stop_line:
for curve in stop_line.segment:
self._draw_stop_line(curve.line_segment, signal.id.id, ax, "mistyrose")
def draw_stop_signs(self, ax):
"""draw_stop_signs"""
for stop_sign in self.map_pb.stop_sign:
for stop_line in stop_sign.stop_line:
for curve in stop_line.segment:
self._draw_stop_line(curve.line_segment, stop_sign.id.id, ax, "yellow")
@staticmethod
def _draw_stop_line(line_segment, label, ax, label_color_val):
"""draw a signal"""
px = []
py = []
for p in line_segment.point:
px.append(float(p.x))
py.append(float(p.y))
ax.plot(px, py, 'o-')
lxy = [random.randint(20, 80) * random.sample([-1, 1], 1)[0],
random.randint(20, 80) * random.sample([-1, 1], 1)[0]]
xy = (sum(px) / len(px), sum(py) / len(py))
plt.annotate(
label,
xy=xy, xytext=lxy,
textcoords='offset points',
bbox=dict(boxstyle='round,pad=0.5', fc=label_color_val, alpha=0.5),
arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))
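# Usage sketch: a minimal, hedged example of driving the Map class above
# outside of the mapshow tooling. The map file path below is illustrative
# only; point it at any Apollo base map protobuf available locally.
if __name__ == '__main__':
    fig, ax = plt.subplots()
    base_map = Map()
    if base_map.load('modules/map/data/demo/base_map.bin'):
        base_map.draw_roads(ax)
        base_map.draw_lanes(ax, is_show_lane_ids=False, laneids=[],
                            is_show_lane_details=False)
        base_map.draw_signal_lights(ax)
        plt.axis('equal')
        plt.show()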
|
apache-2.0
|
christinahedges/PyKE
|
pyke/keppixseries.py
|
2
|
24628
|
from .utils import PyKEArgumentHelpFormatter
import numpy as np
from astropy.io import fits as pyfits
from matplotlib import pyplot as plt
from tqdm import tqdm
from . import kepio, kepmsg, kepkey, kepplot, kepstat, kepfunc
__all__ = ['keppixseries']
def keppixseries(infile, outfile=None, plotfile=None, plottype='global',
filterlc=False, function='boxcar', cutoff=1.0, overwrite=False,
verbose=False, logfile='keppixseries.log'):
"""
keppixseries -- individual time series photometry for all pixels within a
target mask
keppixseries plots a light curve for each individual pixel in a target
mask. Light curves are extracted from a target pixel file obtained from the
Kepler data archive at MAST. If required, the data can be fed through a
boxcar, gaussian or sinc function high bandpass filter in order to remove
low frequency signal from the data. keppixseries is a diagnostic tool for
identifying source contaminants in the background or foreground of the
target. It can be employed to identify pixels for inclusion or exclusion
when re-extracting a Kepler light curve from target pixel files.
Parameters
----------
infile : str
The name of a MAST standard format FITS file containing Kepler Target
Pixel data within the first data extension.
outfile : str
The name of the output FITS file. This file has two data extensions.
The first called 'PIXELSERIES' contains a table with columns of
barycenter-corrected time, barycenter time correction, cadence number,
cadence quality flag and a series of photometric light curves, one for
each pixel within the target mask. Each pixel is labeled COLx_ROWy,
where :math:`x` is the pixel column number and :math:`y` is the pixel
row number on the CCD module/output. The second extension contains the
mask definition map copied directly from the input target pixel file.
plotfile : str
Name of an optional diagnostic output plot file containing the results
of keppixseries. An example is provided in Figure 1. Typically this is
a PNG format file. If no diagnostic file is required, plotfile can be
'None'. The plot will be generated regardless of the value of this
field, but the plot will not be saved to a file if ``plotfile='None'``.
plottype : str
keppixseries can plot light curves of three types.
The choice is made using this argument. The options are:
* local - All individual pixel light curves are scaled separately to
provide the most dynamic range for each pixel.
* global - All pixel light curves are scaled between zero and the
maximum flux attained by the brightest pixel in the mask. This option
provides the relative contribution to the archived light curve by each
pixel.
    * full - All pixel light curves are scaled between zero and the
maximum flux attained by that pixel. This provides the fraction of
variability within each individual pixel.
filterlc : bool
If True, the light curve for each pixel will be treated by a high
band-pass filter to remove long-term trends from e.g. differential
velocity aberration.
function : str
The functional form of the high pass-band filter:
* boxcar
* gauss
* sinc
cutoff : float
The frequency of the high pass-band cutoff in units of :math:`days^{-1}`.
overwrite : bool
Overwrite the output file?
verbose : bool
Print informative messages and warnings to the shell and logfile?
    logfile : str
Name of the logfile containing error and warning messages.
Examples
--------
.. code-block :: bash
$ keppixseries kplr008256049-2010174085026_lpd-targ.fits.gz
.. image:: ../_static/images/api/keppixseries.png
:align: center
"""
if outfile is None:
outfile = infile.split('.')[0] + "-{}.fits".format(__all__[0])
# log the call
hashline = '--------------------------------------------------------------'
kepmsg.log(logfile, hashline, verbose)
call = ('KEPPIXSERIES -- '
+ ' infile={}'.format(infile)
+ ' outfile={}'.format(outfile)
+ ' plotfile={}'.format(plotfile)
+ ' plottype={}'.format(plottype)
+ ' filterlc={}'.format(filterlc)
+ ' function={}'.format(function)
+ ' cutoff={}'.format(cutoff)
+ ' overwrite={}'.format(overwrite)
+ ' verbose={}'.format(verbose)
+ ' logfile={}'.format(logfile))
kepmsg.log(logfile, call+'\n', verbose)
# start time
kepmsg.clock('KEPPIXSERIES started at', logfile, verbose)
# overwrite output file
if overwrite:
kepio.overwrite(outfile, logfile, verbose)
if kepio.fileexists(outfile):
errmsg = ('ERROR -- KEPPIXSERIES: {} exists. Use --overwrite'
.format(outfile))
kepmsg.err(logfile, errmsg, verbose)
# open TPF FITS file
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, barytime = \
kepio.readTPF(infile, 'TIME', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, tcorr = \
kepio.readTPF(infile, 'TIMECORR', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, cadno = \
kepio.readTPF(infile, 'CADENCENO', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, fluxpixels = \
kepio.readTPF(infile, 'FLUX', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, errpixels = \
kepio.readTPF(infile, 'FLUX_ERR', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, qual = \
kepio.readTPF(infile, 'QUALITY', logfile, verbose)
    # read mask definition data from TPF file
maskimg, pixcoord1, pixcoord2 = kepio.readMaskDefinition(infile, logfile,
verbose)
# print target data
print('')
print(' KepID: {}'.format(kepid))
print(' RA (J2000): {}'.format(ra))
print('Dec (J2000): {}'.format(dec))
print(' KepMag: {}'.format(kepmag))
print(' SkyGroup: {}'.format(skygroup))
print(' Season: {}'.format(season))
print(' Channel: {}'.format(channel))
print(' Module: {}'.format(module))
print(' Output: {}'.format(output))
print('')
# how many quality = 0 rows?
npts = 0
nrows = len(fluxpixels)
for i in range(nrows):
if (qual[i] == 0 and np.isfinite(barytime[i])
and np.isfinite(fluxpixels[i, ydim * xdim // 2])):
npts += 1
time = np.empty((npts))
timecorr = np.empty((npts))
cadenceno = np.empty((npts))
quality = np.empty((npts))
pixseries = np.empty((ydim, xdim, npts))
errseries = np.empty((ydim, xdim, npts))
# construct output light curves
nptsx = 0
for i in tqdm(range(ydim)):
for j in range(xdim):
npts = 0
for k in range(nrows):
if (qual[k] == 0 and np.isfinite(barytime[k])
and np.isfinite(fluxpixels[k, int(ydim*xdim/2)])):
time[npts] = barytime[k]
timecorr[npts] = tcorr[k]
cadenceno[npts] = cadno[k]
quality[npts] = qual[k]
pixseries[i, j, npts] = fluxpixels[k, nptsx]
errseries[i, j, npts] = errpixels[k, nptsx]
npts += 1
nptsx += 1
# define data sampling
if filterlc:
tpf = pyfits.open(infile)
cadence = kepkey.cadence(tpf[1], infile, logfile, verbose)
tr = 1.0 / (cadence / 86400)
timescale = 1.0 / (cutoff / tr)
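        # cadence is in seconds, so tr is the number of cadences per day and
        # timescale is the filter kernel width in cadences corresponding to
        # the requested cutoff frequency (given in 1/days)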
# define convolution function
if function == 'boxcar':
filtfunc = np.ones(int(np.ceil(timescale)))
elif function == 'gauss':
timescale /= 2
dx = np.ceil(timescale * 10 + 1)
            # Gaussian low-pass kernel (assumes kepfunc.gauss(params, x) with
            # params = [amplitude, centre, width])
            filtfunc = kepfunc.gauss([1.0, dx / 2 - 1.0, timescale],
                                     np.linspace(0, dx - 1, dx))
elif function == 'sinc':
dx = np.ceil(timescale * 12 + 1)
fx = np.linspace(0, dx - 1, dx)
fx = fx - dx / 2 + 0.5
fx /= timescale
filtfunc = np.sinc(fx)
filtfunc /= np.sum(filtfunc)
# pad time series at both ends with noise model
for i in range(ydim):
for j in range(xdim):
ave, sigma = (np.mean(pixseries[i, j, :len(filtfunc)]),
np.std(pixseries[i, j, :len(filtfunc)]))
padded = np.append(kepstat.randarray(np.ones(len(filtfunc)) * ave,
np.ones(len(filtfunc)) * sigma), pixseries[i, j, :])
ave, sigma = (np.mean(pixseries[i, j, -len(filtfunc):]),
np.std(pixseries[i, j, -len(filtfunc):]))
padded = np.append(padded,
kepstat.randarray(np.ones(len(filtfunc)) * ave,
np.ones(len(filtfunc)) * sigma))
# convolve data
convolved = np.convolve(padded, filtfunc, 'same')
# remove padding from the output array
outdata = convolved[len(filtfunc): -len(filtfunc)]
# subtract low frequencies
outmedian = np.median(outdata)
pixseries[i, j, :] = pixseries[i, j, :] - outdata + outmedian
# construct output file
print("Writing output file {}...".format(outfile))
if ydim * xdim < 1000:
instruct = pyfits.open(infile, 'readonly')
kepkey.history(call, instruct[0], outfile, logfile, verbose)
hdulist = pyfits.HDUList(instruct[0])
cols = []
cols.append(pyfits.Column(name='TIME', format='D',
unit='BJD - 2454833', disp='D12.7',
array=time))
cols.append(pyfits.Column(name='TIMECORR', format='E', unit='d',
disp='E13.6', array=timecorr))
cols.append(pyfits.Column(name='CADENCENO', format='J', disp='I10',
array=cadenceno))
cols.append(pyfits.Column(name='QUALITY', format='J', array=quality))
for i in range(ydim):
for j in range(xdim):
colname = 'COL{}_ROW{}'.format(i + column, j + row)
cols.append(pyfits.Column(name=colname, format='E',
disp='E13.6',
array=pixseries[i, j, :]))
hdu1 = pyfits.BinTableHDU.from_columns(pyfits.ColDefs(cols))
        # Set the new extension's identification keywords, then transfer the
        # remaining time and instrument keywords from the input TPF extension;
        # any keyword that cannot be copied is silently skipped.
        try:
            hdu1.header['INHERIT'] = (True, 'inherit the primary header')
        except:
            pass
        try:
            hdu1.header['EXTNAME'] = ('PIXELSERIES', 'name of extension')
        except:
            pass
        keywords_to_copy = [
            ('EXTVER', 'extension version number (not format version)'),
            ('TELESCOP', 'telescope'),
            ('INSTRUME', 'detector type'),
            ('OBJECT', 'string version of KEPLERID'),
            ('KEPLERID', 'unique Kepler target identifier'),
            ('RADESYS', 'reference frame of celestial coordinates'),
            ('RA_OBJ', '[deg] right ascension from KIC'),
            ('DEC_OBJ', '[deg] declination from KIC'),
            ('EQUINOX', 'equinox of celestial coordinate system'),
            ('TIMEREF', 'barycentric correction applied to times'),
            ('TASSIGN', 'where time is assigned'),
            ('TIMESYS', 'time system is barycentric JD'),
            ('BJDREFI', 'integer part of BJD reference date'),
            ('BJDREFF', 'fraction of the day in BJD reference date'),
            ('TIMEUNIT', 'time unit for TIME, TSTART and TSTOP'),
            ('TSTART', 'observation start time in BJD-BJDREF'),
            ('TSTOP', 'observation stop time in BJD-BJDREF'),
            ('LC_START', 'mid point of first cadence in MJD'),
            ('LC_END', 'mid point of last cadence in MJD'),
            ('TELAPSE', '[d] TSTOP - TSTART'),
            ('LIVETIME', '[d] TELAPSE multiplied by DEADC'),
            ('EXPOSURE', '[d] time on source'),
            ('DEADC', 'deadtime correction'),
            ('TIMEPIXR', 'bin time beginning=0 middle=0.5 end=1'),
            ('TIERRELA', '[d] relative time error'),
            ('TIERABSO', '[d] absolute time error'),
            ('INT_TIME', '[s] photon accumulation time per frame'),
            ('READTIME', '[s] readout time per frame'),
            ('FRAMETIM', '[s] frame time (INT_TIME + READTIME)'),
            ('NUM_FRM', 'number of frames per time stamp'),
            ('TIMEDEL', '[d] time resolution of data'),
            ('DATE-OBS', 'TSTART as UTC calendar date'),
            ('DATE-END', 'TSTOP as UTC calendar date'),
            ('BACKAPP', 'background is subtracted'),
            ('DEADAPP', 'deadtime applied'),
            ('VIGNAPP', 'vignetting or collimator correction applied'),
            ('GAIN', '[electrons/count] channel gain'),
            ('READNOIS', '[electrons] read noise'),
            ('NREADOUT', 'number of read per cadence'),
            ('TIMSLICE', 'time-slice readout sequence section'),
            ('MEANBLCK', '[count] FSW mean black level'),
        ]
        for keyword, comment in keywords_to_copy:
            try:
                hdu1.header[keyword] = (instruct[1].header[keyword], comment)
            except:
                pass
hdulist.append(hdu1)
hdulist.writeto(outfile)
kepkey.new('EXTNAME', 'APERTURE', 'name of extension', instruct[2],
outfile, logfile, verbose)
pyfits.append(outfile, instruct[2].data, instruct[2].header)
instruct.close()
else:
        warnmsg = ('WARNING -- KEPPIXSERIES: output FITS file requires > 999 '
                   'columns. Non-compliant with FITS convention.')
kepmsg.warn(logfile, warnmsg, verbose)
# plot pixel array
fmin = 1.0e33
    fmax = -1.0e33
plt.figure()
plt.clf()
dx = 0.93 / xdim
dy = 0.94 / ydim
ax = plt.axes([0.06, 0.05, 0.93, 0.94])
plt.gca().xaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))
plt.gca().yaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))
plt.gca().xaxis.set_major_locator(plt.MaxNLocator(integer=True))
plt.gca().yaxis.set_major_locator(plt.MaxNLocator(integer=True))
plt.xlim(np.min(pixcoord1) - 0.5, np.max(pixcoord1) + 0.5)
plt.ylim(np.min(pixcoord2) - 0.5, np.max(pixcoord2) + 0.5)
plt.xlabel('time', {'color' : 'k'})
plt.ylabel('arbitrary flux', {'color' : 'k'})
for i in range(ydim):
for j in range(xdim):
tmin = np.amin(time)
tmax = np.amax(time)
try:
                np.isfinite(np.amin(pixseries[i, j, :]))
                np.isfinite(np.amax(pixseries[i, j, :]))
fmin = np.amin(pixseries[i, j, :])
fmax = np.amax(pixseries[i, j, :])
except:
ugh = 1
xmin = tmin - (tmax - tmin) / 40
xmax = tmax + (tmax - tmin) / 40
ymin = fmin - (fmax - fmin) / 20
ymax = fmax + (fmax - fmin) / 20
if kepstat.bitInBitmap(maskimg[i, j], 2):
plt.axes([0.06 + float(j) * dx, 0.05 + i * dy, dx, dy],
facecolor='lightslategray')
elif maskimg[i, j] == 0:
plt.axes([0.06 + float(j) * dx, 0.05 + i * dy, dx, dy],
facecolor='black')
else:
plt.axes([0.06 + float(j) * dx, 0.05 + i * dy, dx, dy])
if j == int(xdim / 2) and i == 0:
plt.setp(plt.gca(), xticklabels=[], yticklabels=[])
elif j == 0 and i == int(ydim / 2):
plt.setp(plt.gca(), xticklabels=[], yticklabels=[])
else:
plt.setp(plt.gca(), xticklabels=[], yticklabels=[])
ptime = time * 1.0
ptime = np.insert(ptime, [0], ptime[0])
ptime = np.append(ptime, ptime[-1])
pflux = pixseries[i, j, :] * 1.0
pflux = np.insert(pflux, [0], -1000.0)
pflux = np.append(pflux, -1000.0)
plt.plot(time,pixseries[i, j, :], color='#0000ff', linestyle='-',
linewidth=0.5)
if not kepstat.bitInBitmap(maskimg[i, j], 2):
plt.fill(ptime, pflux, fc='lightslategray', linewidth=0.0,
alpha=1.0)
plt.fill(ptime, pflux, fc='#FFF380', linewidth=0.0,alpha=1.0)
if 'loc' in plottype:
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
if 'glob' in plottype:
plt.xlim(xmin, xmax)
plt.ylim(1.0e-10, np.nanmax(pixseries) * 1.05)
if 'full' in plottype:
plt.xlim(xmin, xmax)
plt.ylim(1.0e-10, ymax * 1.05)
    # render plot and, unless no plot file was requested, save it to disk
    if plotfile is not None and str(plotfile).lower() != 'none':
        plt.savefig(plotfile)
    plt.show()
# stop time
kepmsg.clock('KEPPIXSERIES ended at', logfile, verbose)
def keppixseries_main():
import argparse
parser = argparse.ArgumentParser(
description=('Individual time series photometry for all pixels'
' within a target mask'),
formatter_class=PyKEArgumentHelpFormatter)
parser.add_argument('infile', help='Name of input file', type=str)
parser.add_argument('--outfile',
help=('Name of FITS file to output.'
' If None, outfile is infile-keppixseries.'),
default=None)
parser.add_argument('--plotfile', default='None',
help='name of output PNG plot file', type=str)
parser.add_argument('--plottype', default='global', help='Plotting type',
type=str, choices=['local','global','full'])
parser.add_argument('--filterlc', action='store_true',
help='High-pass Filter data?')
parser.add_argument('--function', default='boxcar', help='Type of filter',
type=str, choices=['boxcar','gauss','sinc'])
parser.add_argument('--cutoff', default=1.0,
help='Characteristic frequency cutoff of filter [1/days]',
type=float)
parser.add_argument('--overwrite', action='store_true',
help='Overwrite output file?')
parser.add_argument('--verbose', action='store_true',
help='Write to a log file?')
parser.add_argument('--logfile', '-l', help='Name of ascii log file',
default='keppixseries.log', dest='logfile', type=str)
args = parser.parse_args()
keppixseries(args.infile, args.outfile, args.plotfile, args.plottype,
args.filterlc, args.function, args.cutoff, args.overwrite,
args.verbose, args.logfile)
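# Usage sketch: a hedged example of calling keppixseries from Python rather
# than via the CLI wrapper above. The target pixel file name is taken from
# the docstring example; any Kepler/K2 TPF downloaded from MAST should work.
if __name__ == "__main__":
    keppixseries("kplr008256049-2010174085026_lpd-targ.fits.gz",
                 plotfile="keppixseries.png", plottype="global",
                 filterlc=True, function="boxcar", cutoff=1.0,
                 overwrite=True, verbose=True)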
|
mit
|
xuewei4d/scikit-learn
|
sklearn/model_selection/tests/test_split.py
|
6
|
63286
|
"""Test the split module"""
import warnings
import pytest
import numpy as np
from scipy.sparse import coo_matrix, csc_matrix, csr_matrix
from scipy import stats
from scipy.special import comb
from itertools import combinations
from itertools import combinations_with_replacement
from itertools import permutations
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_raises
from sklearn.utils._testing import assert_raises_regexp
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_warns_message
from sklearn.utils._testing import assert_raise_message
from sklearn.utils._testing import ignore_warnings
from sklearn.utils.validation import _num_samples
from sklearn.utils._mocking import MockDataFrame
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import TimeSeriesSplit
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import PredefinedSplit
from sklearn.model_selection import check_cv
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.linear_model import Ridge
from sklearn.model_selection._split import _validate_shuffle_split
from sklearn.model_selection._split import _build_repr
from sklearn.model_selection._split import _yields_constant_splits
from sklearn.datasets import load_digits
from sklearn.datasets import make_classification
from sklearn.svm import SVC
X = np.ones(10)
y = np.arange(10) // 2
P_sparse = coo_matrix(np.eye(5))
test_groups = (
np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
[1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3],
['1', '1', '1', '1', '2', '2', '2', '3', '3', '3', '3', '3'])
digits = load_digits()
@ignore_warnings
def test_cross_validator_with_default_params():
n_samples = 4
n_unique_groups = 4
n_splits = 2
p = 2
n_shuffle_splits = 10 # (the default value)
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
X_1d = np.array([1, 2, 3, 4])
y = np.array([1, 1, 2, 2])
groups = np.array([1, 2, 3, 4])
loo = LeaveOneOut()
lpo = LeavePOut(p)
kf = KFold(n_splits)
skf = StratifiedKFold(n_splits)
lolo = LeaveOneGroupOut()
lopo = LeavePGroupsOut(p)
ss = ShuffleSplit(random_state=0)
    ps = PredefinedSplit([1, 1, 2, 2])  # n_splits = number of unique folds = 2
loo_repr = "LeaveOneOut()"
lpo_repr = "LeavePOut(p=2)"
kf_repr = "KFold(n_splits=2, random_state=None, shuffle=False)"
skf_repr = "StratifiedKFold(n_splits=2, random_state=None, shuffle=False)"
lolo_repr = "LeaveOneGroupOut()"
lopo_repr = "LeavePGroupsOut(n_groups=2)"
ss_repr = ("ShuffleSplit(n_splits=10, random_state=0, "
"test_size=None, train_size=None)")
ps_repr = "PredefinedSplit(test_fold=array([1, 1, 2, 2]))"
n_splits_expected = [n_samples, comb(n_samples, p), n_splits, n_splits,
n_unique_groups, comb(n_unique_groups, p),
n_shuffle_splits, 2]
for i, (cv, cv_repr) in enumerate(zip(
[loo, lpo, kf, skf, lolo, lopo, ss, ps],
[loo_repr, lpo_repr, kf_repr, skf_repr, lolo_repr, lopo_repr,
ss_repr, ps_repr])):
# Test if get_n_splits works correctly
assert n_splits_expected[i] == cv.get_n_splits(X, y, groups)
# Test if the cross-validator works as expected even if
# the data is 1d
np.testing.assert_equal(list(cv.split(X, y, groups)),
list(cv.split(X_1d, y, groups)))
# Test that train, test indices returned are integers
for train, test in cv.split(X, y, groups):
assert np.asarray(train).dtype.kind == 'i'
assert np.asarray(test).dtype.kind == 'i'
# Test if the repr works without any errors
assert cv_repr == repr(cv)
# ValueError for get_n_splits methods
msg = "The 'X' parameter should not be None."
assert_raise_message(ValueError, msg,
loo.get_n_splits, None, y, groups)
assert_raise_message(ValueError, msg,
lpo.get_n_splits, None, y, groups)
def test_2d_y():
# smoke test for 2d y and multi-label
n_samples = 30
rng = np.random.RandomState(1)
X = rng.randint(0, 3, size=(n_samples, 2))
y = rng.randint(0, 3, size=(n_samples,))
y_2d = y.reshape(-1, 1)
y_multilabel = rng.randint(0, 2, size=(n_samples, 3))
groups = rng.randint(0, 3, size=(n_samples,))
splitters = [LeaveOneOut(), LeavePOut(p=2), KFold(), StratifiedKFold(),
RepeatedKFold(), RepeatedStratifiedKFold(),
ShuffleSplit(), StratifiedShuffleSplit(test_size=.5),
GroupShuffleSplit(), LeaveOneGroupOut(),
LeavePGroupsOut(n_groups=2), GroupKFold(n_splits=3),
TimeSeriesSplit(), PredefinedSplit(test_fold=groups)]
for splitter in splitters:
list(splitter.split(X, y, groups))
list(splitter.split(X, y_2d, groups))
try:
list(splitter.split(X, y_multilabel, groups))
except ValueError as e:
allowed_target_types = ('binary', 'multiclass')
msg = "Supported target types are: {}. Got 'multilabel".format(
allowed_target_types)
assert msg in str(e)
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert train.intersection(test) == set()
if n_samples is not None:
        # Check that the union of train and test splits covers all the indices
assert train.union(test) == set(range(n_samples))
def check_cv_coverage(cv, X, y, groups, expected_n_splits):
n_samples = _num_samples(X)
    # Check that all the samples appear at least once in a test fold
assert cv.get_n_splits(X, y, groups) == expected_n_splits
collected_test_samples = set()
iterations = 0
for train, test in cv.split(X, y, groups):
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert iterations == expected_n_splits
if n_samples is not None:
assert collected_test_samples == set(range(n_samples))
def test_kfold_valueerrors():
X1 = np.array([[1, 2], [3, 4], [5, 6]])
X2 = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]])
# Check that errors are raised if there is not enough samples
    assert_raises(ValueError, next, KFold(4).split(X1))
# Check that a warning is raised if the least populated class has too few
# members.
y = np.array([3, 3, -1, -1, 3])
skf_3 = StratifiedKFold(3)
assert_warns_message(Warning, "The least populated class",
next, skf_3.split(X2, y))
# Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented on each
# side of the split at each split
with warnings.catch_warnings():
warnings.simplefilter("ignore")
check_cv_coverage(skf_3, X2, y, groups=None, expected_n_splits=3)
# Check that errors are raised if all n_groups for individual
# classes are less than n_splits.
y = np.array([3, 3, -1, -1, 2])
assert_raises(ValueError, next, skf_3.split(X2, y))
# Error when number of folds is <= 1
assert_raises(ValueError, KFold, 0)
assert_raises(ValueError, KFold, 1)
error_string = ("k-fold cross-validation requires at least one"
" train/test split")
assert_raise_message(ValueError, error_string,
StratifiedKFold, 0)
assert_raise_message(ValueError, error_string,
StratifiedKFold, 1)
# When n_splits is not integer:
assert_raises(ValueError, KFold, 1.5)
assert_raises(ValueError, KFold, 2.0)
assert_raises(ValueError, StratifiedKFold, 1.5)
assert_raises(ValueError, StratifiedKFold, 2.0)
# When shuffle is not a bool:
assert_raises(TypeError, KFold, n_splits=4, shuffle=None)
def test_kfold_indices():
# Check all indices are returned in the test folds
X1 = np.ones(18)
kf = KFold(3)
check_cv_coverage(kf, X1, y=None, groups=None, expected_n_splits=3)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
X2 = np.ones(17)
kf = KFold(3)
check_cv_coverage(kf, X2, y=None, groups=None, expected_n_splits=3)
# Check if get_n_splits returns the number of folds
assert 5 == KFold(5).get_n_splits(X2)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
X2 = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
splits = KFold(2).split(X2[:-1])
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = KFold(2).split(X2)
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
X, y = np.ones(4), [1, 1, 0, 0]
splits = StratifiedKFold(2).split(X, y)
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
X, y = np.ones(7), [1, 1, 1, 0, 0, 0, 0]
splits = StratifiedKFold(2).split(X, y)
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
# Check if get_n_splits returns the number of folds
assert 5 == StratifiedKFold(5).get_n_splits(X, y)
# Make sure string labels are also supported
X = np.ones(7)
y1 = ['1', '1', '1', '0', '0', '0', '0']
y2 = [1, 1, 1, 0, 0, 0, 0]
np.testing.assert_equal(
list(StratifiedKFold(2).split(X, y1)),
list(StratifiedKFold(2).split(X, y2)))
# Check equivalence to KFold
y = [0, 1, 0, 1, 0, 1, 0, 1]
X = np.ones_like(y)
np.testing.assert_equal(
list(StratifiedKFold(3).split(X, y)),
list(KFold(3).split(X, y)))
@pytest.mark.parametrize('shuffle', [False, True])
@pytest.mark.parametrize('k', [4, 5, 6, 7, 8, 9, 10])
def test_stratified_kfold_ratios(k, shuffle):
# Check that stratified kfold preserves class ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
X = np.ones(n_samples)
y = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
distr = np.bincount(y) / len(y)
test_sizes = []
random_state = None if not shuffle else 0
skf = StratifiedKFold(k, random_state=random_state, shuffle=shuffle)
for train, test in skf.split(X, y):
assert_allclose(np.bincount(y[train]) / len(train), distr, atol=0.02)
assert_allclose(np.bincount(y[test]) / len(test), distr, atol=0.02)
test_sizes.append(len(test))
assert np.ptp(test_sizes) <= 1
@pytest.mark.parametrize('shuffle', [False, True])
@pytest.mark.parametrize('k', [4, 6, 7])
def test_stratified_kfold_label_invariance(k, shuffle):
# Check that stratified kfold gives the same indices regardless of labels
n_samples = 100
y = np.array([2] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
X = np.ones(len(y))
def get_splits(y):
random_state = None if not shuffle else 0
return [(list(train), list(test))
for train, test
in StratifiedKFold(k, random_state=random_state,
shuffle=shuffle).split(X, y)]
splits_base = get_splits(y)
for perm in permutations([0, 1, 2]):
y_perm = np.take(perm, y)
splits_perm = get_splits(y_perm)
assert splits_perm == splits_base
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for i in range(11, 17):
kf = KFold(5).split(X=np.ones(i))
sizes = [len(test) for _, test in kf]
assert (np.max(sizes) - np.min(sizes)) <= 1
assert np.sum(sizes) == i
def test_stratifiedkfold_balance():
    # Check that StratifiedKFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
X = np.ones(17)
y = [0] * 3 + [1] * 14
for shuffle in (True, False):
cv = StratifiedKFold(3, shuffle=shuffle)
for i in range(11, 17):
skf = cv.split(X[:i], y[:i])
sizes = [len(test) for _, test in skf]
assert (np.max(sizes) - np.min(sizes)) <= 1
assert np.sum(sizes) == i
def test_shuffle_kfold():
# Check the indices are shuffled properly
kf = KFold(3)
kf2 = KFold(3, shuffle=True, random_state=0)
kf3 = KFold(3, shuffle=True, random_state=1)
X = np.ones(300)
all_folds = np.zeros(300)
for (tr1, te1), (tr2, te2), (tr3, te3) in zip(
kf.split(X), kf2.split(X), kf3.split(X)):
for tr_a, tr_b in combinations((tr1, tr2, tr3), 2):
# Assert that there is no complete overlap
assert len(np.intersect1d(tr_a, tr_b)) != len(tr1)
# Set all test indices in successive iterations of kf2 to 1
all_folds[te2] = 1
# Check that all indices are returned in the different test folds
assert sum(all_folds) == 300
def test_shuffle_kfold_stratifiedkfold_reproducibility():
X = np.ones(15) # Divisible by 3
y = [0] * 7 + [1] * 8
X2 = np.ones(16) # Not divisible by 3
y2 = [0] * 8 + [1] * 8
# Check that when the shuffle is True, multiple split calls produce the
# same split when random_state is int
kf = KFold(3, shuffle=True, random_state=0)
skf = StratifiedKFold(3, shuffle=True, random_state=0)
for cv in (kf, skf):
np.testing.assert_equal(list(cv.split(X, y)), list(cv.split(X, y)))
np.testing.assert_equal(list(cv.split(X2, y2)), list(cv.split(X2, y2)))
# Check that when the shuffle is True, multiple split calls often
# (not always) produce different splits when random_state is
# RandomState instance or None
kf = KFold(3, shuffle=True, random_state=np.random.RandomState(0))
skf = StratifiedKFold(3, shuffle=True,
random_state=np.random.RandomState(0))
for cv in (kf, skf):
for data in zip((X, X2), (y, y2)):
            # Test that two successive split calls yield different splits
for (_, test_a), (_, test_b) in zip(cv.split(*data),
cv.split(*data)):
# cv.split(...) returns an array of tuples, each tuple
# consisting of an array with train indices and test indices
# Ensure that the splits for data are not same
# when random state is not set
with pytest.raises(AssertionError):
np.testing.assert_array_equal(test_a, test_b)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
X_40 = np.ones(40)
y = [0] * 20 + [1] * 20
kf0 = StratifiedKFold(5, shuffle=True, random_state=0)
kf1 = StratifiedKFold(5, shuffle=True, random_state=1)
for (_, test0), (_, test1) in zip(kf0.split(X_40, y),
kf1.split(X_40, y)):
assert set(test0) != set(test1)
check_cv_coverage(kf0, X_40, y, groups=None, expected_n_splits=5)
# Ensure that we shuffle each class's samples with different
# random_state in StratifiedKFold
# See https://github.com/scikit-learn/scikit-learn/pull/13124
X = np.arange(10)
y = [0] * 5 + [1] * 5
kf1 = StratifiedKFold(5, shuffle=True, random_state=0)
kf2 = StratifiedKFold(5, shuffle=True, random_state=1)
test_set1 = sorted([tuple(s[1]) for s in kf1.split(X, y)])
test_set2 = sorted([tuple(s[1]) for s in kf2.split(X, y)])
assert test_set1 != test_set2
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
# for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.93) than the non-shuffling
    # variant (around 0.81).
X, y = digits.data[:600], digits.target[:600]
model = SVC(C=10, gamma=0.005)
n_splits = 3
cv = KFold(n_splits=n_splits, shuffle=False)
mean_score = cross_val_score(model, X, y, cv=cv).mean()
assert 0.92 > mean_score
assert mean_score > 0.80
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = KFold(n_splits, shuffle=True, random_state=0)
mean_score = cross_val_score(model, X, y, cv=cv).mean()
assert mean_score > 0.92
cv = KFold(n_splits, shuffle=True, random_state=1)
mean_score = cross_val_score(model, X, y, cv=cv).mean()
assert mean_score > 0.92
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = StratifiedKFold(n_splits)
mean_score = cross_val_score(model, X, y, cv=cv).mean()
assert 0.94 > mean_score
assert mean_score > 0.80
def test_shuffle_split():
ss1 = ShuffleSplit(test_size=0.2, random_state=0).split(X)
ss2 = ShuffleSplit(test_size=2, random_state=0).split(X)
ss3 = ShuffleSplit(test_size=np.int32(2), random_state=0).split(X)
ss4 = ShuffleSplit(test_size=int(2), random_state=0).split(X)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
@pytest.mark.parametrize("split_class", [ShuffleSplit,
StratifiedShuffleSplit])
@pytest.mark.parametrize("train_size, exp_train, exp_test",
[(None, 9, 1),
(8, 8, 2),
(0.8, 8, 2)])
def test_shuffle_split_default_test_size(split_class, train_size, exp_train,
exp_test):
    # Check that the default test_size has the expected behavior: 0.1 when
    # train_size is also unspecified, otherwise the complement of train_size
    # (unless both are specified explicitly).
X = np.ones(10)
y = np.ones(10)
X_train, X_test = next(split_class(train_size=train_size).split(X, y))
assert len(X_train) == exp_train
assert len(X_test) == exp_test
@pytest.mark.parametrize("train_size, exp_train, exp_test",
[(None, 8, 2),
(7, 7, 3),
(0.7, 7, 3)])
def test_group_shuffle_split_default_test_size(train_size, exp_train,
exp_test):
    # Check that the default test_size has the expected behavior: 0.2 when
    # train_size is also unspecified, otherwise the complement of train_size
    # (unless both are specified explicitly).
X = np.ones(10)
y = np.ones(10)
groups = range(10)
X_train, X_test = next(GroupShuffleSplit(train_size=train_size)
.split(X, y, groups))
assert len(X_train) == exp_train
assert len(X_test) == exp_test
@ignore_warnings
def test_stratified_shuffle_split_init():
X = np.arange(7)
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, next,
StratifiedShuffleSplit(3, 0.2).split(X, y))
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, next, StratifiedShuffleSplit(3, 2).split(X, y))
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, next,
StratifiedShuffleSplit(3, 3, 2).split(X, y))
X = np.arange(9)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Train size or test size too small
assert_raises(ValueError, next,
StratifiedShuffleSplit(train_size=2).split(X, y))
assert_raises(ValueError, next,
StratifiedShuffleSplit(test_size=2).split(X, y))
def test_stratified_shuffle_split_respects_test_size():
y = np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2])
test_size = 5
train_size = 10
sss = StratifiedShuffleSplit(6, test_size=test_size, train_size=train_size,
random_state=0).split(np.ones(len(y)), y)
for train, test in sss:
assert len(train) == train_size
assert len(test) == test_size
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50),
np.concatenate([[i] * (100 + i) for i in range(11)]),
[1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3],
['1', '1', '1', '1', '2', '2', '2', '3', '3', '3', '3', '3'],
]
for y in ys:
sss = StratifiedShuffleSplit(6, test_size=0.33,
random_state=0).split(np.ones(len(y)), y)
y = np.asanyarray(y) # To make it indexable for y[train]
# this is how test-size is computed internally
# in _validate_shuffle_split
test_size = np.ceil(0.33 * len(y))
train_size = len(y) - test_size
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train],
return_inverse=True)[1]) /
float(len(y[train])))
p_test = (np.bincount(np.unique(y[test],
return_inverse=True)[1]) /
float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert len(train) + len(test) == y.size
assert len(train) == train_size
assert len(test) == test_size
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test that StratifiedShuffleSplit draws indices with equal chance
n_folds = 5
n_splits = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
prob = bf.pmf(count)
assert prob > threshold, \
"An index is not drawn with chance corresponding to even draws"
for n_samples in (6, 22):
groups = np.array((n_samples // 2) * [0, 1])
splits = StratifiedShuffleSplit(n_splits=n_splits,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits_actual = 0
for train, test in splits.split(X=np.ones(n_samples), y=groups):
n_splits_actual += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert n_splits_actual == n_splits
n_train, n_test = _validate_shuffle_split(
n_samples, test_size=1. / n_folds, train_size=1. - (1. / n_folds))
assert len(train) == n_train
assert len(test) == n_test
assert len(set(train).intersection(test)) == 0
group_counts = np.unique(groups)
assert splits.test_size == 1.0 / n_folds
assert n_train + n_test == len(groups)
assert len(group_counts) == 2
ex_test_p = float(n_test) / n_samples
ex_train_p = float(n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_stratified_shuffle_split_overlap_train_test_bug():
# See https://github.com/scikit-learn/scikit-learn/issues/6121 for
# the original bug report
y = [0, 1, 2, 3] * 3 + [4, 5] * 5
X = np.ones_like(y)
sss = StratifiedShuffleSplit(n_splits=1,
test_size=0.5, random_state=0)
train, test = next(sss.split(X=X, y=y))
# no overlap
assert_array_equal(np.intersect1d(train, test), [])
# complete partition
assert_array_equal(np.union1d(train, test), np.arange(len(y)))
def test_stratified_shuffle_split_multilabel():
# fix for issue 9037
for y in [np.array([[0, 1], [1, 0], [1, 0], [0, 1]]),
np.array([[0, 1], [1, 1], [1, 1], [0, 1]])]:
X = np.ones_like(y)
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0)
train, test = next(sss.split(X=X, y=y))
y_train = y[train]
y_test = y[test]
# no overlap
assert_array_equal(np.intersect1d(train, test), [])
# complete partition
assert_array_equal(np.union1d(train, test), np.arange(len(y)))
# correct stratification of entire rows
# (by design, here y[:, 0] uniquely determines the entire row of y)
expected_ratio = np.mean(y[:, 0])
assert expected_ratio == np.mean(y_train[:, 0])
assert expected_ratio == np.mean(y_test[:, 0])
def test_stratified_shuffle_split_multilabel_many_labels():
# fix in PR #9922: for multilabel data with > 1000 labels, str(row)
# truncates with an ellipsis for elements in positions 4 through
# len(row) - 4, so labels were not being correctly split using the powerset
# method for transforming a multilabel problem to a multiclass one; this
# test checks that this problem is fixed.
row_with_many_zeros = [1, 0, 1] + [0] * 1000 + [1, 0, 1]
row_with_many_ones = [1, 0, 1] + [1] * 1000 + [1, 0, 1]
y = np.array([row_with_many_zeros] * 10 + [row_with_many_ones] * 100)
X = np.ones_like(y)
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0)
train, test = next(sss.split(X=X, y=y))
y_train = y[train]
y_test = y[test]
# correct stratification of entire rows
# (by design, here y[:, 4] uniquely determines the entire row of y)
expected_ratio = np.mean(y[:, 4])
assert expected_ratio == np.mean(y_train[:, 4])
assert expected_ratio == np.mean(y_test[:, 4])
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = np.full(10, -1.)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(KFold(5, shuffle=True).split(X)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps = PredefinedSplit(folds)
# n_splits is simply the no of unique folds
assert len(np.unique(folds)) == ps.get_n_splits()
ps_train, ps_test = zip(*ps.split())
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_group_shuffle_split():
for groups_i in test_groups:
X = y = np.ones(len(groups_i))
n_splits = 6
test_size = 1. / 3
slo = GroupShuffleSplit(n_splits, test_size=test_size, random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert slo.get_n_splits(X, y, groups=groups_i) == n_splits
l_unique = np.unique(groups_i)
l = np.asarray(groups_i)
for train, test in slo.split(X, y, groups=groups_i):
# First test: no train group is in the test set and vice versa
l_train_unique = np.unique(l[train])
l_test_unique = np.unique(l[test])
assert not np.any(np.in1d(l[train], l_test_unique))
assert not np.any(np.in1d(l[test], l_train_unique))
# Second test: train and test add up to all the data
assert l[train].size + l[test].size == l.size
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test:
# unique train and test groups are correct, +- 1 for rounding error
assert abs(len(l_test_unique) -
round(test_size * len(l_unique))) <= 1
assert abs(len(l_train_unique) -
round((1.0 - test_size) * len(l_unique))) <= 1
def test_leave_one_p_group_out():
logo = LeaveOneGroupOut()
lpgo_1 = LeavePGroupsOut(n_groups=1)
lpgo_2 = LeavePGroupsOut(n_groups=2)
# Make sure the repr works
assert repr(logo) == 'LeaveOneGroupOut()'
assert repr(lpgo_1) == 'LeavePGroupsOut(n_groups=1)'
assert repr(lpgo_2) == 'LeavePGroupsOut(n_groups=2)'
assert (repr(LeavePGroupsOut(n_groups=3)) ==
'LeavePGroupsOut(n_groups=3)')
for j, (cv, p_groups_out) in enumerate(((logo, 1), (lpgo_1, 1),
(lpgo_2, 2))):
for i, groups_i in enumerate(test_groups):
n_groups = len(np.unique(groups_i))
n_splits = (n_groups if p_groups_out == 1
else n_groups * (n_groups - 1) / 2)
X = y = np.ones(len(groups_i))
# Test that the length is correct
assert cv.get_n_splits(X, y, groups=groups_i) == n_splits
groups_arr = np.asarray(groups_i)
# Split using the original list / array / list of string groups_i
for train, test in cv.split(X, y, groups=groups_i):
# First test: no train group is in the test set and vice versa
assert_array_equal(np.intersect1d(groups_arr[train],
groups_arr[test]).tolist(),
[])
# Second test: train and test add up to all the data
assert len(train) + len(test) == len(groups_i)
# Third test:
# The number of groups in test must be equal to p_groups_out
assert np.unique(groups_arr[test]).shape[0] == p_groups_out
# check get_n_splits() with dummy parameters
assert logo.get_n_splits(None, None, ['a', 'b', 'c', 'b', 'c']) == 3
assert logo.get_n_splits(groups=[1.0, 1.1, 1.0, 1.2]) == 3
assert lpgo_2.get_n_splits(None, None, np.arange(4)) == 6
assert lpgo_1.get_n_splits(groups=np.arange(4)) == 4
# raise ValueError if a `groups` parameter is illegal
with assert_raises(ValueError):
logo.get_n_splits(None, None, [0.0, np.nan, 0.0])
with assert_raises(ValueError):
lpgo_2.get_n_splits(None, None, [0.0, np.inf, 0.0])
msg = "The 'groups' parameter should not be None."
assert_raise_message(ValueError, msg,
logo.get_n_splits, None, None, None)
assert_raise_message(ValueError, msg,
lpgo_1.get_n_splits, None, None, None)
def test_leave_group_out_changing_groups():
# Check that LeaveOneGroupOut and LeavePGroupsOut work normally if
# the groups variable is changed before calling split
groups = np.array([0, 1, 2, 1, 1, 2, 0, 0])
X = np.ones(len(groups))
groups_changing = np.array(groups, copy=True)
lolo = LeaveOneGroupOut().split(X, groups=groups)
lolo_changing = LeaveOneGroupOut().split(X, groups=groups)
lplo = LeavePGroupsOut(n_groups=2).split(X, groups=groups)
lplo_changing = LeavePGroupsOut(n_groups=2).split(X, groups=groups)
groups_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
# n_splits = no of 2 (p) group combinations of the unique groups = 3C2 = 3
assert (
3 == LeavePGroupsOut(n_groups=2).get_n_splits(X, y=X,
groups=groups))
# n_splits = no of unique groups (C(uniq_lbls, 1) = n_unique_groups)
assert 3 == LeaveOneGroupOut().get_n_splits(X, y=X,
groups=groups)
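# Editorial sketch (not part of the original test module): the n_splits values asserted
# above are plain binomial coefficients, C(n_unique_groups, p), which can be checked
# directly with itertools:
#
#   from itertools import combinations
#   import numpy as np
#   demo_groups = np.array([0, 1, 2, 1, 1, 2, 0, 0])
#   n_unique = len(np.unique(demo_groups))                      # 3 unique groups
#   assert len(list(combinations(range(n_unique), 2))) == 3     # LeavePGroupsOut(2)
#   assert len(list(combinations(range(n_unique), 1))) == 3     # LeaveOneGroupOut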
def test_leave_one_p_group_out_error_on_fewer_number_of_groups():
X = y = groups = np.ones(0)
assert_raise_message(ValueError, "Found array with 0 sample(s)", next,
LeaveOneGroupOut().split(X, y, groups))
X = y = groups = np.ones(1)
msg = ("The groups parameter contains fewer than 2 unique groups ({}). "
"LeaveOneGroupOut expects at least 2.").format(groups)
assert_raise_message(ValueError, msg, next,
LeaveOneGroupOut().split(X, y, groups))
X = y = groups = np.ones(1)
msg = ("The groups parameter contains fewer than (or equal to) n_groups "
"(3) numbers of unique groups ({}). LeavePGroupsOut expects "
"that at least n_groups + 1 (4) unique groups "
"be present").format(groups)
assert_raise_message(ValueError, msg, next,
LeavePGroupsOut(n_groups=3).split(X, y, groups))
X = y = groups = np.arange(3)
msg = ("The groups parameter contains fewer than (or equal to) n_groups "
"(3) numbers of unique groups ({}). LeavePGroupsOut expects "
"that at least n_groups + 1 (4) unique groups "
"be present").format(groups)
assert_raise_message(ValueError, msg, next,
LeavePGroupsOut(n_groups=3).split(X, y, groups))
@ignore_warnings
def test_repeated_cv_value_errors():
# n_repeats is not integer or <= 0
for cv in (RepeatedKFold, RepeatedStratifiedKFold):
assert_raises(ValueError, cv, n_repeats=0)
assert_raises(ValueError, cv, n_repeats=1.5)
@pytest.mark.parametrize(
"RepeatedCV", [RepeatedKFold, RepeatedStratifiedKFold]
)
def test_repeated_cv_repr(RepeatedCV):
n_splits, n_repeats = 2, 6
repeated_cv = RepeatedCV(n_splits=n_splits, n_repeats=n_repeats)
repeated_cv_repr = ('{}(n_repeats=6, n_splits=2, random_state=None)'
.format(repeated_cv.__class__.__name__))
assert repeated_cv_repr == repr(repeated_cv)
def test_repeated_kfold_deterministic_split():
X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
random_state = 258173307
rkf = RepeatedKFold(
n_splits=2,
n_repeats=2,
random_state=random_state)
# split should produce same and deterministic splits on
# each call
for _ in range(3):
splits = rkf.split(X)
train, test = next(splits)
assert_array_equal(train, [2, 4])
assert_array_equal(test, [0, 1, 3])
train, test = next(splits)
assert_array_equal(train, [0, 1, 3])
assert_array_equal(test, [2, 4])
train, test = next(splits)
assert_array_equal(train, [0, 1])
assert_array_equal(test, [2, 3, 4])
train, test = next(splits)
assert_array_equal(train, [2, 3, 4])
assert_array_equal(test, [0, 1])
assert_raises(StopIteration, next, splits)
def test_get_n_splits_for_repeated_kfold():
n_splits = 3
n_repeats = 4
rkf = RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats)
expected_n_splits = n_splits * n_repeats
assert expected_n_splits == rkf.get_n_splits()
def test_get_n_splits_for_repeated_stratified_kfold():
n_splits = 3
n_repeats = 4
rskf = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats)
expected_n_splits = n_splits * n_repeats
assert expected_n_splits == rskf.get_n_splits()
def test_repeated_stratified_kfold_deterministic_split():
X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
y = [1, 1, 1, 0, 0]
random_state = 1944695409
rskf = RepeatedStratifiedKFold(
n_splits=2,
n_repeats=2,
random_state=random_state)
# split should produce same and deterministic splits on
# each call
for _ in range(3):
splits = rskf.split(X, y)
train, test = next(splits)
assert_array_equal(train, [1, 4])
assert_array_equal(test, [0, 2, 3])
train, test = next(splits)
assert_array_equal(train, [0, 2, 3])
assert_array_equal(test, [1, 4])
train, test = next(splits)
assert_array_equal(train, [2, 3])
assert_array_equal(test, [0, 1, 4])
train, test = next(splits)
assert_array_equal(train, [0, 1, 4])
assert_array_equal(test, [2, 3])
assert_raises(StopIteration, next, splits)
def test_train_test_split_errors():
pytest.raises(ValueError, train_test_split)
pytest.raises(ValueError, train_test_split, range(3), train_size=1.1)
pytest.raises(ValueError, train_test_split, range(3), test_size=0.6,
train_size=0.6)
pytest.raises(ValueError, train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
pytest.raises(ValueError, train_test_split, range(3),
test_size="wrong_type")
pytest.raises(ValueError, train_test_split, range(3), test_size=2,
train_size=4)
pytest.raises(TypeError, train_test_split, range(3),
some_argument=1.1)
pytest.raises(ValueError, train_test_split, range(3), range(42))
pytest.raises(ValueError, train_test_split, range(10),
shuffle=False, stratify=True)
with pytest.raises(ValueError,
match=r'train_size=11 should be either positive and '
r'smaller than the number of samples 10 or a '
r'float in the \(0, 1\) range'):
train_test_split(range(10), train_size=11, test_size=1)
@pytest.mark.parametrize("train_size,test_size", [
(1.2, 0.8),
(1., 0.8),
(0.0, 0.8),
(-.2, 0.8),
(0.8, 1.2),
(0.8, 1.),
(0.8, 0.),
(0.8, -.2)])
def test_train_test_split_invalid_sizes1(train_size, test_size):
with pytest.raises(ValueError,
match=r'should be .* in the \(0, 1\) range'):
train_test_split(range(10), train_size=train_size, test_size=test_size)
@pytest.mark.parametrize("train_size,test_size", [
(-10, 0.8),
(0, 0.8),
(11, 0.8),
(0.8, -10),
(0.8, 0),
(0.8, 11)])
def test_train_test_split_invalid_sizes2(train_size, test_size):
with pytest.raises(ValueError,
match=r'should be either positive and smaller'):
train_test_split(range(10), train_size=train_size, test_size=test_size)
@pytest.mark.parametrize("train_size, exp_train, exp_test",
[(None, 7, 3),
(8, 8, 2),
(0.8, 8, 2)])
def test_train_test_split_default_test_size(train_size, exp_train, exp_test):
# Check that the default value has the expected behavior, i.e. complement
# train_size unless both are specified.
X_train, X_test = train_test_split(X, train_size=train_size)
assert len(X_train) == exp_train
assert len(X_test) == exp_test
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert len(y_test) == len(y_train)
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# don't convert lists to anything else by default
split = train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert isinstance(y_train, list)
assert isinstance(y_test, list)
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = train_test_split(X_4d, y_3d)
assert split[0].shape == (7, 5, 3, 2)
assert split[1].shape == (3, 5, 3, 2)
assert split[2].shape == (7, 7, 11)
assert split[3].shape == (3, 7, 11)
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = train_test_split(y, test_size=test_size,
stratify=y,
random_state=0)
assert len(test) == exp_test_size
assert len(test) + len(train) == len(y)
# check the 1:1 ratio of ones and twos in the data is preserved
assert np.sum(train == 1) == np.sum(train == 2)
# test unshuffled split
y = np.arange(10)
for test_size in [2, 0.2]:
train, test = train_test_split(y, shuffle=False, test_size=test_size)
assert_array_equal(test, [8, 9])
assert_array_equal(train, [0, 1, 2, 3, 4, 5, 6, 7])
@ignore_warnings
def test_train_test_split_pandas():
# check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = train_test_split(X_df)
assert isinstance(X_train, InputFeatureType)
assert isinstance(X_test, InputFeatureType)
def test_train_test_split_sparse():
# check that train_test_split converts scipy sparse matrices
# to csr, as stated in the documentation
X = np.arange(100).reshape((10, 10))
sparse_types = [csr_matrix, csc_matrix, coo_matrix]
for InputFeatureType in sparse_types:
X_s = InputFeatureType(X)
X_train, X_test = train_test_split(X_s)
assert isinstance(X_train, csr_matrix)
assert isinstance(X_test, csr_matrix)
def test_train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = train_test_split(X_df)
assert isinstance(X_train, MockDataFrame)
assert isinstance(X_test, MockDataFrame)
X_train_arr, X_test_arr = train_test_split(X_df)
def test_train_test_split_list_input():
# Check that when y is a list / list of string labels, it works.
X = np.ones(7)
y1 = ['1'] * 4 + ['0'] * 3
y2 = np.hstack((np.ones(4), np.zeros(3)))
y3 = y2.tolist()
for stratify in (True, False):
X_train1, X_test1, y_train1, y_test1 = train_test_split(
X, y1, stratify=y1 if stratify else None, random_state=0)
X_train2, X_test2, y_train2, y_test2 = train_test_split(
X, y2, stratify=y2 if stratify else None, random_state=0)
X_train3, X_test3, y_train3, y_test3 = train_test_split(
X, y3, stratify=y3 if stratify else None, random_state=0)
np.testing.assert_equal(X_train1, X_train2)
np.testing.assert_equal(y_train2, y_train3)
np.testing.assert_equal(X_test1, X_test3)
np.testing.assert_equal(y_test3, y_test2)
@pytest.mark.parametrize("test_size, train_size",
[(2.0, None),
(1.0, None),
(0.1, 0.95),
(None, 1j),
(11, None),
(10, None),
(8, 3)])
def test_shufflesplit_errors(test_size, train_size):
with pytest.raises(ValueError):
next(ShuffleSplit(test_size=test_size, train_size=train_size).split(X))
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = ShuffleSplit(random_state=21)
assert_array_equal(list(a for a, b in ss.split(X)),
list(a for a, b in ss.split(X)))
def test_stratifiedshufflesplit_list_input():
# Check that when y is a list / list of string labels, it works.
sss = StratifiedShuffleSplit(test_size=2, random_state=42)
X = np.ones(7)
y1 = ['1'] * 4 + ['0'] * 3
y2 = np.hstack((np.ones(4), np.zeros(3)))
y3 = y2.tolist()
np.testing.assert_equal(list(sss.split(X, y1)),
list(sss.split(X, y2)))
np.testing.assert_equal(list(sss.split(X, y3)),
list(sss.split(X, y2)))
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
train_test_split(X, y, test_size=0.2, random_state=42)
def test_check_cv():
X = np.ones(9)
cv = check_cv(3, classifier=False)
# Use numpy.testing.assert_equal which recursively compares
# lists of lists
np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = check_cv(3, y_binary, classifier=True)
np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_binary)),
list(cv.split(X, y_binary)))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = check_cv(3, y_multiclass, classifier=True)
np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_multiclass)),
list(cv.split(X, y_multiclass)))
# also works with 2d multiclass
y_multiclass_2d = y_multiclass.reshape(-1, 1)
cv = check_cv(3, y_multiclass_2d, classifier=True)
np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_multiclass_2d)),
list(cv.split(X, y_multiclass_2d)))
assert not np.all(
next(StratifiedKFold(3).split(X, y_multiclass_2d))[0] ==
next(KFold(3).split(X, y_multiclass_2d))[0])
X = np.ones(5)
y_multilabel = np.array([[0, 0, 0, 0], [0, 1, 1, 0], [0, 0, 0, 1],
[1, 1, 0, 1], [0, 0, 1, 0]])
cv = check_cv(3, y_multilabel, classifier=True)
np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = check_cv(3, y_multioutput, classifier=True)
np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
assert_raises(ValueError, check_cv, cv="lolo")
def test_cv_iterable_wrapper():
kf_iter = KFold().split(X, y)
kf_iter_wrapped = check_cv(kf_iter)
# Since the wrapped iterable is enlisted and stored,
# split can be called any number of times to produce
# consistent results.
np.testing.assert_equal(list(kf_iter_wrapped.split(X, y)),
list(kf_iter_wrapped.split(X, y)))
# If the splits are randomized, successive calls to split yields different
# results
kf_randomized_iter = KFold(shuffle=True, random_state=0).split(X, y)
kf_randomized_iter_wrapped = check_cv(kf_randomized_iter)
# numpy's assert_array_equal properly compares nested lists
np.testing.assert_equal(list(kf_randomized_iter_wrapped.split(X, y)),
list(kf_randomized_iter_wrapped.split(X, y)))
try:
splits_are_equal = True
np.testing.assert_equal(list(kf_iter_wrapped.split(X, y)),
list(kf_randomized_iter_wrapped.split(X, y)))
except AssertionError:
splits_are_equal = False
assert not splits_are_equal, (
"If the splits are randomized, "
"successive calls to split should yield different results")
def test_group_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_groups = 15
n_samples = 1000
n_splits = 5
X = y = np.ones(n_samples)
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
groups = rng.randint(0, n_groups, n_samples)
ideal_n_groups_per_fold = n_samples // n_splits
len(np.unique(groups))
# Get the test fold indices from the test set indices of each fold
folds = np.zeros(n_samples)
lkf = GroupKFold(n_splits=n_splits)
for i, (_, test) in enumerate(lkf.split(X, y, groups)):
folds[test] = i
# Check that folds have approximately the same size
assert len(folds) == len(groups)
for i in np.unique(folds):
assert (tolerance >=
abs(sum(folds == i) - ideal_n_groups_per_fold))
# Check that each group appears only in 1 fold
for group in np.unique(groups):
assert len(np.unique(folds[groups == group])) == 1
# Check that no group is on both sides of the split
groups = np.asarray(groups, dtype=object)
for train, test in lkf.split(X, y, groups):
assert len(np.intersect1d(groups[train], groups[test])) == 0
# Construct the test data
groups = np.array(['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David',
'Francis', 'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia'])
n_groups = len(np.unique(groups))
n_samples = len(groups)
n_splits = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
ideal_n_groups_per_fold = n_samples // n_splits
X = y = np.ones(n_samples)
# Get the test fold indices from the test set indices of each fold
folds = np.zeros(n_samples)
for i, (_, test) in enumerate(lkf.split(X, y, groups)):
folds[test] = i
# Check that folds have approximately the same size
assert len(folds) == len(groups)
for i in np.unique(folds):
assert (tolerance >=
abs(sum(folds == i) - ideal_n_groups_per_fold))
# Check that each group appears only in 1 fold
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
for group in np.unique(groups):
assert len(np.unique(folds[groups == group])) == 1
# Check that no group is on both sides of the split
groups = np.asarray(groups, dtype=object)
for train, test in lkf.split(X, y, groups):
assert len(np.intersect1d(groups[train], groups[test])) == 0
# groups can also be a list
cv_iter = list(lkf.split(X, y, groups.tolist()))
for (train1, test1), (train2, test2) in zip(lkf.split(X, y, groups),
cv_iter):
assert_array_equal(train1, train2)
assert_array_equal(test1, test2)
# Should fail if there are more folds than groups
groups = np.array([1, 1, 1, 2, 2])
X = y = np.ones(len(groups))
assert_raises_regexp(ValueError, "Cannot have number of splits.*greater",
next, GroupKFold(n_splits=3).split(X, y, groups))
def test_time_series_cv():
X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]]
# Should fail if there are more folds than samples
assert_raises_regexp(ValueError, "Cannot have number of folds.*greater",
next,
TimeSeriesSplit(n_splits=7).split(X))
tscv = TimeSeriesSplit(2)
# Manually check that Time Series CV preserves the data
# ordering on toy datasets
splits = tscv.split(X[:-1])
train, test = next(splits)
assert_array_equal(train, [0, 1])
assert_array_equal(test, [2, 3])
train, test = next(splits)
assert_array_equal(train, [0, 1, 2, 3])
assert_array_equal(test, [4, 5])
splits = TimeSeriesSplit(2).split(X)
train, test = next(splits)
assert_array_equal(train, [0, 1, 2])
assert_array_equal(test, [3, 4])
train, test = next(splits)
assert_array_equal(train, [0, 1, 2, 3, 4])
assert_array_equal(test, [5, 6])
# Check get_n_splits returns the correct number of splits
splits = TimeSeriesSplit(2).split(X)
n_splits_actual = len(list(splits))
assert n_splits_actual == tscv.get_n_splits()
assert n_splits_actual == 2
def _check_time_series_max_train_size(splits, check_splits, max_train_size):
for (train, test), (check_train, check_test) in zip(splits, check_splits):
assert_array_equal(test, check_test)
assert len(check_train) <= max_train_size
suffix_start = max(len(train) - max_train_size, 0)
assert_array_equal(check_train, train[suffix_start:])
def test_time_series_max_train_size():
X = np.zeros((6, 1))
splits = TimeSeriesSplit(n_splits=3).split(X)
check_splits = TimeSeriesSplit(n_splits=3, max_train_size=3).split(X)
_check_time_series_max_train_size(splits, check_splits, max_train_size=3)
# Test for the case where the size of a fold is greater than max_train_size
check_splits = TimeSeriesSplit(n_splits=3, max_train_size=2).split(X)
_check_time_series_max_train_size(splits, check_splits, max_train_size=2)
# Test for the case where the size of each fold is less than max_train_size
check_splits = TimeSeriesSplit(n_splits=3, max_train_size=5).split(X)
_check_time_series_max_train_size(splits, check_splits, max_train_size=2)
def test_time_series_test_size():
X = np.zeros((10, 1))
# Test alone
splits = TimeSeriesSplit(n_splits=3, test_size=3).split(X)
train, test = next(splits)
assert_array_equal(train, [0])
assert_array_equal(test, [1, 2, 3])
train, test = next(splits)
assert_array_equal(train, [0, 1, 2, 3])
assert_array_equal(test, [4, 5, 6])
train, test = next(splits)
assert_array_equal(train, [0, 1, 2, 3, 4, 5, 6])
assert_array_equal(test, [7, 8, 9])
# Test with max_train_size
splits = TimeSeriesSplit(n_splits=2, test_size=2,
max_train_size=4).split(X)
train, test = next(splits)
assert_array_equal(train, [2, 3, 4, 5])
assert_array_equal(test, [6, 7])
train, test = next(splits)
assert_array_equal(train, [4, 5, 6, 7])
assert_array_equal(test, [8, 9])
# Should fail with not enough data points for configuration
with pytest.raises(ValueError, match="Too many splits.*with test_size"):
splits = TimeSeriesSplit(n_splits=5, test_size=2).split(X)
next(splits)
def test_time_series_gap():
X = np.zeros((10, 1))
# Test alone
splits = TimeSeriesSplit(n_splits=2, gap=2).split(X)
train, test = next(splits)
assert_array_equal(train, [0, 1])
assert_array_equal(test, [4, 5, 6])
train, test = next(splits)
assert_array_equal(train, [0, 1, 2, 3, 4])
assert_array_equal(test, [7, 8, 9])
# Test with max_train_size
splits = TimeSeriesSplit(n_splits=3, gap=2, max_train_size=2).split(X)
train, test = next(splits)
assert_array_equal(train, [0, 1])
assert_array_equal(test, [4, 5])
train, test = next(splits)
assert_array_equal(train, [2, 3])
assert_array_equal(test, [6, 7])
train, test = next(splits)
assert_array_equal(train, [4, 5])
assert_array_equal(test, [8, 9])
# Test with test_size
splits = TimeSeriesSplit(n_splits=2, gap=2,
max_train_size=4, test_size=2).split(X)
train, test = next(splits)
assert_array_equal(train, [0, 1, 2, 3])
assert_array_equal(test, [6, 7])
train, test = next(splits)
assert_array_equal(train, [2, 3, 4, 5])
assert_array_equal(test, [8, 9])
# Test with additional test_size
splits = TimeSeriesSplit(n_splits=2, gap=2, test_size=3).split(X)
train, test = next(splits)
assert_array_equal(train, [0, 1])
assert_array_equal(test, [4, 5, 6])
train, test = next(splits)
assert_array_equal(train, [0, 1, 2, 3, 4])
assert_array_equal(test, [7, 8, 9])
# Verify proper error is thrown
with pytest.raises(ValueError, match="Too many splits.*and gap"):
splits = TimeSeriesSplit(n_splits=4, gap=2).split(X)
next(splits)
def test_nested_cv():
# Test if nested cross validation works with different combinations of cv
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
groups = rng.randint(0, 5, 15)
cvs = [LeaveOneGroupOut(), LeaveOneOut(), GroupKFold(n_splits=3),
StratifiedKFold(),
StratifiedShuffleSplit(n_splits=3, random_state=0)]
for inner_cv, outer_cv in combinations_with_replacement(cvs, 2):
gs = GridSearchCV(Ridge(), param_grid={'alpha': [1, .1]},
cv=inner_cv, error_score='raise')
cross_val_score(gs, X=X, y=y, groups=groups, cv=outer_cv,
fit_params={'groups': groups})
def test_build_repr():
class MockSplitter:
def __init__(self, a, b=0, c=None):
self.a = a
self.b = b
self.c = c
def __repr__(self):
return _build_repr(self)
assert repr(MockSplitter(5, 6)) == "MockSplitter(a=5, b=6, c=None)"
@pytest.mark.parametrize('CVSplitter', (ShuffleSplit, GroupShuffleSplit,
StratifiedShuffleSplit))
def test_shuffle_split_empty_trainset(CVSplitter):
cv = CVSplitter(test_size=.99)
X, y = [[1]], [0] # 1 sample
with pytest.raises(
ValueError,
match='With n_samples=1, test_size=0.99 and train_size=None, '
'the resulting train set will be empty'):
next(cv.split(X, y, groups=[1]))
def test_train_test_split_empty_trainset():
X, = [[1]] # 1 sample
with pytest.raises(
ValueError,
match='With n_samples=1, test_size=0.99 and train_size=None, '
'the resulting train set will be empty'):
train_test_split(X, test_size=.99)
X = [[1], [1], [1]] # 3 samples, ask for more than 2 thirds
with pytest.raises(
ValueError,
match='With n_samples=3, test_size=0.67 and train_size=None, '
'the resulting train set will be empty'):
train_test_split(X, test_size=.67)
def test_leave_one_out_empty_trainset():
# LeaveOneGroup out expect at least 2 groups so no need to check
cv = LeaveOneOut()
X, y = [[1]], [0] # 1 sample
with pytest.raises(
ValueError,
match='Cannot perform LeaveOneOut with n_samples=1'):
next(cv.split(X, y))
def test_leave_p_out_empty_trainset():
# No need to check LeavePGroupsOut
cv = LeavePOut(p=2)
X, y = [[1], [2]], [0, 3] # 2 samples
with pytest.raises(
ValueError,
match='p=2 must be strictly less than the number of samples=2'):
next(cv.split(X, y, groups=[1, 2]))
@pytest.mark.parametrize('Klass', (KFold, StratifiedKFold))
def test_random_state_shuffle_false(Klass):
# passing a non-default random_state when shuffle=False makes no sense
with pytest.raises(ValueError,
match='has no effect since shuffle is False'):
Klass(3, shuffle=False, random_state=0)
@pytest.mark.parametrize('cv, expected', [
(KFold(), True),
(KFold(shuffle=True, random_state=123), True),
(StratifiedKFold(), True),
(StratifiedKFold(shuffle=True, random_state=123), True),
(RepeatedKFold(random_state=123), True),
(RepeatedStratifiedKFold(random_state=123), True),
(ShuffleSplit(random_state=123), True),
(GroupShuffleSplit(random_state=123), True),
(StratifiedShuffleSplit(random_state=123), True),
(GroupKFold(), True),
(TimeSeriesSplit(), True),
(LeaveOneOut(), True),
(LeaveOneGroupOut(), True),
(LeavePGroupsOut(n_groups=2), True),
(LeavePOut(p=2), True),
(KFold(shuffle=True, random_state=None), False),
(KFold(shuffle=True, random_state=None), False),
(StratifiedKFold(shuffle=True, random_state=np.random.RandomState(0)),
False),
(StratifiedKFold(shuffle=True, random_state=np.random.RandomState(0)),
False),
(RepeatedKFold(random_state=None), False),
(RepeatedKFold(random_state=np.random.RandomState(0)), False),
(RepeatedStratifiedKFold(random_state=None), False),
(RepeatedStratifiedKFold(random_state=np.random.RandomState(0)), False),
(ShuffleSplit(random_state=None), False),
(ShuffleSplit(random_state=np.random.RandomState(0)), False),
(GroupShuffleSplit(random_state=None), False),
(GroupShuffleSplit(random_state=np.random.RandomState(0)), False),
(StratifiedShuffleSplit(random_state=None), False),
(StratifiedShuffleSplit(random_state=np.random.RandomState(0)), False),
])
def test_yields_constant_splits(cv, expected):
assert _yields_constant_splits(cv) == expected
|
bsd-3-clause
|
Bhare8972/LOFAR-LIM
|
LIM_scripts/examples/plot_IPSE.py
|
1
|
1288
|
#!/usr/bin/env python3
"""This is code to plot the traces used in a IPSE. Note that it is not really *correct*. This code should be moved into the IPSE class (under interferometry), and then called as a method."""
import numpy as np
import matplotlib.pyplot as plt
import h5py
from LoLIM.utilities import processed_data_dir, v_air, SId_to_Sname, Sname_to_SId_dict, RTD, even_antName_to_odd
from LoLIM.interferometry import read_interferometric_PSE as R_IPSE
## these lines are anachronistic and should be fixed at some point
from LoLIM import utilities
utilities.default_raw_data_loc = "/exp_app2/appexp1/public/raw_data"
utilities.default_processed_data_loc = "/home/brian/processed_files"
timeID = "D20170929T202255.000Z"
input_folder = "interferometry_out4"#4_tstNORMAL"interferometry_out4_sumLog
IPSE_index = 55700
block = int(IPSE_index/100)
processed_data_folder = processed_data_dir(timeID)
data_dir = processed_data_folder + "/" + input_folder
interferometry_header, IPSE_list = R_IPSE.load_interferometric_PSE( data_dir, blocks_to_open=[block] )
IPSE = R_IPSE.get_IPSE(IPSE_list, IPSE_index)
print(IPSE.unique_index)
print("XYZT:", IPSE.XYZT)
print("intensity:", IPSE.intensity, "S1 S2 distance:", IPSE.S1_S2_distance)
print("amplitude:", IPSE.amplitude)
IPSE.plot()
|
mit
|
baojiwei/WavelengthCalibration
|
SinglePeak.py
|
1
|
1344
|
# -*- coding: utf-8 -*-
import matplotlib
class SinglePeak():
"""Creat a class as a Peak Container"""
def __init__(self,StandardPeak):
self.StandardPeak=StandardPeak
self.Indicator=False
self.RecordIntegrationTime=0
self.MaxCounts=0
self.PeakPixel=0
self.pixel=[]
self.CountsRange=[20000,50000]
def GetPixel(self,Wavelength):
# get pixel of the peak
Wavelength=list(Wavelength)
for i in range(0,len(Wavelength)-1):
if Wavelength[i]<self.StandardPeak-5:
a=i
if Wavelength[i]<self.StandardPeak+5:
b=i+1
self.pixel=[a,b]
self.Wavelength=Wavelength[a:b] # get wavelength of the peak
return self.pixel
def GetSingleSpectrum(self,Spectrum):
# get spectrum of the peak
Spectrum=list(Spectrum)
self.Spectrum=Spectrum[self.pixel[0]:self.pixel[1]]
return self.Spectrum
def GetMaxCounts(self):
# get max counts of the peak
for i in range(0,len(self.Spectrum)-1):
if self.MaxCounts<self.Spectrum[i]:
self.MaxCounts=self.Spectrum[i]
self.MaxCountsPixel=i+self.pixel[0]
return self.MaxCounts
def UpdateIndicator(self,CountsRange):
# if MaxCounts is between CountsRange[0] and CountsRange[1]
# then Indicator = True pass the check
self.CountsRange=CountsRange
if self.MaxCounts>self.CountsRange[0] and self.MaxCounts<self.CountsRange[1]:
self.Indicator=True
return self.Indicator
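# Editorial usage sketch (not part of the original module). The wavelength grid,
# spectrum and peak position below are made-up illustration values:
#
#   peak = SinglePeak(StandardPeak=546.07)                 # e.g. a Hg emission line, nm
#   wavelengths = [540.0 + 0.1 * i for i in range(120)]    # hypothetical wavelength axis
#   spectrum = [1000.0] * 120
#   spectrum[61] = 30000.0                                 # fake peak near 546.1 nm
#   peak.GetPixel(wavelengths)
#   peak.GetSingleSpectrum(spectrum)
#   peak.GetMaxCounts()
#   print(peak.UpdateIndicator([20000, 50000]))            # True: max counts within range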
|
mit
|
tylerjereddy/scipy
|
scipy/special/orthogonal.py
|
7
|
63520
|
"""
A collection of functions to find the weights and abscissas for
Gaussian Quadrature.
These calculations are done by finding the eigenvalues of a
tridiagonal matrix whose entries are dependent on the coefficients
in the recursion formula for the orthogonal polynomials with the
corresponding weighting function over the interval.
Many recursion relations for orthogonal polynomials are given:
.. math::
a1n f_{n+1} (x) = (a2n + a3n x ) f_n (x) - a4n f_{n-1} (x)
The recursion relation of interest is
.. math::
P_{n+1} (x) = (x - A_n) P_n (x) - B_n P_{n-1} (x)
where :math:`P` has a different normalization than :math:`f`.
The coefficients can be found as:
.. math::
A_n = -a2n / a3n
\\qquad
B_n = ( a4n / a3n \\sqrt{h_{n-1} / h_n})^2
where
.. math::
h_n = \\int_a^b w(x) f_n(x)^2 \\, dx
assume:
.. math::
P_0 (x) = 1
\\qquad
P_{-1} (x) = 0
For the mathematical background, see [golub.welsch-1969-mathcomp]_ and
[abramowitz.stegun-1965]_.
References
----------
.. [golub.welsch-1969-mathcomp]
Golub, Gene H, and John H Welsch. 1969. Calculation of Gauss
Quadrature Rules. *Mathematics of Computation* 23, 221-230+s1--s10.
.. [abramowitz.stegun-1965]
Abramowitz, Milton, and Irene A Stegun. (1965) *Handbook of
Mathematical Functions: with Formulas, Graphs, and Mathematical
Tables*. Gaithersburg, MD: National Bureau of Standards.
http://www.math.sfu.ca/~cbm/aands/
.. [townsend.trogdon.olver-2014]
Townsend, A. and Trogdon, T. and Olver, S. (2014)
*Fast computation of Gauss quadrature nodes and
weights on the whole real line*. :arXiv:`1410.5286`.
.. [townsend.trogdon.olver-2015]
Townsend, A. and Trogdon, T. and Olver, S. (2015)
*Fast computation of Gauss quadrature nodes and
weights on the whole real line*.
IMA Journal of Numerical Analysis
:doi:`10.1093/imanum/drv002`.
"""
#
# Author: Travis Oliphant 2000
# Updated Sep. 2003 (fixed bugs --- tested to be accurate)
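# Editorial sketch (not part of the upstream SciPy file): the monic recurrence quoted
# in the module docstring, specialised to Legendre polynomials where A_n = 0 and
# B_n = n**2 / (4*n**2 - 1), reproduces the monic Legendre polynomial of degree 4,
# x**4 - (6/7) x**2 + 3/35:
#
#   import numpy as np
#   from numpy.polynomial import polynomial as Poly
#   p_prev, p_cur = np.array([1.0]), np.array([0.0, 1.0])    # P_0 = 1, P_1 = x
#   for k in range(1, 4):
#       B_k = k**2 / (4.0 * k**2 - 1.0)
#       p_next = Poly.polysub(Poly.polymulx(p_cur), B_k * p_prev)
#       p_prev, p_cur = p_cur, p_next
#   assert np.allclose(p_cur, [3.0 / 35.0, 0.0, -6.0 / 7.0, 0.0, 1.0])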
# SciPy imports.
import numpy as np
from numpy import (exp, inf, pi, sqrt, floor, sin, cos, around,
hstack, arccos, arange)
from scipy import linalg
from scipy.special import airy
# Local imports.
from . import _ufuncs
from . import _ufuncs as cephes
_gam = cephes.gamma
from . import specfun
_polyfuns = ['legendre', 'chebyt', 'chebyu', 'chebyc', 'chebys',
'jacobi', 'laguerre', 'genlaguerre', 'hermite',
'hermitenorm', 'gegenbauer', 'sh_legendre', 'sh_chebyt',
'sh_chebyu', 'sh_jacobi']
# Correspondence between new and old names of root functions
_rootfuns_map = {'roots_legendre': 'p_roots',
'roots_chebyt': 't_roots',
'roots_chebyu': 'u_roots',
'roots_chebyc': 'c_roots',
'roots_chebys': 's_roots',
'roots_jacobi': 'j_roots',
'roots_laguerre': 'l_roots',
'roots_genlaguerre': 'la_roots',
'roots_hermite': 'h_roots',
'roots_hermitenorm': 'he_roots',
'roots_gegenbauer': 'cg_roots',
'roots_sh_legendre': 'ps_roots',
'roots_sh_chebyt': 'ts_roots',
'roots_sh_chebyu': 'us_roots',
'roots_sh_jacobi': 'js_roots'}
_evalfuns = ['eval_legendre', 'eval_chebyt', 'eval_chebyu',
'eval_chebyc', 'eval_chebys', 'eval_jacobi',
'eval_laguerre', 'eval_genlaguerre', 'eval_hermite',
'eval_hermitenorm', 'eval_gegenbauer',
'eval_sh_legendre', 'eval_sh_chebyt', 'eval_sh_chebyu',
'eval_sh_jacobi']
__all__ = _polyfuns + list(_rootfuns_map.keys()) + _evalfuns + ['poch', 'binom']
class orthopoly1d(np.poly1d):
def __init__(self, roots, weights=None, hn=1.0, kn=1.0, wfunc=None,
limits=None, monic=False, eval_func=None):
equiv_weights = [weights[k] / wfunc(roots[k]) for
k in range(len(roots))]
mu = sqrt(hn)
if monic:
evf = eval_func
if evf:
knn = kn
eval_func = lambda x: evf(x) / knn
mu = mu / abs(kn)
kn = 1.0
# compute coefficients from roots, then scale
poly = np.poly1d(roots, r=True)
np.poly1d.__init__(self, poly.coeffs * float(kn))
self.weights = np.array(list(zip(roots, weights, equiv_weights)))
self.weight_func = wfunc
self.limits = limits
self.normcoef = mu
# Note: eval_func will be discarded on arithmetic
self._eval_func = eval_func
def __call__(self, v):
if self._eval_func and not isinstance(v, np.poly1d):
return self._eval_func(v)
else:
return np.poly1d.__call__(self, v)
def _scale(self, p):
if p == 1.0:
return
self._coeffs *= p
evf = self._eval_func
if evf:
self._eval_func = lambda x: evf(x) * p
self.normcoef *= p
def _gen_roots_and_weights(n, mu0, an_func, bn_func, f, df, symmetrize, mu):
"""[x,w] = gen_roots_and_weights(n,an_func,sqrt_bn_func,mu)
Returns the roots (x) of an nth order orthogonal polynomial,
and weights (w) to use in appropriate Gaussian quadrature with that
orthogonal polynomial.
The polynomials have the recurrence relation
P_n+1(x) = (x - A_n) P_n(x) - B_n P_n-1(x)
an_func(n) should return A_n
bn_func(n) should return sqrt(B_n)
mu ( = h_0 ) is the integral of the weight over the orthogonality
interval
"""
k = np.arange(n, dtype='d')
c = np.zeros((2, n))
c[0,1:] = bn_func(k[1:])
c[1,:] = an_func(k)
x = linalg.eigvals_banded(c, overwrite_a_band=True)
# improve roots by one application of Newton's method
y = f(n, x)
dy = df(n, x)
x -= y/dy
fm = f(n-1, x)
fm /= np.abs(fm).max()
dy /= np.abs(dy).max()
w = 1.0 / (fm * dy)
if symmetrize:
w = (w + w[::-1]) / 2
x = (x - x[::-1]) / 2
w *= mu0 / w.sum()
if mu:
return x, w, mu0
else:
return x, w
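# Editorial sketch (not part of the upstream SciPy file): the same eigenvalue idea,
# written out for the Legendre case with a dense symmetric matrix instead of
# eigvals_banded. Here A_k = 0 and sqrt(B_k) = k / sqrt(4*k**2 - 1), and the
# eigenvalues of the Jacobi matrix are the Gauss-Legendre nodes:
#
#   import numpy as np
#   n = 5
#   k = np.arange(1, n)
#   offdiag = k / np.sqrt(4.0 * k**2 - 1.0)
#   J = np.diag(offdiag, -1) + np.diag(offdiag, 1)
#   nodes = np.linalg.eigvalsh(J)
#   assert np.allclose(nodes, np.polynomial.legendre.leggauss(n)[0])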
# Jacobi Polynomials 1 P^(alpha,beta)_n(x)
def roots_jacobi(n, alpha, beta, mu=False):
r"""Gauss-Jacobi quadrature.
Compute the sample points and weights for Gauss-Jacobi
quadrature. The sample points are the roots of the nth degree
Jacobi polynomial, :math:`P^{\alpha, \beta}_n(x)`. These sample
points and weights correctly integrate polynomials of degree
:math:`2n - 1` or less over the interval :math:`[-1, 1]` with
weight function :math:`w(x) = (1 - x)^{\alpha} (1 +
x)^{\beta}`. See 22.2.1 in [AS]_ for details.
Parameters
----------
n : int
quadrature order
alpha : float
alpha must be > -1
beta : float
beta must be > -1
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
m = int(n)
if n < 1 or n != m:
raise ValueError("n must be a positive integer.")
if alpha <= -1 or beta <= -1:
raise ValueError("alpha and beta must be greater than -1.")
if alpha == 0.0 and beta == 0.0:
return roots_legendre(m, mu)
if alpha == beta:
return roots_gegenbauer(m, alpha+0.5, mu)
mu0 = 2.0**(alpha+beta+1)*cephes.beta(alpha+1, beta+1)
a = alpha
b = beta
if a + b == 0.0:
an_func = lambda k: np.where(k == 0, (b-a)/(2+a+b), 0.0)
else:
an_func = lambda k: np.where(k == 0, (b-a)/(2+a+b),
(b*b - a*a) / ((2.0*k+a+b)*(2.0*k+a+b+2)))
bn_func = lambda k: 2.0 / (2.0*k+a+b)*np.sqrt((k+a)*(k+b) / (2*k+a+b+1)) \
* np.where(k == 1, 1.0, np.sqrt(k*(k+a+b) / (2.0*k+a+b-1)))
f = lambda n, x: cephes.eval_jacobi(n, a, b, x)
df = lambda n, x: 0.5 * (n + a + b + 1) \
* cephes.eval_jacobi(n-1, a+1, b+1, x)
return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, False, mu)
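# Editorial usage sketch (not part of the upstream SciPy file): with alpha = beta = 0.5
# the weight is sqrt(1 - x**2), so a 5-point rule must reproduce
# int_{-1}^{1} x**2 sqrt(1 - x**2) dx = pi/8 exactly:
#
#   import numpy as np
#   from scipy.special import roots_jacobi
#   x, w = roots_jacobi(5, 0.5, 0.5)
#   assert np.allclose(np.sum(w * x**2), np.pi / 8)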
def jacobi(n, alpha, beta, monic=False):
r"""Jacobi polynomial.
Defined to be the solution of
.. math::
(1 - x^2)\frac{d^2}{dx^2}P_n^{(\alpha, \beta)}
+ (\beta - \alpha - (\alpha + \beta + 2)x)
\frac{d}{dx}P_n^{(\alpha, \beta)}
+ n(n + \alpha + \beta + 1)P_n^{(\alpha, \beta)} = 0
for :math:`\alpha, \beta > -1`; :math:`P_n^{(\alpha, \beta)}` is a
polynomial of degree :math:`n`.
Parameters
----------
n : int
Degree of the polynomial.
alpha : float
Parameter, must be greater than -1.
beta : float
Parameter, must be greater than -1.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
P : orthopoly1d
Jacobi polynomial.
Notes
-----
For fixed :math:`\alpha, \beta`, the polynomials
:math:`P_n^{(\alpha, \beta)}` are orthogonal over :math:`[-1, 1]`
with weight function :math:`(1 - x)^\alpha(1 + x)^\beta`.
"""
if n < 0:
raise ValueError("n must be nonnegative.")
wfunc = lambda x: (1 - x)**alpha * (1 + x)**beta
if n == 0:
return orthopoly1d([], [], 1.0, 1.0, wfunc, (-1, 1), monic,
eval_func=np.ones_like)
x, w, mu = roots_jacobi(n, alpha, beta, mu=True)
ab1 = alpha + beta + 1.0
hn = 2**ab1 / (2 * n + ab1) * _gam(n + alpha + 1)
hn *= _gam(n + beta + 1.0) / _gam(n + 1) / _gam(n + ab1)
kn = _gam(2 * n + ab1) / 2.0**n / _gam(n + 1) / _gam(n + ab1)
# here kn = coefficient on x^n term
p = orthopoly1d(x, w, hn, kn, wfunc, (-1, 1), monic,
lambda x: eval_jacobi(n, alpha, beta, x))
return p
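# Editorial usage sketch (not part of the upstream SciPy file): at x = 1 the Jacobi
# polynomial takes the textbook value P_n^{(alpha, beta)}(1) = binom(n + alpha, n);
# for n = 2, alpha = beta = 1 that is binom(3, 2) = 3:
#
#   from scipy.special import jacobi
#   assert abs(jacobi(2, 1.0, 1.0)(1.0) - 3.0) < 1e-10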
# Jacobi Polynomials shifted G_n(p,q,x)
def roots_sh_jacobi(n, p1, q1, mu=False):
"""Gauss-Jacobi (shifted) quadrature.
Compute the sample points and weights for Gauss-Jacobi (shifted)
quadrature. The sample points are the roots of the nth degree
shifted Jacobi polynomial, :math:`G^{p,q}_n(x)`. These sample
points and weights correctly integrate polynomials of degree
:math:`2n - 1` or less over the interval :math:`[0, 1]` with
weight function :math:`w(x) = (1 - x)^{p-q} x^{q-1}`. See 22.2.2
in [AS]_ for details.
Parameters
----------
n : int
quadrature order
p1 : float
(p1 - q1) must be > -1
q1 : float
q1 must be > 0
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
if (p1-q1) <= -1 or q1 <= 0:
raise ValueError("(p - q) must be greater than -1, and q must be greater than 0.")
x, w, m = roots_jacobi(n, p1-q1, q1-1, True)
x = (x + 1) / 2
scale = 2.0**p1
w /= scale
m /= scale
if mu:
return x, w, m
else:
return x, w
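# Editorial usage sketch (not part of the upstream SciPy file): for p = q = 1 the
# weight (1 - x)**(p - q) * x**(q - 1) is identically 1 on [0, 1], so the rule must
# give int_0^1 x**2 dx = 1/3:
#
#   import numpy as np
#   from scipy.special import roots_sh_jacobi
#   x, w = roots_sh_jacobi(5, 1, 1)
#   assert np.allclose(np.sum(w * x**2), 1.0 / 3.0)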
def sh_jacobi(n, p, q, monic=False):
r"""Shifted Jacobi polynomial.
Defined by
.. math::
G_n^{(p, q)}(x)
= \binom{2n + p - 1}{n}^{-1}P_n^{(p - q, q - 1)}(2x - 1),
where :math:`P_n^{(\cdot, \cdot)}` is the nth Jacobi polynomial.
Parameters
----------
n : int
Degree of the polynomial.
p : float
Parameter, must have :math:`p > q - 1`.
q : float
Parameter, must be greater than 0.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
G : orthopoly1d
Shifted Jacobi polynomial.
Notes
-----
For fixed :math:`p, q`, the polynomials :math:`G_n^{(p, q)}` are
orthogonal over :math:`[0, 1]` with weight function :math:`(1 -
x)^{p - q}x^{q - 1}`.
"""
if n < 0:
raise ValueError("n must be nonnegative.")
wfunc = lambda x: (1.0 - x)**(p - q) * (x)**(q - 1.)
if n == 0:
return orthopoly1d([], [], 1.0, 1.0, wfunc, (-1, 1), monic,
eval_func=np.ones_like)
n1 = n
x, w = roots_sh_jacobi(n1, p, q)
hn = _gam(n + 1) * _gam(n + q) * _gam(n + p) * _gam(n + p - q + 1)
hn /= (2 * n + p) * (_gam(2 * n + p)**2)
# kn = 1.0 in standard form so monic is redundant. Kept for compatibility.
kn = 1.0
pp = orthopoly1d(x, w, hn, kn, wfunc=wfunc, limits=(0, 1), monic=monic,
eval_func=lambda x: eval_sh_jacobi(n, p, q, x))
return pp
# Generalized Laguerre L^(alpha)_n(x)
def roots_genlaguerre(n, alpha, mu=False):
r"""Gauss-generalized Laguerre quadrature.
Compute the sample points and weights for Gauss-generalized
Laguerre quadrature. The sample points are the roots of the nth
degree generalized Laguerre polynomial, :math:`L^{\alpha}_n(x)`.
These sample points and weights correctly integrate polynomials of
degree :math:`2n - 1` or less over the interval :math:`[0,
\infty]` with weight function :math:`w(x) = x^{\alpha}
e^{-x}`. See 22.3.9 in [AS]_ for details.
Parameters
----------
n : int
quadrature order
alpha : float
alpha must be > -1
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
m = int(n)
if n < 1 or n != m:
raise ValueError("n must be a positive integer.")
if alpha < -1:
raise ValueError("alpha must be greater than -1.")
mu0 = cephes.gamma(alpha + 1)
if m == 1:
x = np.array([alpha+1.0], 'd')
w = np.array([mu0], 'd')
if mu:
return x, w, mu0
else:
return x, w
an_func = lambda k: 2 * k + alpha + 1
bn_func = lambda k: -np.sqrt(k * (k + alpha))
f = lambda n, x: cephes.eval_genlaguerre(n, alpha, x)
df = lambda n, x: (n*cephes.eval_genlaguerre(n, alpha, x)
- (n + alpha)*cephes.eval_genlaguerre(n-1, alpha, x))/x
return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, False, mu)
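# Editorial usage sketch (not part of the upstream SciPy file): with alpha = 1 the
# weight is x * exp(-x), so integrating f(x) = x gives Gamma(3) = 2:
#
#   import numpy as np
#   from scipy.special import roots_genlaguerre
#   x, w = roots_genlaguerre(5, 1.0)
#   assert np.allclose(np.sum(w * x), 2.0)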
def genlaguerre(n, alpha, monic=False):
r"""Generalized (associated) Laguerre polynomial.
Defined to be the solution of
.. math::
x\frac{d^2}{dx^2}L_n^{(\alpha)}
+ (\alpha + 1 - x)\frac{d}{dx}L_n^{(\alpha)}
+ nL_n^{(\alpha)} = 0,
where :math:`\alpha > -1`; :math:`L_n^{(\alpha)}` is a polynomial
of degree :math:`n`.
Parameters
----------
n : int
Degree of the polynomial.
alpha : float
Parameter, must be greater than -1.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
L : orthopoly1d
Generalized Laguerre polynomial.
Notes
-----
For fixed :math:`\alpha`, the polynomials :math:`L_n^{(\alpha)}`
are orthogonal over :math:`[0, \infty)` with weight function
:math:`e^{-x}x^\alpha`.
The Laguerre polynomials are the special case where :math:`\alpha
= 0`.
See Also
--------
laguerre : Laguerre polynomial.
"""
if alpha <= -1:
raise ValueError("alpha must be > -1")
if n < 0:
raise ValueError("n must be nonnegative.")
if n == 0:
n1 = n + 1
else:
n1 = n
x, w = roots_genlaguerre(n1, alpha)
wfunc = lambda x: exp(-x) * x**alpha
if n == 0:
x, w = [], []
hn = _gam(n + alpha + 1) / _gam(n + 1)
kn = (-1)**n / _gam(n + 1)
p = orthopoly1d(x, w, hn, kn, wfunc, (0, inf), monic,
lambda x: eval_genlaguerre(n, alpha, x))
return p
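# Editorial usage sketch (not part of the upstream SciPy file): for alpha = 0 the
# generalized Laguerre polynomial reduces to the ordinary one, and L_n(0) = 1:
#
#   from scipy.special import genlaguerre
#   assert abs(genlaguerre(3, 0.0)(0.0) - 1.0) < 1e-10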
# Laguerre L_n(x)
def roots_laguerre(n, mu=False):
r"""Gauss-Laguerre quadrature.
Compute the sample points and weights for Gauss-Laguerre
quadrature. The sample points are the roots of the nth degree
Laguerre polynomial, :math:`L_n(x)`. These sample points and
weights correctly integrate polynomials of degree :math:`2n - 1`
or less over the interval :math:`[0, \infty]` with weight function
:math:`w(x) = e^{-x}`. See 22.2.13 in [AS]_ for details.
Parameters
----------
n : int
quadrature order
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
numpy.polynomial.laguerre.laggauss
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
return roots_genlaguerre(n, 0.0, mu=mu)
def laguerre(n, monic=False):
r"""Laguerre polynomial.
Defined to be the solution of
.. math::
x\frac{d^2}{dx^2}L_n + (1 - x)\frac{d}{dx}L_n + nL_n = 0;
:math:`L_n` is a polynomial of degree :math:`n`.
Parameters
----------
n : int
Degree of the polynomial.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
L : orthopoly1d
Laguerre Polynomial.
Notes
-----
The polynomials :math:`L_n` are orthogonal over :math:`[0,
\infty)` with weight function :math:`e^{-x}`.
"""
if n < 0:
raise ValueError("n must be nonnegative.")
if n == 0:
n1 = n + 1
else:
n1 = n
x, w = roots_laguerre(n1)
if n == 0:
x, w = [], []
hn = 1.0
kn = (-1)**n / _gam(n + 1)
p = orthopoly1d(x, w, hn, kn, lambda x: exp(-x), (0, inf), monic,
lambda x: eval_laguerre(n, x))
return p
# Hermite 1 H_n(x)
def roots_hermite(n, mu=False):
r"""Gauss-Hermite (physicist's) quadrature.
Compute the sample points and weights for Gauss-Hermite
quadrature. The sample points are the roots of the nth degree
Hermite polynomial, :math:`H_n(x)`. These sample points and
weights correctly integrate polynomials of degree :math:`2n - 1`
or less over the interval :math:`[-\infty, \infty]` with weight
function :math:`w(x) = e^{-x^2}`. See 22.2.14 in [AS]_ for
details.
Parameters
----------
n : int
quadrature order
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
Notes
-----
For small n up to 150 a modified version of the Golub-Welsch
algorithm is used. Nodes are computed from the eigenvalue
problem and improved by one step of a Newton iteration.
The weights are computed from the well-known analytical formula.
For n larger than 150 an optimal asymptotic algorithm is applied
which computes nodes and weights in a numerically stable manner.
The algorithm has linear runtime making computation for very
large n (several thousand or more) feasible.
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
numpy.polynomial.hermite.hermgauss
roots_hermitenorm
References
----------
.. [townsend.trogdon.olver-2014]
Townsend, A. and Trogdon, T. and Olver, S. (2014)
*Fast computation of Gauss quadrature nodes and
weights on the whole real line*. :arXiv:`1410.5286`.
.. [townsend.trogdon.olver-2015]
Townsend, A. and Trogdon, T. and Olver, S. (2015)
*Fast computation of Gauss quadrature nodes and
weights on the whole real line*.
IMA Journal of Numerical Analysis
:doi:`10.1093/imanum/drv002`.
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
m = int(n)
if n < 1 or n != m:
raise ValueError("n must be a positive integer.")
mu0 = np.sqrt(np.pi)
if n <= 150:
an_func = lambda k: 0.0*k
bn_func = lambda k: np.sqrt(k/2.0)
f = cephes.eval_hermite
df = lambda n, x: 2.0 * n * cephes.eval_hermite(n-1, x)
return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)
else:
nodes, weights = _roots_hermite_asy(m)
if mu:
return nodes, weights, mu0
else:
return nodes, weights
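# Editorial usage sketch (not part of the upstream SciPy file): the physicist's weight
# is exp(-x**2), so the rule must reproduce int x**2 exp(-x**2) dx = sqrt(pi)/2:
#
#   import numpy as np
#   from scipy.special import roots_hermite
#   x, w = roots_hermite(5)
#   assert np.allclose(np.sum(w * x**2), np.sqrt(np.pi) / 2.0)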
def _compute_tauk(n, k, maxit=5):
"""Helper function for Tricomi initial guesses
For details, see formula 3.1 in lemma 3.1 in the
original paper.
Parameters
----------
n : int
Quadrature order
k : ndarray of type int
Index of roots :math:`\tau_k` to compute
maxit : int
Number of Newton maxit performed, the default
value of 5 is sufficient.
Returns
-------
tauk : ndarray
Roots of equation 3.1
See Also
--------
initial_nodes_a
roots_hermite_asy
"""
a = n % 2 - 0.5
c = (4.0*floor(n/2.0) - 4.0*k + 3.0)*pi / (4.0*floor(n/2.0) + 2.0*a + 2.0)
f = lambda x: x - sin(x) - c
df = lambda x: 1.0 - cos(x)
xi = 0.5*pi
for i in range(maxit):
xi = xi - f(xi)/df(xi)
return xi
def _initial_nodes_a(n, k):
r"""Tricomi initial guesses
Computes an initial approximation to the square of the `k`-th
(positive) root :math:`x_k` of the Hermite polynomial :math:`H_n`
of order :math:`n`. The formula is the one from lemma 3.1 in the
original paper. The guesses are accurate except in the region
near :math:`\sqrt{2n + 1}`.
Parameters
----------
n : int
Quadrature order
k : ndarray of type int
Index of roots to compute
Returns
-------
xksq : ndarray
Square of the approximate roots
See Also
--------
initial_nodes
roots_hermite_asy
"""
tauk = _compute_tauk(n, k)
sigk = cos(0.5*tauk)**2
a = n % 2 - 0.5
nu = 4.0*floor(n/2.0) + 2.0*a + 2.0
# Initial approximation of Hermite roots (square)
xksq = nu*sigk - 1.0/(3.0*nu) * (5.0/(4.0*(1.0-sigk)**2) - 1.0/(1.0-sigk) - 0.25)
return xksq
def _initial_nodes_b(n, k):
r"""Gatteschi initial guesses
Computes an initial approximation to the square of the kth
(positive) root :math:`x_k` of the Hermite polynomial :math:`H_n`
of order :math:`n`. The formula is the one from lemma 3.2 in the
original paper. The guesses are accurate in the region just
below :math:`\sqrt{2n + 1}`.
Parameters
----------
n : int
Quadrature order
k : ndarray of type int
Index of roots to compute
Returns
-------
xksq : ndarray
Square of the approximate root
See Also
--------
initial_nodes
roots_hermite_asy
"""
a = n % 2 - 0.5
nu = 4.0*floor(n/2.0) + 2.0*a + 2.0
# Airy roots by approximation
ak = specfun.airyzo(k.max(), 1)[0][::-1]
# Initial approximation of Hermite roots (square)
xksq = (nu +
2.0**(2.0/3.0) * ak * nu**(1.0/3.0) +
1.0/5.0 * 2.0**(4.0/3.0) * ak**2 * nu**(-1.0/3.0) +
(9.0/140.0 - 12.0/175.0 * ak**3) * nu**(-1.0) +
(16.0/1575.0 * ak + 92.0/7875.0 * ak**4) * 2.0**(2.0/3.0) * nu**(-5.0/3.0) -
(15152.0/3031875.0 * ak**5 + 1088.0/121275.0 * ak**2) * 2.0**(1.0/3.0) * nu**(-7.0/3.0))
return xksq
def _initial_nodes(n):
"""Initial guesses for the Hermite roots
Computes an initial approximation to the non-negative
roots :math:`x_k` of the Hermite polynomial :math:`H_n`
of order :math:`n`. The Tricomi and Gatteschi initial
guesses are used in the region where they are accurate.
Parameters
----------
n : int
Quadrature order
Returns
-------
xk : ndarray
Approximate roots
See Also
--------
roots_hermite_asy
"""
# Turnover point
# linear polynomial fit to error of 10, 25, 40, ..., 1000 point rules
fit = 0.49082003*n - 4.37859653
turnover = around(fit).astype(int)
# Compute all approximations
ia = arange(1, int(floor(n*0.5)+1))
ib = ia[::-1]
xasq = _initial_nodes_a(n, ia[:turnover+1])
xbsq = _initial_nodes_b(n, ib[turnover+1:])
# Combine
iv = sqrt(hstack([xasq, xbsq]))
# Central node is always zero
if n % 2 == 1:
iv = hstack([0.0, iv])
return iv
def _pbcf(n, theta):
r"""Asymptotic series expansion of parabolic cylinder function
The implementation is based on sections 3.2 and 3.3 from the
original paper. Compared to the published version this code
adds one more term to the asymptotic series. The detailed
formulas can be found at [parabolic-asymptotics]_. The evaluation
is done in a transformed variable :math:`\theta := \arccos(t)`
where :math:`t := x / \mu` and :math:`\mu := \sqrt{2n + 1}`.
Parameters
----------
n : int
Quadrature order
theta : ndarray
Transformed position variable
Returns
-------
U : ndarray
Value of the parabolic cylinder function :math:`U(a, \theta)`.
Ud : ndarray
Value of the derivative :math:`U^{\prime}(a, \theta)` of
the parabolic cylinder function.
See Also
--------
roots_hermite_asy
References
----------
.. [parabolic-asymptotics]
https://dlmf.nist.gov/12.10#vii
"""
st = sin(theta)
ct = cos(theta)
# https://dlmf.nist.gov/12.10#vii
mu = 2.0*n + 1.0
# https://dlmf.nist.gov/12.10#E23
eta = 0.5*theta - 0.5*st*ct
# https://dlmf.nist.gov/12.10#E39
zeta = -(3.0*eta/2.0) ** (2.0/3.0)
# https://dlmf.nist.gov/12.10#E40
phi = (-zeta / st**2) ** (0.25)
# Coefficients
# https://dlmf.nist.gov/12.10#E43
a0 = 1.0
a1 = 0.10416666666666666667
a2 = 0.08355034722222222222
a3 = 0.12822657455632716049
a4 = 0.29184902646414046425
a5 = 0.88162726744375765242
b0 = 1.0
b1 = -0.14583333333333333333
b2 = -0.09874131944444444444
b3 = -0.14331205391589506173
b4 = -0.31722720267841354810
b5 = -0.94242914795712024914
# Polynomials
# https://dlmf.nist.gov/12.10#E9
# https://dlmf.nist.gov/12.10#E10
ctp = ct ** arange(16).reshape((-1,1))
u0 = 1.0
u1 = (1.0*ctp[3,:] - 6.0*ct) / 24.0
u2 = (-9.0*ctp[4,:] + 249.0*ctp[2,:] + 145.0) / 1152.0
u3 = (-4042.0*ctp[9,:] + 18189.0*ctp[7,:] - 28287.0*ctp[5,:] - 151995.0*ctp[3,:] - 259290.0*ct) / 414720.0
u4 = (72756.0*ctp[10,:] - 321339.0*ctp[8,:] - 154982.0*ctp[6,:] + 50938215.0*ctp[4,:] + 122602962.0*ctp[2,:] + 12773113.0) / 39813120.0
u5 = (82393456.0*ctp[15,:] - 617950920.0*ctp[13,:] + 1994971575.0*ctp[11,:] - 3630137104.0*ctp[9,:] + 4433574213.0*ctp[7,:]
- 37370295816.0*ctp[5,:] - 119582875013.0*ctp[3,:] - 34009066266.0*ct) / 6688604160.0
v0 = 1.0
v1 = (1.0*ctp[3,:] + 6.0*ct) / 24.0
v2 = (15.0*ctp[4,:] - 327.0*ctp[2,:] - 143.0) / 1152.0
v3 = (-4042.0*ctp[9,:] + 18189.0*ctp[7,:] - 36387.0*ctp[5,:] + 238425.0*ctp[3,:] + 259290.0*ct) / 414720.0
v4 = (-121260.0*ctp[10,:] + 551733.0*ctp[8,:] - 151958.0*ctp[6,:] - 57484425.0*ctp[4,:] - 132752238.0*ctp[2,:] - 12118727) / 39813120.0
v5 = (82393456.0*ctp[15,:] - 617950920.0*ctp[13,:] + 2025529095.0*ctp[11,:] - 3750839308.0*ctp[9,:] + 3832454253.0*ctp[7,:]
+ 35213253348.0*ctp[5,:] + 130919230435.0*ctp[3,:] + 34009066266*ct) / 6688604160.0
# Airy Evaluation (Bi and Bip unused)
Ai, Aip, Bi, Bip = airy(mu**(4.0/6.0) * zeta)
# Prefactor for U
P = 2.0*sqrt(pi) * mu**(1.0/6.0) * phi
# Terms for U
# https://dlmf.nist.gov/12.10#E42
phip = phi ** arange(6, 31, 6).reshape((-1,1))
A0 = b0*u0
A1 = (b2*u0 + phip[0,:]*b1*u1 + phip[1,:]*b0*u2) / zeta**3
A2 = (b4*u0 + phip[0,:]*b3*u1 + phip[1,:]*b2*u2 + phip[2,:]*b1*u3 + phip[3,:]*b0*u4) / zeta**6
B0 = -(a1*u0 + phip[0,:]*a0*u1) / zeta**2
B1 = -(a3*u0 + phip[0,:]*a2*u1 + phip[1,:]*a1*u2 + phip[2,:]*a0*u3) / zeta**5
B2 = -(a5*u0 + phip[0,:]*a4*u1 + phip[1,:]*a3*u2 + phip[2,:]*a2*u3 + phip[3,:]*a1*u4 + phip[4,:]*a0*u5) / zeta**8
# U
# https://dlmf.nist.gov/12.10#E35
U = P * (Ai * (A0 + A1/mu**2.0 + A2/mu**4.0) +
Aip * (B0 + B1/mu**2.0 + B2/mu**4.0) / mu**(8.0/6.0))
# Prefactor for derivative of U
Pd = sqrt(2.0*pi) * mu**(2.0/6.0) / phi
# Terms for derivative of U
# https://dlmf.nist.gov/12.10#E46
C0 = -(b1*v0 + phip[0,:]*b0*v1) / zeta
C1 = -(b3*v0 + phip[0,:]*b2*v1 + phip[1,:]*b1*v2 + phip[2,:]*b0*v3) / zeta**4
C2 = -(b5*v0 + phip[0,:]*b4*v1 + phip[1,:]*b3*v2 + phip[2,:]*b2*v3 + phip[3,:]*b1*v4 + phip[4,:]*b0*v5) / zeta**7
D0 = a0*v0
D1 = (a2*v0 + phip[0,:]*a1*v1 + phip[1,:]*a0*v2) / zeta**3
D2 = (a4*v0 + phip[0,:]*a3*v1 + phip[1,:]*a2*v2 + phip[2,:]*a1*v3 + phip[3,:]*a0*v4) / zeta**6
# Derivative of U
# https://dlmf.nist.gov/12.10#E36
Ud = Pd * (Ai * (C0 + C1/mu**2.0 + C2/mu**4.0) / mu**(4.0/6.0) +
Aip * (D0 + D1/mu**2.0 + D2/mu**4.0))
return U, Ud
def _newton(n, x_initial, maxit=5):
"""Newton iteration for polishing the asymptotic approximation
to the zeros of the Hermite polynomials.
Parameters
----------
n : int
Quadrature order
x_initial : ndarray
Initial guesses for the roots
maxit : int
Maximal number of Newton iterations.
The default 5 is sufficient, usually
only one or two steps are needed.
Returns
-------
nodes : ndarray
Quadrature nodes
weights : ndarray
Quadrature weights
See Also
--------
roots_hermite_asy
"""
# Variable transformation
mu = sqrt(2.0*n + 1.0)
t = x_initial / mu
theta = arccos(t)
# Newton iteration
for i in range(maxit):
u, ud = _pbcf(n, theta)
dtheta = u / (sqrt(2.0) * mu * sin(theta) * ud)
theta = theta + dtheta
if max(abs(dtheta)) < 1e-14:
break
# Undo variable transformation
x = mu * cos(theta)
# Central node is always zero
if n % 2 == 1:
x[0] = 0.0
# Compute weights
w = exp(-x**2) / (2.0*ud**2)
return x, w
def _roots_hermite_asy(n):
r"""Gauss-Hermite (physicist's) quadrature for large n.
Computes the sample points and weights for Gauss-Hermite quadrature.
The sample points are the roots of the nth degree Hermite polynomial,
:math:`H_n(x)`. These sample points and weights correctly integrate
polynomials of degree :math:`2n - 1` or less over the interval
:math:`[-\infty, \infty]` with weight function :math:`f(x) = e^{-x^2}`.
This method relies on asymptotic expansions which work best for n > 150.
The algorithm has linear runtime making computation for very large n
feasible.
Parameters
----------
n : int
quadrature order
Returns
-------
nodes : ndarray
Quadrature nodes
weights : ndarray
Quadrature weights
See Also
--------
roots_hermite
References
----------
.. [townsend.trogdon.olver-2014]
Townsend, A. and Trogdon, T. and Olver, S. (2014)
*Fast computation of Gauss quadrature nodes and
weights on the whole real line*. :arXiv:`1410.5286`.
.. [townsend.trogdon.olver-2015]
Townsend, A. and Trogdon, T. and Olver, S. (2015)
*Fast computation of Gauss quadrature nodes and
weights on the whole real line*.
IMA Journal of Numerical Analysis
:doi:`10.1093/imanum/drv002`.
"""
iv = _initial_nodes(n)
nodes, weights = _newton(n, iv)
# Combine with negative parts
if n % 2 == 0:
nodes = hstack([-nodes[::-1], nodes])
weights = hstack([weights[::-1], weights])
else:
nodes = hstack([-nodes[-1:0:-1], nodes])
weights = hstack([weights[-1:0:-1], weights])
# Scale weights
weights *= sqrt(pi) / sum(weights)
return nodes, weights
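# Illustrative sketch, not part of the original module: a quick self-check of the
# asymptotic Gauss-Hermite rule above.  The helper is private and never called at
# import time; the exact value assumed below is int x**2 exp(-x**2) dx = sqrt(pi)/2.
def _demo_roots_hermite_asy():
    import numpy as _np
    x, w = _roots_hermite_asy(200)  # the asymptotic branch is intended for n > 150
    approx = _np.sum(w * x**2)
    exact = _np.sqrt(_np.pi) / 2.0
    return approx, exact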
def hermite(n, monic=False):
r"""Physicist's Hermite polynomial.
Defined by
.. math::
H_n(x) = (-1)^ne^{x^2}\frac{d^n}{dx^n}e^{-x^2};
:math:`H_n` is a polynomial of degree :math:`n`.
Parameters
----------
n : int
Degree of the polynomial.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
H : orthopoly1d
Hermite polynomial.
Notes
-----
The polynomials :math:`H_n` are orthogonal over :math:`(-\infty,
\infty)` with weight function :math:`e^{-x^2}`.
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> p_monic = special.hermite(3, monic=True)
>>> p_monic
poly1d([ 1. , 0. , -1.5, 0. ])
>>> p_monic(1)
-0.49999999999999983
>>> x = np.linspace(-3, 3, 400)
>>> y = p_monic(x)
>>> plt.plot(x, y)
>>> plt.title("Monic Hermite polynomial of degree 3")
>>> plt.xlabel("x")
>>> plt.ylabel("H_3(x)")
>>> plt.show()
"""
if n < 0:
raise ValueError("n must be nonnegative.")
if n == 0:
n1 = n + 1
else:
n1 = n
x, w = roots_hermite(n1)
wfunc = lambda x: exp(-x * x)
if n == 0:
x, w = [], []
hn = 2**n * _gam(n + 1) * sqrt(pi)
kn = 2**n
p = orthopoly1d(x, w, hn, kn, wfunc, (-inf, inf), monic,
lambda x: eval_hermite(n, x))
return p
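# Illustrative sketch (added for this write-up, not in the original source):
# hermite(3) represents H_3(x) = 8*x**3 - 12*x, so evaluating at x = 1 gives -4.
# Defined as a helper only; nothing calls it automatically.
def _demo_hermite():
    p3 = hermite(3)
    return p3(1.0)  # approximately -4.0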
# Hermite 2 He_n(x)
def roots_hermitenorm(n, mu=False):
r"""Gauss-Hermite (statistician's) quadrature.
Compute the sample points and weights for Gauss-Hermite
quadrature. The sample points are the roots of the nth degree
Hermite polynomial, :math:`He_n(x)`. These sample points and
weights correctly integrate polynomials of degree :math:`2n - 1`
or less over the interval :math:`[-\infty, \infty]` with weight
function :math:`w(x) = e^{-x^2/2}`. See 22.2.15 in [AS]_ for more
details.
Parameters
----------
n : int
quadrature order
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
Notes
-----
For small n up to 150 a modified version of the Golub-Welsch
algorithm is used. Nodes are computed from the eigenvalue
problem and improved by one step of a Newton iteration.
The weights are computed from the well-known analytical formula.
For n larger than 150 an optimal asymptotic algorithm is used
which computes nodes and weights in a numerically stable manner.
The algorithm has linear runtime making computation for very
large n (several thousand or more) feasible.
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
numpy.polynomial.hermite_e.hermegauss
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
m = int(n)
if n < 1 or n != m:
raise ValueError("n must be a positive integer.")
mu0 = np.sqrt(2.0*np.pi)
if n <= 150:
an_func = lambda k: 0.0*k
bn_func = lambda k: np.sqrt(k)
f = cephes.eval_hermitenorm
df = lambda n, x: n * cephes.eval_hermitenorm(n-1, x)
return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)
else:
nodes, weights = _roots_hermite_asy(m)
# Transform
nodes *= sqrt(2)
weights *= sqrt(2)
if mu:
return nodes, weights, mu0
else:
return nodes, weights
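# Illustrative sketch, not part of the original module: the probabilist's rule
# integrates against exp(-x**2/2), so f(x) = x**2 should give sqrt(2*pi).
def _demo_roots_hermitenorm():
    import numpy as _np
    x, w = roots_hermitenorm(10)
    return _np.sum(w * x**2), _np.sqrt(2.0 * _np.pi)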
def hermitenorm(n, monic=False):
r"""Normalized (probabilist's) Hermite polynomial.
Defined by
.. math::
He_n(x) = (-1)^ne^{x^2/2}\frac{d^n}{dx^n}e^{-x^2/2};
:math:`He_n` is a polynomial of degree :math:`n`.
Parameters
----------
n : int
Degree of the polynomial.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
He : orthopoly1d
Hermite polynomial.
Notes
-----
The polynomials :math:`He_n` are orthogonal over :math:`(-\infty,
\infty)` with weight function :math:`e^{-x^2/2}`.
"""
if n < 0:
raise ValueError("n must be nonnegative.")
if n == 0:
n1 = n + 1
else:
n1 = n
x, w = roots_hermitenorm(n1)
wfunc = lambda x: exp(-x * x / 2.0)
if n == 0:
x, w = [], []
hn = sqrt(2 * pi) * _gam(n + 1)
kn = 1.0
p = orthopoly1d(x, w, hn, kn, wfunc=wfunc, limits=(-inf, inf), monic=monic,
eval_func=lambda x: eval_hermitenorm(n, x))
return p
# The remainder of the polynomials can be derived from the ones above.
# Ultraspherical (Gegenbauer) C^(alpha)_n(x)
def roots_gegenbauer(n, alpha, mu=False):
r"""Gauss-Gegenbauer quadrature.
Compute the sample points and weights for Gauss-Gegenbauer
quadrature. The sample points are the roots of the nth degree
Gegenbauer polynomial, :math:`C^{\alpha}_n(x)`. These sample
points and weights correctly integrate polynomials of degree
:math:`2n - 1` or less over the interval :math:`[-1, 1]` with
weight function :math:`w(x) = (1 - x^2)^{\alpha - 1/2}`. See
22.2.3 in [AS]_ for more details.
Parameters
----------
n : int
quadrature order
alpha : float
alpha must be > -0.5
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
m = int(n)
if n < 1 or n != m:
raise ValueError("n must be a positive integer.")
if alpha < -0.5:
raise ValueError("alpha must be greater than -0.5.")
elif alpha == 0.0:
# C(n,0,x) == 0 uniformly, however, as alpha->0, C(n,alpha,x)->T(n,x)
# strictly, we should just error out here, since the roots are not
# really defined, but we used to return something useful, so let's
# keep doing so.
return roots_chebyt(n, mu)
mu0 = np.sqrt(np.pi) * cephes.gamma(alpha + 0.5) / cephes.gamma(alpha + 1)
an_func = lambda k: 0.0 * k
bn_func = lambda k: np.sqrt(k * (k + 2 * alpha - 1)
/ (4 * (k + alpha) * (k + alpha - 1)))
f = lambda n, x: cephes.eval_gegenbauer(n, alpha, x)
df = lambda n, x: (-n*x*cephes.eval_gegenbauer(n, alpha, x)
+ (n + 2*alpha - 1)*cephes.eval_gegenbauer(n-1, alpha, x))/(1-x**2)
return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)
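# Illustrative sketch, not part of the original module: for alpha = 1 the weight
# is sqrt(1 - x**2), so the weights alone should sum to pi/2.
def _demo_roots_gegenbauer():
    import numpy as _np
    x, w = roots_gegenbauer(8, 1.0)
    return _np.sum(w), _np.pi / 2.0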
def gegenbauer(n, alpha, monic=False):
r"""Gegenbauer (ultraspherical) polynomial.
Defined to be the solution of
.. math::
(1 - x^2)\frac{d^2}{dx^2}C_n^{(\alpha)}
- (2\alpha + 1)x\frac{d}{dx}C_n^{(\alpha)}
+ n(n + 2\alpha)C_n^{(\alpha)} = 0
for :math:`\alpha > -1/2`; :math:`C_n^{(\alpha)}` is a polynomial
of degree :math:`n`.
Parameters
----------
n : int
Degree of the polynomial.
alpha : float
Parameter, must be greater than -0.5.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
C : orthopoly1d
Gegenbauer polynomial.
Notes
-----
The polynomials :math:`C_n^{(\alpha)}` are orthogonal over
:math:`[-1,1]` with weight function :math:`(1 - x^2)^{(\alpha -
1/2)}`.
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
We can initialize a variable ``p`` as a Gegenbauer polynomial using the
`gegenbauer` function and evaluate at a point ``x = 1``.
>>> p = special.gegenbauer(3, 0.5, monic=False)
>>> p
poly1d([ 2.5, 0. , -1.5, 0. ])
>>> p(1)
1.0
To evaluate ``p`` at various points ``x`` in the interval ``(-3, 3)``,
simply pass an array ``x`` to ``p`` as follows:
>>> x = np.linspace(-3, 3, 400)
>>> y = p(x)
We can then visualize ``x, y`` using `matplotlib.pyplot`.
>>> fig, ax = plt.subplots()
>>> ax.plot(x, y)
>>> ax.set_title("Gegenbauer (ultraspherical) polynomial of degree 3")
>>> ax.set_xlabel("x")
>>> ax.set_ylabel("G_3(x)")
>>> plt.show()
"""
base = jacobi(n, alpha - 0.5, alpha - 0.5, monic=monic)
if monic:
return base
# Abramowitz and Stegun 22.5.20
factor = (_gam(2*alpha + n) * _gam(alpha + 0.5) /
_gam(2*alpha) / _gam(alpha + 0.5 + n))
base._scale(factor)
base.__dict__['_eval_func'] = lambda x: eval_gegenbauer(float(n), alpha, x)
return base
# Chebyshev of the first kind: T_n(x) =
# n! sqrt(pi) / _gam(n+1./2)* P^(-1/2,-1/2)_n(x)
# Computed anew.
def roots_chebyt(n, mu=False):
r"""Gauss-Chebyshev (first kind) quadrature.
Computes the sample points and weights for Gauss-Chebyshev
quadrature. The sample points are the roots of the nth degree
Chebyshev polynomial of the first kind, :math:`T_n(x)`. These
sample points and weights correctly integrate polynomials of
degree :math:`2n - 1` or less over the interval :math:`[-1, 1]`
with weight function :math:`w(x) = 1/\sqrt{1 - x^2}`. See 22.2.4
in [AS]_ for more details.
Parameters
----------
n : int
quadrature order
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
numpy.polynomial.chebyshev.chebgauss
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
m = int(n)
if n < 1 or n != m:
raise ValueError('n must be a positive integer.')
x = _ufuncs._sinpi(np.arange(-m + 1, m, 2) / (2*m))
w = np.full_like(x, pi/m)
if mu:
return x, w, pi
else:
return x, w
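# Illustrative sketch, not part of the original module: with weight
# 1/sqrt(1 - x**2), the integral of x**2 over [-1, 1] is pi/2.
def _demo_roots_chebyt():
    import numpy as _np
    x, w = roots_chebyt(5)
    return _np.sum(w * x**2), _np.pi / 2.0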
def chebyt(n, monic=False):
r"""Chebyshev polynomial of the first kind.
Defined to be the solution of
.. math::
(1 - x^2)\frac{d^2}{dx^2}T_n - x\frac{d}{dx}T_n + n^2T_n = 0;
:math:`T_n` is a polynomial of degree :math:`n`.
Parameters
----------
n : int
Degree of the polynomial.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
T : orthopoly1d
Chebyshev polynomial of the first kind.
Notes
-----
The polynomials :math:`T_n` are orthogonal over :math:`[-1, 1]`
with weight function :math:`(1 - x^2)^{-1/2}`.
See Also
--------
chebyu : Chebyshev polynomial of the second kind.
"""
if n < 0:
raise ValueError("n must be nonnegative.")
wfunc = lambda x: 1.0 / sqrt(1 - x * x)
if n == 0:
return orthopoly1d([], [], pi, 1.0, wfunc, (-1, 1), monic,
lambda x: eval_chebyt(n, x))
n1 = n
x, w, mu = roots_chebyt(n1, mu=True)
hn = pi / 2
kn = 2**(n - 1)
p = orthopoly1d(x, w, hn, kn, wfunc, (-1, 1), monic,
lambda x: eval_chebyt(n, x))
return p
# Chebyshev of the second kind
# U_n(x) = (n+1)! sqrt(pi) / (2*_gam(n+3./2)) * P^(1/2,1/2)_n(x)
def roots_chebyu(n, mu=False):
r"""Gauss-Chebyshev (second kind) quadrature.
Computes the sample points and weights for Gauss-Chebyshev
quadrature. The sample points are the roots of the nth degree
Chebyshev polynomial of the second kind, :math:`U_n(x)`. These
sample points and weights correctly integrate polynomials of
degree :math:`2n - 1` or less over the interval :math:`[-1, 1]`
with weight function :math:`w(x) = \sqrt{1 - x^2}`. See 22.2.5 in
[AS]_ for details.
Parameters
----------
n : int
quadrature order
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
m = int(n)
if n < 1 or n != m:
raise ValueError('n must be a positive integer.')
t = np.arange(m, 0, -1) * pi / (m + 1)
x = np.cos(t)
w = pi * np.sin(t)**2 / (m + 1)
if mu:
return x, w, pi / 2
else:
return x, w
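# Illustrative sketch, not part of the original module: with weight
# sqrt(1 - x**2), the integral of x**2 over [-1, 1] is pi/8.
def _demo_roots_chebyu():
    import numpy as _np
    x, w = roots_chebyu(5)
    return _np.sum(w * x**2), _np.pi / 8.0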
def chebyu(n, monic=False):
r"""Chebyshev polynomial of the second kind.
Defined to be the solution of
.. math::
(1 - x^2)\frac{d^2}{dx^2}U_n - 3x\frac{d}{dx}U_n
+ n(n + 2)U_n = 0;
:math:`U_n` is a polynomial of degree :math:`n`.
Parameters
----------
n : int
Degree of the polynomial.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
U : orthopoly1d
Chebyshev polynomial of the second kind.
Notes
-----
The polynomials :math:`U_n` are orthogonal over :math:`[-1, 1]`
with weight function :math:`(1 - x^2)^{1/2}`.
See Also
--------
chebyt : Chebyshev polynomial of the first kind.
"""
base = jacobi(n, 0.5, 0.5, monic=monic)
if monic:
return base
factor = sqrt(pi) / 2.0 * _gam(n + 2) / _gam(n + 1.5)
base._scale(factor)
return base
# Chebyshev of the first kind C_n(x)
def roots_chebyc(n, mu=False):
r"""Gauss-Chebyshev (first kind) quadrature.
Compute the sample points and weights for Gauss-Chebyshev
quadrature. The sample points are the roots of the nth degree
Chebyshev polynomial of the first kind, :math:`C_n(x)`. These
sample points and weights correctly integrate polynomials of
degree :math:`2n - 1` or less over the interval :math:`[-2, 2]`
with weight function :math:`w(x) = 1 / \sqrt{1 - (x/2)^2}`. See
22.2.6 in [AS]_ for more details.
Parameters
----------
n : int
quadrature order
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
x, w, m = roots_chebyt(n, True)
x *= 2
w *= 2
m *= 2
if mu:
return x, w, m
else:
return x, w
def chebyc(n, monic=False):
r"""Chebyshev polynomial of the first kind on :math:`[-2, 2]`.
Defined as :math:`C_n(x) = 2T_n(x/2)`, where :math:`T_n` is the
nth Chebyshev polynomial of the first kind.
Parameters
----------
n : int
Degree of the polynomial.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
C : orthopoly1d
Chebyshev polynomial of the first kind on :math:`[-2, 2]`.
Notes
-----
The polynomials :math:`C_n(x)` are orthogonal over :math:`[-2, 2]`
with weight function :math:`1/\sqrt{1 - (x/2)^2}`.
See Also
--------
chebyt : Chebyshev polynomial of the first kind.
References
----------
.. [1] Abramowitz and Stegun, "Handbook of Mathematical Functions"
Section 22. National Bureau of Standards, 1972.
"""
if n < 0:
raise ValueError("n must be nonnegative.")
if n == 0:
n1 = n + 1
else:
n1 = n
x, w = roots_chebyc(n1)
if n == 0:
x, w = [], []
hn = 4 * pi * ((n == 0) + 1)
kn = 1.0
p = orthopoly1d(x, w, hn, kn,
wfunc=lambda x: 1.0 / sqrt(1 - x * x / 4.0),
limits=(-2, 2), monic=monic)
if not monic:
p._scale(2.0 / p(2))
p.__dict__['_eval_func'] = lambda x: eval_chebyc(n, x)
return p
# Chebyshev of the second kind S_n(x)
def roots_chebys(n, mu=False):
r"""Gauss-Chebyshev (second kind) quadrature.
Compute the sample points and weights for Gauss-Chebyshev
quadrature. The sample points are the roots of the nth degree
Chebyshev polynomial of the second kind, :math:`S_n(x)`. These
sample points and weights correctly integrate polynomials of
degree :math:`2n - 1` or less over the interval :math:`[-2, 2]`
with weight function :math:`w(x) = \sqrt{1 - (x/2)^2}`. See 22.2.7
in [AS]_ for more details.
Parameters
----------
n : int
quadrature order
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
x, w, m = roots_chebyu(n, True)
x *= 2
w *= 2
m *= 2
if mu:
return x, w, m
else:
return x, w
def chebys(n, monic=False):
r"""Chebyshev polynomial of the second kind on :math:`[-2, 2]`.
Defined as :math:`S_n(x) = U_n(x/2)` where :math:`U_n` is the
nth Chebyshev polynomial of the second kind.
Parameters
----------
n : int
Degree of the polynomial.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
S : orthopoly1d
Chebyshev polynomial of the second kind on :math:`[-2, 2]`.
Notes
-----
The polynomials :math:`S_n(x)` are orthogonal over :math:`[-2, 2]`
with weight function :math:`\sqrt{1 - (x/2)^2}`.
See Also
--------
chebyu : Chebyshev polynomial of the second kind
References
----------
.. [1] Abramowitz and Stegun, "Handbook of Mathematical Functions"
Section 22. National Bureau of Standards, 1972.
"""
if n < 0:
raise ValueError("n must be nonnegative.")
if n == 0:
n1 = n + 1
else:
n1 = n
x, w = roots_chebys(n1)
if n == 0:
x, w = [], []
hn = pi
kn = 1.0
p = orthopoly1d(x, w, hn, kn,
wfunc=lambda x: sqrt(1 - x * x / 4.0),
limits=(-2, 2), monic=monic)
if not monic:
factor = (n + 1.0) / p(2)
p._scale(factor)
p.__dict__['_eval_func'] = lambda x: eval_chebys(n, x)
return p
# Shifted Chebyshev of the first kind T^*_n(x)
def roots_sh_chebyt(n, mu=False):
r"""Gauss-Chebyshev (first kind, shifted) quadrature.
Compute the sample points and weights for Gauss-Chebyshev
quadrature. The sample points are the roots of the nth degree
shifted Chebyshev polynomial of the first kind, :math:`T_n(x)`.
These sample points and weights correctly integrate polynomials of
degree :math:`2n - 1` or less over the interval :math:`[0, 1]`
with weight function :math:`w(x) = 1/\sqrt{x - x^2}`. See 22.2.8
in [AS]_ for more details.
Parameters
----------
n : int
quadrature order
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
xw = roots_chebyt(n, mu)
return ((xw[0] + 1) / 2,) + xw[1:]
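# Illustrative sketch, not part of the original module: on the shifted interval
# [0, 1] with weight 1/sqrt(x - x**2), the integral of f(x) = x equals pi/2.
def _demo_roots_sh_chebyt():
    import numpy as _np
    x, w = roots_sh_chebyt(5)
    return _np.sum(w * x), _np.pi / 2.0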
def sh_chebyt(n, monic=False):
r"""Shifted Chebyshev polynomial of the first kind.
Defined as :math:`T^*_n(x) = T_n(2x - 1)` for :math:`T_n` the nth
Chebyshev polynomial of the first kind.
Parameters
----------
n : int
Degree of the polynomial.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
T : orthopoly1d
Shifted Chebyshev polynomial of the first kind.
Notes
-----
The polynomials :math:`T^*_n` are orthogonal over :math:`[0, 1]`
with weight function :math:`(x - x^2)^{-1/2}`.
"""
base = sh_jacobi(n, 0.0, 0.5, monic=monic)
if monic:
return base
if n > 0:
factor = 4**n / 2.0
else:
factor = 1.0
base._scale(factor)
return base
# Shifted Chebyshev of the second kind U^*_n(x)
def roots_sh_chebyu(n, mu=False):
r"""Gauss-Chebyshev (second kind, shifted) quadrature.
Computes the sample points and weights for Gauss-Chebyshev
quadrature. The sample points are the roots of the nth degree
shifted Chebyshev polynomial of the second kind, :math:`U_n(x)`.
These sample points and weights correctly integrate polynomials of
degree :math:`2n - 1` or less over the interval :math:`[0, 1]`
with weight function :math:`w(x) = \sqrt{x - x^2}`. See 22.2.9 in
[AS]_ for more details.
Parameters
----------
n : int
quadrature order
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
x, w, m = roots_chebyu(n, True)
x = (x + 1) / 2
m_us = cephes.beta(1.5, 1.5)
w *= m_us / m
if mu:
return x, w, m_us
else:
return x, w
def sh_chebyu(n, monic=False):
r"""Shifted Chebyshev polynomial of the second kind.
Defined as :math:`U^*_n(x) = U_n(2x - 1)` for :math:`U_n` the nth
Chebyshev polynomial of the second kind.
Parameters
----------
n : int
Degree of the polynomial.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
U : orthopoly1d
Shifted Chebyshev polynomial of the second kind.
Notes
-----
The polynomials :math:`U^*_n` are orthogonal over :math:`[0, 1]`
with weight function :math:`(x - x^2)^{1/2}`.
"""
base = sh_jacobi(n, 2.0, 1.5, monic=monic)
if monic:
return base
factor = 4**n
base._scale(factor)
return base
# Legendre
def roots_legendre(n, mu=False):
r"""Gauss-Legendre quadrature.
Compute the sample points and weights for Gauss-Legendre
quadrature. The sample points are the roots of the nth degree
Legendre polynomial :math:`P_n(x)`. These sample points and
weights correctly integrate polynomials of degree :math:`2n - 1`
or less over the interval :math:`[-1, 1]` with weight function
:math:`w(x) = 1.0`. See 22.2.10 in [AS]_ for more details.
Parameters
----------
n : int
quadrature order
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
numpy.polynomial.legendre.leggauss
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
m = int(n)
if n < 1 or n != m:
raise ValueError("n must be a positive integer.")
mu0 = 2.0
an_func = lambda k: 0.0 * k
bn_func = lambda k: k * np.sqrt(1.0 / (4 * k * k - 1))
f = cephes.eval_legendre
df = lambda n, x: (-n*x*cephes.eval_legendre(n, x)
+ n*cephes.eval_legendre(n-1, x))/(1-x**2)
return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)
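# Illustrative sketch, not part of the original module: a five-point
# Gauss-Legendre rule is exact for polynomials up to degree nine, so
# integrating x**4 over [-1, 1] should return 2/5.
def _demo_roots_legendre():
    import numpy as _np
    x, w = roots_legendre(5)
    return _np.sum(w * x**4), 2.0 / 5.0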
def legendre(n, monic=False):
r"""Legendre polynomial.
Defined to be the solution of
.. math::
\frac{d}{dx}\left[(1 - x^2)\frac{d}{dx}P_n(x)\right]
+ n(n + 1)P_n(x) = 0;
:math:`P_n(x)` is a polynomial of degree :math:`n`.
Parameters
----------
n : int
Degree of the polynomial.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
P : orthopoly1d
Legendre polynomial.
Notes
-----
The polynomials :math:`P_n` are orthogonal over :math:`[-1, 1]`
with weight function 1.
Examples
--------
Generate the 3rd-order Legendre polynomial 1/2*(5x^3 + 0x^2 - 3x + 0):
>>> from scipy.special import legendre
>>> legendre(3)
poly1d([ 2.5, 0. , -1.5, 0. ])
"""
if n < 0:
raise ValueError("n must be nonnegative.")
if n == 0:
n1 = n + 1
else:
n1 = n
x, w = roots_legendre(n1)
if n == 0:
x, w = [], []
hn = 2.0 / (2 * n + 1)
kn = _gam(2 * n + 1) / _gam(n + 1)**2 / 2.0**n
p = orthopoly1d(x, w, hn, kn, wfunc=lambda x: 1.0, limits=(-1, 1),
monic=monic, eval_func=lambda x: eval_legendre(n, x))
return p
# Shifted Legendre P^*_n(x)
def roots_sh_legendre(n, mu=False):
r"""Gauss-Legendre (shifted) quadrature.
Compute the sample points and weights for Gauss-Legendre
quadrature. The sample points are the roots of the nth degree
shifted Legendre polynomial :math:`P^*_n(x)`. These sample points
and weights correctly integrate polynomials of degree :math:`2n -
1` or less over the interval :math:`[0, 1]` with weight function
:math:`w(x) = 1.0`. See 22.2.11 in [AS]_ for details.
Parameters
----------
n : int
quadrature order
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
x, w = roots_legendre(n)
x = (x + 1) / 2
w /= 2
if mu:
return x, w, 1.0
else:
return x, w
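# Illustrative sketch, not part of the original module: the shifted rule works
# on [0, 1] with unit weight, so three nodes integrate x**3 to exactly 1/4.
def _demo_roots_sh_legendre():
    import numpy as _np
    x, w = roots_sh_legendre(3)
    return _np.sum(w * x**3), 0.25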
def sh_legendre(n, monic=False):
r"""Shifted Legendre polynomial.
Defined as :math:`P^*_n(x) = P_n(2x - 1)` for :math:`P_n` the nth
Legendre polynomial.
Parameters
----------
n : int
Degree of the polynomial.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
P : orthopoly1d
Shifted Legendre polynomial.
Notes
-----
The polynomials :math:`P^*_n` are orthogonal over :math:`[0, 1]`
with weight function 1.
"""
if n < 0:
raise ValueError("n must be nonnegative.")
wfunc = lambda x: 0.0 * x + 1.0
if n == 0:
return orthopoly1d([], [], 1.0, 1.0, wfunc, (0, 1), monic,
lambda x: eval_sh_legendre(n, x))
x, w = roots_sh_legendre(n)
hn = 1.0 / (2 * n + 1.0)
kn = _gam(2 * n + 1) / _gam(n + 1)**2
p = orthopoly1d(x, w, hn, kn, wfunc, limits=(0, 1), monic=monic,
eval_func=lambda x: eval_sh_legendre(n, x))
return p
# -----------------------------------------------------------------------------
# Code for backwards compatibility
# -----------------------------------------------------------------------------
# Import functions in case someone is still calling the orthogonal
# module directly. (They shouldn't be; it's not in the public API).
poch = cephes.poch
# eval_chebyu, eval_sh_chebyt and eval_sh_chebyu: These functions are not
# used in orthogonal.py, they are not in _rootfuns_map, but their names
# do appear in _evalfuns, so they must be kept.
from ._ufuncs import (binom, eval_jacobi, eval_sh_jacobi, eval_gegenbauer,
eval_chebyt, eval_chebyu, eval_chebys, eval_chebyc,
eval_sh_chebyt, eval_sh_chebyu, eval_legendre,
eval_sh_legendre, eval_genlaguerre, eval_laguerre,
eval_hermite, eval_hermitenorm)
# Make the old root function names an alias for the new ones
_modattrs = globals()
for newfun, oldfun in _rootfuns_map.items():
_modattrs[oldfun] = _modattrs[newfun]
__all__.append(oldfun)
|
bsd-3-clause
|
airanmehr/bio
|
Scripts/KyrgysHAPH/Analysis/newRound.py
|
1
|
7307
|
'''
Copyleft May 03, 2017 Arya Iranmehr, PhD Student, Bafna Lab, UC San Diego, Email: [email protected]
'''
import numpy as np;
np.set_printoptions(linewidth=200, precision=5, suppress=True)
import pandas as pd;
pd.options.display.max_rows = 20;
pd.options.display.expand_frame_repr = False
import seaborn as sns
import pylab as plt;
import matplotlib as mpl
import os, sys
import Utils.Util as utl
import Utils.Plots as pplt
import Scripts.KyrgysHAPH.Plot as kplt
import Scripts.KyrgysHAPH.Util as kutl
if False:
freqs=pd.read_pickle(kutl.path+'data/KGZ+EAS.aa.df');
panel=pd.read_pickle(utl.dataPath+'Human/scan/selscan/PANEL.idf');
snpMatrix=pd.read_pickle(kutl.path+'data/No-HAPH+HAPH.hap.aa.df');
ann=kutl.Annotation.load()
Fisher=kutl.FishePval(freqs=freqs,snpMatrix=snpMatrix);
pc=utl.pcaX(panel['No-HAPH'].unstack('method').dropna(),2)
Genes=kutl.Gene(ann)
kgz=kplt.KGZ(freqs=freqs,snpMatrix=snpMatrix,Genes=Genes,Fisher=Fisher,X=panel,pc=pc,save=True)
def getSFS():
"""
This function reads the XP0 SFS scan, in which sites that are nonpolymorphic within a population are removed.
In this scan only the pairwise statistic m is comparable.
So we select intervals where m differs the most between HAPH.No-HAPH and Hyper.Normo and differs the least between Normo.No-HAPH.
"""
i=('No-HAPH','HAPH');j=('Normo','Hyper');k=('Normo','No-HAPH')
a=utl.filterGap2(pd.read_pickle(kutl.path+'scan/SFS/XP0.df').loc[50].loc[:, kutl.POPSKGZ[3:]].unstack(2), 19).stack()
M=pd.concat(map(lambda y: a.apply(lambda x: a[y]-x),a.columns),1,keys=a.columns).unstack(level=2)
a=utl.filterGap2(pd.read_pickle(kutl.path+'scan/SFS/XP1.df').loc[50].loc[:, kutl.POPSKGZ[3:]].unstack(2), 19).stack()
d=pd.concat(map(lambda y: a.apply(lambda x: a[y]-x),a.columns),1,keys=a.columns).unstack(level=2)
x='SFSelect'
regs=[]
for x in ['SFSelect','D','H','m']:
print x
m=(d.xs(x,1,2),M.xs(x,1,2))[ x =='m']
if x =='SFSelect':m=m.dropna()
regs+=[m.loc[(m[j].abs()+m[i].abs()-m[k].abs()).sort_values().index].iloc[-500:][i].rename(x)]
return pd.concat(regs,1)
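# Illustrative sketch of the ranking rule used in getSFS, on synthetic data; the
# column names below are assumptions for demonstration only and the helper is
# never called by the analysis itself.
def _demo_interval_ranking():
    import numpy as np
    import pandas as pd
    rng = np.random.RandomState(0)
    m = pd.DataFrame(rng.randn(1000, 3),
                     columns=['HAPH.No-HAPH', 'Hyper.Normo', 'Normo.No-HAPH'])
    # the score is large when the two case/control contrasts differ a lot and the
    # control contrast differs little, mirroring m[j].abs() + m[i].abs() - m[k].abs()
    score = m['HAPH.No-HAPH'].abs() + m['Hyper.Normo'].abs() - m['Normo.No-HAPH'].abs()
    return m.loc[score.sort_values().index].iloc[-500:]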
def getSelscan():
a=utl.filterGap2(pd.concat(map(lambda x: pd.read_pickle(utl.dataPath+'/Human/scan/selscan/{}.idf'.format(x)), kutl.POPSKGZ), 1).sort_index(1), 19, 25000)
b=pd.read_pickle(utl.dataPath+'Human/scan/selscan/No-HAPH.df')[('xpehh','No-HAPH','HAPH')].dropna().abs()
r=utl.BED.getIntervals(b.sort_values().iloc[-5000:],padding=50000,agg='max').sort_values('len');r=r[r.len!=100000].sort_values('len').reset_index()
r.to_pickle(kutl.path+'scan/intervals.xpehh.df')
b=[(a[(m,c[0],'NA')]-a[(m,c[1],'NA')]).rename((m,)+c) for m in ['ihs','nsl'] for c in a['xpehh']]
a=pd.concat([a[['xpehh']],pd.concat(b,1)],1).sort_index(1)
a=a.apply(lambda x: x.dropna().sort_values().iloc[-200:])
return a.loc[:,pd.IndexSlice[['xpehh','ihs','nsl'],['KGZ','Healthy','No-HAPH','Normo']]]
def get_intervals():
try:
regs = pd.read_pickle(kutl.path+'scan/intervals.df')
except:
a=getSFS();b=getSelscan()
a.columns=pd.MultiIndex.from_product([a.columns.values,['No-HAPH'],['HAPH']],names=b.columns.names)
regs = pd.concat([a,b],1).sort_index(1)
regs.to_pickle(kutl.path+'scan/intervals.df')
return regs
def plotAll(I,kgz):
plt.ioff()
for id,i in I.iterrows():
kgz.plotMeeting(i,save=True);
plt.close('all')
plt.ion()
def analyze():
regions=get_intervals()
I=[]
for j in range(regions.shape[1]):
print regions.columns[j]
o= regions.iloc[:,j].dropna()
regs=utl.BED.getIntervals(o,35000).reset_index();regs['method']=o.name[0];regs['pop']=o.name[1];regs['popxp']=o.name[2]
regs['winmax']=regs.apply(lambda row: utl.mask(o.dropna(),interval=row).idxmax(),1)
intervals=Fisher.compute(regs.iloc[:],minDAF=-1,maxPval=0.05)
if not intervals.shape[0]: continue
g=Genes.getGenes(intervals,onlyExome=True,pad=2000)
intervals=intervals.set_index('ID').join(g.groupby(level=0).apply(lambda x: ', '.join(x.tolist())).rename('Genes')).sort_values('dDAF',ascending=False)
I+=[intervals]
I=pd.concat(I).dropna()
I.to_pickle(kutl.path+'Analyzed.Intervals.df')
def run():
F=pd.read_pickle(kutl.path+'data/KGZ.aa.df')
reload(kutl);
# I=Fisher.compute(pd.read_pickle(kutl.path+'scan/intervals.xpehh.df'),minDAF=-1,maxPval=0.05).sort_values('Fisher');
# Genes=kutl.Gene(ann);g=Genes.getGenes(I,onlyExome=True,pad=2000);I=I.set_index('ID').join(g.groupby(level=0).apply(lambda x: ', '.join(x.tolist())).rename('Genes'))
# I.to_pickle(kutl.path+'scan/intervals.analyzed.xpehh.df')
I=pd.read_pickle(kutl.path+'scan/intervals.analyzed.xpehh.df').sort_values('len')
a=pd.read_pickle(kutl.path+'scan/Fisher.df').apply(np.log10).abs().round(12)
b=utl.scanGenome(a>2,f=np.sum)
I=utl.BED.getIntervals(b.sort_values().iloc[-200:],50000,'sum').reset_index().sort_values('len').reset_index(drop=True).reset_index().rename(columns={'index':'ID'})
I
g2=Genes.getGenes(I,onlyExome=True,pad=2000);g2=g2[~g2.apply(lambda x: '-' in x)]  # keep genes without '-' in the name
g2
a
reload(kplt);
I.apply(lambda x: kplt.locuszoom(None,chrom=int(x.CHROM),start=int(x.start)-100000,end=int(x.end)+100000),1);
I2.apply(lambda x: kplt.locuszoom(None,chrom=int(x.CHROM),start=int(x.start)-100000,end=int(x.end)+100000),1)
g2.apply(kplt.locuszoom)
plt.close('all')
df.plot.scatter(x='phi',y='kappa');df[df.index.get_level_values('POS')==i.bapos].plot.scatter(x='phi',y='kappa',c='r',ax=plt.gca());df[df.index.get_level_values('POS')==55955444].plot.scatter(x='phi',y='kappa',c='g',ax=plt.gca())
pd.read_pickle(kutl.path+'data/KGZ.aa.df')
c=pd.concat([b,freqs],1)
I=pd.read_pickle(kutl.path+'Analyzed.Intervals.df').sort_values('Fisher')
I=I[I.Fisher < 0.05];
I=I[I.dDAF>0.12];
I=I.drop_duplicates(subset='Genes').reset_index()
I['fname']=map(lambda x: 'I'+str(x+1),I.index)
cands=pd.DataFrame([(1,0.55,),(3,0.95,),(4,0.95,),(5,0.55,), (6,0.95,'HAPH') , (8,0.35,'HAPH'),(10,0.65,),(11,0.85,),(13,0.75,),(15,0.75,),(29,0.35,),(36,0.25 )]).fillna('No-HAPH');cands[0]=cands[0].apply(lambda x: 'I{}'.format(x));cands=cands.set_index(0)
kgz.cands=cands
II=I.set_index('fname').loc[ cands.index].reset_index().rename(columns={0:'fname'})
I
for _,i in II.iterrows():
kgz.plotMeeting(i)
plotAll(I.iloc[:],kgz)
I[['fname','CHROM','start','end','len','method','pop','popxp','Fisher','dDAF','Genes']].to_csv(kutl.path+'tables/genes.tsv',sep='\t')
kutl.saveLatex(I)
def runSickHealthy():
a=pd.read_pickle(kutl.path+'scan/Fisher.Healthy.Sick.df').apply(lambda x: -np.log10(x))
b=utl.scanGenome(a<0.01,f={'s':lambda x:x.sum(),'m':lambda x: x.mean()},winSize=100000,nsteps=2)
b
pplt.Manhattan(b.loc[[17]])
b[b.s>100].plot.scatter(x='s',y='m')
utl.BED.getIntervals(b[(b.s<500)&(b.s>400)].s,padding=50000,agg='sum')
a=pd.read_pickle('/media/arya/d4565cf2-d44a-4b67-bf97-226a486c01681/Data/Human/scan/selscan/Healthy.df')['xpehh'].dropna()
aaa=utl.scanGenome(a.abs(),f=lambda x: x[x>x.median()].mean(),winSize=200000)
a=a[a.abs()>2]
a.hist(bins=20)
pplt.Manhattan(a.dropna())
pplt.Manhattan(aaa)
|
mit
|
jamescorsini/Kaggle_Titanic
|
myfirstforest.py
|
26
|
4081
|
""" Writing my first randomforest code.
Author : AstroDave
Date : 23rd September 2012
Revised: 15 April 2014
please see packages.python.org/milk/randomforests.html for more
"""
import pandas as pd
import numpy as np
import csv as csv
from sklearn.ensemble import RandomForestClassifier
# Data cleanup
# TRAIN DATA
train_df = pd.read_csv('train.csv', header=0) # Load the train file into a dataframe
# I need to convert all strings to integer classifiers.
# I need to fill in the missing values of the data and make it complete.
# female = 0, Male = 1
train_df['Gender'] = train_df['Sex'].map( {'female': 0, 'male': 1} ).astype(int)
# Embarked from 'C', 'Q', 'S'
# Note this is not ideal: in translating categories to numbers, Port "2" is not 2 times greater than Port "1", etc.
# All missing Embarked -> just make them embark from most common place
if len(train_df.Embarked[ train_df.Embarked.isnull() ]) > 0:
train_df.Embarked[ train_df.Embarked.isnull() ] = train_df.Embarked.dropna().mode().values
Ports = list(enumerate(np.unique(train_df['Embarked']))) # determine all values of Embarked,
Ports_dict = { name : i for i, name in Ports } # set up a dictionary in the form Ports : index
train_df.Embarked = train_df.Embarked.map( lambda x: Ports_dict[x]).astype(int) # Convert all Embark strings to int
# All the ages with no data -> make the median of all Ages
median_age = train_df['Age'].dropna().median()
if len(train_df.Age[ train_df.Age.isnull() ]) > 0:
train_df.loc[ (train_df.Age.isnull()), 'Age'] = median_age
# Remove the Name column, Cabin, Ticket, and Sex (since I copied and filled it to Gender)
train_df = train_df.drop(['Name', 'Sex', 'Ticket', 'Cabin', 'PassengerId'], axis=1)
# TEST DATA
test_df = pd.read_csv('test.csv', header=0) # Load the test file into a dataframe
# I need to do the same with the test data now, so that the columns are the same as the training data
# I need to convert all strings to integer classifiers:
# female = 0, Male = 1
test_df['Gender'] = test_df['Sex'].map( {'female': 0, 'male': 1} ).astype(int)
# Embarked from 'C', 'Q', 'S'
# All missing Embarked -> just make them embark from most common place
if len(test_df.Embarked[ test_df.Embarked.isnull() ]) > 0:
test_df.Embarked[ test_df.Embarked.isnull() ] = test_df.Embarked.dropna().mode().values
# Again convert all Embarked strings to int
test_df.Embarked = test_df.Embarked.map( lambda x: Ports_dict[x]).astype(int)
# All the ages with no data -> make the median of all Ages
median_age = test_df['Age'].dropna().median()
if len(test_df.Age[ test_df.Age.isnull() ]) > 0:
test_df.loc[ (test_df.Age.isnull()), 'Age'] = median_age
# All the missing Fares -> assume median of their respective class
if len(test_df.Fare[ test_df.Fare.isnull() ]) > 0:
median_fare = np.zeros(3)
for f in range(0,3): # loop 0 to 2
median_fare[f] = test_df[ test_df.Pclass == f+1 ]['Fare'].dropna().median()
for f in range(0,3): # loop 0 to 2
test_df.loc[ (test_df.Fare.isnull()) & (test_df.Pclass == f+1 ), 'Fare'] = median_fare[f]
# Collect the test data's PassengerIds before dropping it
ids = test_df['PassengerId'].values
# Remove the Name column, Cabin, Ticket, and Sex (since I copied and filled it to Gender)
test_df = test_df.drop(['Name', 'Sex', 'Ticket', 'Cabin', 'PassengerId'], axis=1)
# The data is now ready to go. So lets fit to the train, then predict to the test!
# Convert back to a numpy array
train_data = train_df.values
test_data = test_df.values
print 'Training...'
forest = RandomForestClassifier(n_estimators=100)
forest = forest.fit( train_data[0::,1::], train_data[0::,0] )
print 'Predicting...'
output = forest.predict(test_data).astype(int)
predictions_file = open("myfirstforest.csv", "wb")
open_file_object = csv.writer(predictions_file)
open_file_object.writerow(["PassengerId","Survived"])
open_file_object.writerows(zip(ids, output))
predictions_file.close()
print 'Done.'
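# Illustrative sketch, not part of the original script: the per-class median
# fare imputation above can also be written with groupby/transform.  The helper
# is never called here and assumes a dataframe with 'Pclass' and 'Fare' columns.
def impute_fare_by_class(df):
    class_medians = df.groupby('Pclass')['Fare'].transform('median')
    df['Fare'] = df['Fare'].fillna(class_medians)
    return df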
|
mit
|
wlamond/scikit-learn
|
examples/ensemble/plot_partial_dependence.py
|
54
|
4704
|
"""
========================
Partial Dependence Plots
========================
Partial dependence plots show the dependence between the target function [2]_
and a set of 'target' features, marginalizing over the
values of all other features (the complement features). Due to the limits
of human perception the size of the target feature set must be small (usually,
one or two) thus the target features are usually chosen among the most
important features
(see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`).
This example shows how to obtain partial dependence plots from a
:class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California
housing dataset. The example is taken from [1]_.
The plot shows four one-way and one two-way partial dependence plots.
The target variables for the one-way PDP are:
median income (`MedInc`), avg. occupants per household (`AveOccup`),
median house age (`HouseAge`), and avg. rooms per household (`AveRooms`).
We can clearly see that the median house price shows a linear relationship
with the median income (top left) and that the house price drops when the
avg. occupants per household increases (top middle).
The top right plot shows that the house age in a district does not have
a strong influence on the (median) house price; nor does the average number
of rooms per household.
The tick marks on the x-axis represent the deciles of the feature values
in the training data.
Partial dependence plots with two target features enable us to visualize
interactions among them. The two-way partial dependence plot shows the
dependence of median house price on joint values of house age and avg.
occupants per household. We can clearly see an interaction between the
two features:
For an avg. occupancy greater than two, the house price is nearly independent
of the house age, whereas for values less than two there is a strong dependence
on age.
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning Ed. 2", Springer, 2009.
.. [2] For classification you can think of it as the regression score before
the link function.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.datasets.california_housing import fetch_california_housing
def main():
cal_housing = fetch_california_housing()
# split 80/20 train-test
X_train, X_test, y_train, y_test = train_test_split(cal_housing.data,
cal_housing.target,
test_size=0.2,
random_state=1)
names = cal_housing.feature_names
print("Training GBRT...")
clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,
learning_rate=0.1, loss='huber',
random_state=1)
clf.fit(X_train, y_train)
print(" done.")
print('Convenience plot with ``partial_dependence_plots``')
features = [0, 5, 1, 2, (5, 1)]
fig, axs = plot_partial_dependence(clf, X_train, features,
feature_names=names,
n_jobs=3, grid_resolution=50)
fig.suptitle('Partial dependence of house value on nonlocation features\n'
'for the California housing dataset')
plt.subplots_adjust(top=0.9) # tight_layout causes overlap with suptitle
print('Custom 3d plot via ``partial_dependence``')
fig = plt.figure()
target_feature = (1, 5)
pdp, axes = partial_dependence(clf, target_feature,
X=X_train, grid_resolution=50)
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[0].reshape(list(map(np.size, axes))).T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu)
ax.set_xlabel(names[target_feature[0]])
ax.set_ylabel(names[target_feature[1]])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle('Partial dependence of house value on median age and '
'average occupancy')
plt.subplots_adjust(top=0.9)
plt.show()
# Needed on Windows because plot_partial_dependence uses multiprocessing
if __name__ == '__main__':
main()
|
bsd-3-clause
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/pandas/core/indexes/accessors.py
|
7
|
7566
|
"""
datetimelike delegation
"""
import numpy as np
from pandas.core.dtypes.common import (
is_period_arraylike,
is_datetime_arraylike, is_integer_dtype,
is_datetime64_dtype, is_datetime64tz_dtype,
is_timedelta64_dtype, is_categorical_dtype,
is_list_like)
from pandas.core.base import PandasDelegate, NoNewAttributesMixin
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas._libs.period import IncompatibleFrequency # noqa
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.core.algorithms import take_1d
def is_datetimelike(data):
"""
return a boolean if we can be successfully converted to a datetimelike
"""
try:
maybe_to_datetimelike(data)
return True
except (Exception):
pass
return False
def maybe_to_datetimelike(data, copy=False):
"""
return a DelegatedClass of a Series that is datetimelike
(e.g. datetime64[ns],timedelta64[ns] dtype or a Series of Periods)
raise TypeError if this is not possible.
Parameters
----------
data : Series
copy : boolean, default False
copy the input data
Returns
-------
DelegatedClass
"""
from pandas import Series
if not isinstance(data, Series):
raise TypeError("cannot convert an object of type {0} to a "
"datetimelike index".format(type(data)))
index = data.index
name = data.name
orig = data if is_categorical_dtype(data) else None
if orig is not None:
data = orig.values.categories
if is_datetime64_dtype(data.dtype):
return DatetimeProperties(DatetimeIndex(data, copy=copy, freq='infer'),
index, name=name, orig=orig)
elif is_datetime64tz_dtype(data.dtype):
return DatetimeProperties(DatetimeIndex(data, copy=copy, freq='infer',
ambiguous='infer'),
index, data.name, orig=orig)
elif is_timedelta64_dtype(data.dtype):
return TimedeltaProperties(TimedeltaIndex(data, copy=copy,
freq='infer'), index,
name=name, orig=orig)
else:
if is_period_arraylike(data):
return PeriodProperties(PeriodIndex(data, copy=copy), index,
name=name, orig=orig)
if is_datetime_arraylike(data):
return DatetimeProperties(DatetimeIndex(data, copy=copy,
freq='infer'), index,
name=name, orig=orig)
raise TypeError("cannot convert an object of type {0} to a "
"datetimelike index".format(type(data)))
class Properties(PandasDelegate, NoNewAttributesMixin):
def __init__(self, values, index, name, orig=None):
self.values = values
self.index = index
self.name = name
self.orig = orig
self._freeze()
def _delegate_property_get(self, name):
from pandas import Series
result = getattr(self.values, name)
# maybe need to upcast (ints)
if isinstance(result, np.ndarray):
if is_integer_dtype(result):
result = result.astype('int64')
elif not is_list_like(result):
return result
result = np.asarray(result)
# blow up if we operate on categories
if self.orig is not None:
result = take_1d(result, self.orig.cat.codes)
# return the result as a Series, which is by definition a copy
result = Series(result, index=self.index, name=self.name)
# setting this object will show a SettingWithCopyWarning/Error
result.is_copy = ("modifications to a property of a datetimelike "
"object are not supported and are discarded. "
"Change values on the original.")
return result
def _delegate_property_set(self, name, value, *args, **kwargs):
raise ValueError("modifications to a property of a datetimelike "
"object are not supported. Change values on the "
"original.")
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self.values, name)
result = method(*args, **kwargs)
if not is_list_like(result):
return result
result = Series(result, index=self.index, name=self.name)
# setting this object will show a SettingWithCopyWarning/Error
result.is_copy = ("modifications to a method of a datetimelike object "
"are not supported and are discarded. Change "
"values on the original.")
return result
class DatetimeProperties(Properties):
"""
Accessor object for datetimelike properties of the Series values.
Examples
--------
>>> s.dt.hour
>>> s.dt.second
>>> s.dt.quarter
Returns a Series indexed like the original Series.
Raises TypeError if the Series does not contain datetimelike values.
"""
def to_pydatetime(self):
return self.values.to_pydatetime()
DatetimeProperties._add_delegate_accessors(
delegate=DatetimeIndex,
accessors=DatetimeIndex._datetimelike_ops,
typ='property')
DatetimeProperties._add_delegate_accessors(
delegate=DatetimeIndex,
accessors=DatetimeIndex._datetimelike_methods,
typ='method')
class TimedeltaProperties(Properties):
"""
Accessor object for datetimelike properties of the Series values.
Examples
--------
>>> s.dt.hours
>>> s.dt.seconds
Returns a Series indexed like the original Series.
Raises TypeError if the Series does not contain datetimelike values.
"""
def to_pytimedelta(self):
return self.values.to_pytimedelta()
@property
def components(self):
"""
Return a dataframe of the components (days, hours, minutes,
seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas.
Returns
-------
a DataFrame
"""
return self.values.components.set_index(self.index)
TimedeltaProperties._add_delegate_accessors(
delegate=TimedeltaIndex,
accessors=TimedeltaIndex._datetimelike_ops,
typ='property')
TimedeltaProperties._add_delegate_accessors(
delegate=TimedeltaIndex,
accessors=TimedeltaIndex._datetimelike_methods,
typ='method')
class PeriodProperties(Properties):
"""
Accessor object for datetimelike properties of the Series values.
Examples
--------
>>> s.dt.hour
>>> s.dt.second
>>> s.dt.quarter
Returns a Series indexed like the original Series.
Raises TypeError if the Series does not contain datetimelike values.
"""
PeriodProperties._add_delegate_accessors(
delegate=PeriodIndex,
accessors=PeriodIndex._datetimelike_ops,
typ='property')
PeriodProperties._add_delegate_accessors(
delegate=PeriodIndex,
accessors=PeriodIndex._datetimelike_methods,
typ='method')
class CombinedDatetimelikeProperties(DatetimeProperties, TimedeltaProperties):
# This class is never instantiated, and exists solely for the benefit of
# the Series.dt class property. For Series objects, .dt will always be one
# of the more specific classes above.
__doc__ = DatetimeProperties.__doc__
|
mit
|
darshanthaker/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_cocoaagg.py
|
70
|
8970
|
from __future__ import division
"""
backend_cocoaagg.py
A native Cocoa backend via PyObjC in OSX.
Author: Charles Moad ([email protected])
Notes:
- Requires PyObjC (currently testing v1.3.7)
- The Tk backend works nicely on OSX. This code
primarily serves as an example of embedding a
matplotlib rendering context into a cocoa app
using a NSImageView.
"""
import os, sys
try:
import objc
except:
print >>sys.stderr, 'The CocoaAgg backend requires PyObjC to be installed!'
print >>sys.stderr, ' (currently testing v1.3.7)'
sys.exit()
from Foundation import *
from AppKit import *
from PyObjCTools import NibClassBuilder, AppHelper
import matplotlib
from matplotlib.figure import Figure
from matplotlib.backend_bases import FigureManagerBase, FigureCanvasBase
from backend_agg import FigureCanvasAgg
from matplotlib._pylab_helpers import Gcf
mplBundle = NSBundle.bundleWithPath_(os.path.dirname(__file__))
def new_figure_manager(num, *args, **kwargs):
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass( *args, **kwargs )
canvas = FigureCanvasCocoaAgg(thisFig)
return FigureManagerCocoaAgg(canvas, num)
def show():
for manager in Gcf.get_all_fig_managers():
manager.show()
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.show()
class FigureCanvasCocoaAgg(FigureCanvasAgg):
def draw(self):
FigureCanvasAgg.draw(self)
def blit(self, bbox):
pass
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
NibClassBuilder.extractClasses('Matplotlib.nib', mplBundle)
class MatplotlibController(NibClassBuilder.AutoBaseClass):
# available outlets:
# NSWindow plotWindow
# PlotView plotView
def awakeFromNib(self):
# Get a reference to the active canvas
NSApp().setDelegate_(self)
self.app = NSApp()
self.canvas = Gcf.get_active().canvas
self.plotView.canvas = self.canvas
self.canvas.plotView = self.plotView
self.plotWindow.setAcceptsMouseMovedEvents_(True)
self.plotWindow.makeKeyAndOrderFront_(self)
self.plotWindow.setDelegate_(self)#.plotView)
self.plotView.setImageFrameStyle_(NSImageFrameGroove)
self.plotView.image_ = NSImage.alloc().initWithSize_((0,0))
self.plotView.setImage_(self.plotView.image_)
# Make imageview first responder for key events
self.plotWindow.makeFirstResponder_(self.plotView)
# Force the first update
self.plotView.windowDidResize_(self)
def windowDidResize_(self, sender):
self.plotView.windowDidResize_(sender)
def windowShouldClose_(self, sender):
#NSApplication.sharedApplication().stop_(self)
self.app.stop_(self)
return objc.YES
def saveFigure_(self, sender):
p = NSSavePanel.savePanel()
if(p.runModal() == NSFileHandlingPanelOKButton):
self.canvas.print_figure(p.filename())
def printFigure_(self, sender):
op = NSPrintOperation.printOperationWithView_(self.plotView)
op.runOperation()
class PlotWindow(NibClassBuilder.AutoBaseClass):
pass
class PlotView(NibClassBuilder.AutoBaseClass):
def updatePlot(self):
w,h = self.canvas.get_width_height()
# Remove all previous images
for i in xrange(self.image_.representations().count()):
self.image_.removeRepresentation_(self.image_.representations().objectAtIndex_(i))
self.image_.setSize_((w,h))
brep = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(
(self.canvas.buffer_rgba(0,0),'','','',''), # Image data
w, # width
h, # height
8, # bits per pixel
4, # components per pixel
True, # has alpha?
False, # is planar?
NSCalibratedRGBColorSpace, # color space
w*4, # row bytes
32) # bits per pixel
self.image_.addRepresentation_(brep)
self.setNeedsDisplay_(True)
def windowDidResize_(self, sender):
w,h = self.bounds().size
dpi = self.canvas.figure.dpi
self.canvas.figure.set_size_inches(w / dpi, h / dpi)
self.canvas.draw()
self.updatePlot()
def mouseDown_(self, event):
loc = self.convertPoint_fromView_(event.locationInWindow(), None)
type = event.type()
if (type == NSLeftMouseDown):
button = 1
else:
print >>sys.stderr, 'Unknown mouse event type:', type
button = -1
self.canvas.button_press_event(loc.x, loc.y, button)
self.updatePlot()
def mouseDragged_(self, event):
loc = self.convertPoint_fromView_(event.locationInWindow(), None)
self.canvas.motion_notify_event(loc.x, loc.y)
self.updatePlot()
def mouseUp_(self, event):
loc = self.convertPoint_fromView_(event.locationInWindow(), None)
type = event.type()
if (type == NSLeftMouseUp):
button = 1
else:
print >>sys.stderr, 'Unknown mouse event type:', type
button = -1
self.canvas.button_release_event(loc.x, loc.y, button)
self.updatePlot()
def keyDown_(self, event):
self.canvas.key_press_event(event.characters())
self.updatePlot()
def keyUp_(self, event):
self.canvas.key_release_event(event.characters())
self.updatePlot()
class MPLBootstrap(NSObject):
# Loads the nib containing the PlotWindow and PlotView
def startWithBundle_(self, bundle):
#NSApplicationLoad()
if not bundle.loadNibFile_externalNameTable_withZone_('Matplotlib.nib', {}, None):
print >>sys.stderr, 'Unable to load Matplotlib Cocoa UI!'
sys.exit()
class FigureManagerCocoaAgg(FigureManagerBase):
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
try:
WMEnable('Matplotlib')
except:
# MULTIPLE FIGURES ARE BUGGY!
pass # If there are multiple figures we only need to enable once
#self.bootstrap = MPLBootstrap.alloc().init().performSelectorOnMainThread_withObject_waitUntilDone_(
# 'startWithBundle:',
# mplBundle,
# False)
def show(self):
# Load a new PlotWindow
self.bootstrap = MPLBootstrap.alloc().init().performSelectorOnMainThread_withObject_waitUntilDone_(
'startWithBundle:',
mplBundle,
False)
NSApplication.sharedApplication().run()
FigureManager = FigureManagerCocoaAgg
#### Everything below taken from PyObjC examples
#### This is a hack to allow python scripts to access
#### the window manager without running pythonw.
def S(*args):
return ''.join(args)
OSErr = objc._C_SHT
OUTPSN = 'o^{ProcessSerialNumber=LL}'
INPSN = 'n^{ProcessSerialNumber=LL}'
FUNCTIONS=[
# These two are public API
( u'GetCurrentProcess', S(OSErr, OUTPSN) ),
( u'SetFrontProcess', S(OSErr, INPSN) ),
# This is undocumented SPI
( u'CPSSetProcessName', S(OSErr, INPSN, objc._C_CHARPTR) ),
( u'CPSEnableForegroundOperation', S(OSErr, INPSN) ),
]
def WMEnable(name='Python'):
if isinstance(name, unicode):
name = name.encode('utf8')
mainBundle = NSBundle.mainBundle()
bPath = os.path.split(os.path.split(os.path.split(sys.executable)[0])[0])[0]
if mainBundle.bundlePath() == bPath:
return True
bndl = NSBundle.bundleWithPath_(objc.pathForFramework('/System/Library/Frameworks/ApplicationServices.framework'))
if bndl is None:
print >>sys.stderr, 'ApplicationServices missing'
return False
d = {}
objc.loadBundleFunctions(bndl, d, FUNCTIONS)
for (fn, sig) in FUNCTIONS:
if fn not in d:
print >>sys.stderr, 'Missing', fn
return False
err, psn = d['GetCurrentProcess']()
if err:
print >>sys.stderr, 'GetCurrentProcess', (err, psn)
return False
err = d['CPSSetProcessName'](psn, name)
if err:
print >>sys.stderr, 'CPSSetProcessName', (err, psn)
return False
err = d['CPSEnableForegroundOperation'](psn)
if err:
#print >>sys.stderr, 'CPSEnableForegroundOperation', (err, psn)
return False
err = d['SetFrontProcess'](psn)
if err:
print >>sys.stderr, 'SetFrontProcess', (err, psn)
return False
return True
|
agpl-3.0
|
jorge2703/scikit-learn
|
doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py
|
254
|
2005
|
"""Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to be
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
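# --- One possible solution sketch (not part of the original skeleton). ---
# It fills in the TASKs above so that the reporting code below can run; the
# parameter choices (character n-grams of length 1 to 3, a Perceptron
# classifier) are illustrative rather than prescribed by the exercise.
vectorizer = TfidfVectorizer(analyzer='char', ngram_range=(1, 3))
clf = Pipeline([
    ('vec', vectorizer),
    ('clf', Perceptron()),
])
clf.fit(docs_train, y_train)
y_predicted = clf.predict(docs_test)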
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
|
bsd-3-clause
|
Titan-C/scikit-learn
|
examples/feature_selection/plot_rfe_with_cross_validation.py
|
161
|
1380
|
"""
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
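# Hedged follow-up sketch (not in the original example): after fitting, RFECV
# exposes which features were kept and how the features rank. The attributes
# used below (support_, ranking_) are standard scikit-learn RFECV attributes.
print("Selected feature mask: %s" % rfecv.support_)
print("Feature ranking (1 = selected): %s" % rfecv.ranking_)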
|
bsd-3-clause
|
ashwinidasar/ml_lab_ecsc_306
|
labwork/lab2/sci-learn/linear_regression.py
|
104
|
1936
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
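# Hedged alternative sketch (not in the original example): the same error
# metrics can be obtained from sklearn.metrics instead of the manual NumPy
# expression used above.
from sklearn.metrics import mean_squared_error, r2_score
diabetes_y_pred = regr.predict(diabetes_X_test)
print("MSE via sklearn.metrics: %.2f"
      % mean_squared_error(diabetes_y_test, diabetes_y_pred))
print("R^2 via sklearn.metrics: %.2f"
      % r2_score(diabetes_y_test, diabetes_y_pred))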
|
apache-2.0
|
nmartensen/pandas
|
pandas/core/strings.py
|
2
|
59878
|
import numpy as np
from pandas.compat import zip
from pandas.core.dtypes.generic import ABCSeries, ABCIndex
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.common import (
is_bool_dtype,
is_categorical_dtype,
is_object_dtype,
is_string_like,
is_list_like,
is_scalar,
is_integer,
is_re)
from pandas.core.common import _values_from_object
from pandas.core.algorithms import take_1d
import pandas.compat as compat
from pandas.core.base import NoNewAttributesMixin
from pandas.util._decorators import Appender
import re
import pandas._libs.lib as lib
import warnings
import textwrap
import codecs
_cpython_optimized_encoders = (
"utf-8", "utf8", "latin-1", "latin1", "iso-8859-1", "mbcs", "ascii"
)
_cpython_optimized_decoders = _cpython_optimized_encoders + (
"utf-16", "utf-32"
)
_shared_docs = dict()
def _get_array_list(arr, others):
from pandas.core.series import Series
if len(others) and isinstance(_values_from_object(others)[0],
(list, np.ndarray, Series)):
arrays = [arr] + list(others)
else:
arrays = [arr, others]
return [np.asarray(x, dtype=object) for x in arrays]
def str_cat(arr, others=None, sep=None, na_rep=None):
"""
Concatenate strings in the Series/Index with given separator.
Parameters
----------
others : list-like, or list of list-likes
If None, returns str concatenating strings of the Series
sep : string or None, default None
na_rep : string or None, default None
If None, NA values in the Series are ignored.
Returns
-------
concat : Series/Index of objects or str
Examples
--------
When ``na_rep`` is `None` (default behavior), NaN value(s)
in the Series are ignored.
>>> Series(['a','b',np.nan,'c']).str.cat(sep=' ')
'a b c'
>>> Series(['a','b',np.nan,'c']).str.cat(sep=' ', na_rep='?')
'a b ? c'
If ``others`` is specified, corresponding values are
concatenated with the separator. Result will be a Series of strings.
>>> Series(['a', 'b', 'c']).str.cat(['A', 'B', 'C'], sep=',')
0 a,A
1 b,B
2 c,C
dtype: object
Otherwise, strings in the Series are concatenated. Result will be a string.
>>> Series(['a', 'b', 'c']).str.cat(sep=',')
'a,b,c'
Also, you can pass a list of list-likes.
>>> Series(['a', 'b']).str.cat([['x', 'y'], ['1', '2']], sep=',')
0 a,x,1
1 b,y,2
dtype: object
"""
if sep is None:
sep = ''
if others is not None:
arrays = _get_array_list(arr, others)
n = _length_check(arrays)
masks = np.array([isna(x) for x in arrays])
cats = None
if na_rep is None:
na_mask = np.logical_or.reduce(masks, axis=0)
result = np.empty(n, dtype=object)
np.putmask(result, na_mask, np.nan)
notmask = ~na_mask
tuples = zip(*[x[notmask] for x in arrays])
cats = [sep.join(tup) for tup in tuples]
result[notmask] = cats
else:
for i, x in enumerate(arrays):
x = np.where(masks[i], na_rep, x)
if cats is None:
cats = x
else:
cats = cats + sep + x
result = cats
return result
else:
arr = np.asarray(arr, dtype=object)
mask = isna(arr)
if na_rep is None and mask.any():
if sep == '':
na_rep = ''
else:
return sep.join(arr[notna(arr)])
return sep.join(np.where(mask, na_rep, arr))
def _length_check(others):
n = None
for x in others:
try:
if n is None:
n = len(x)
elif len(x) != n:
raise ValueError('All arrays must be same length')
except TypeError:
raise ValueError("Did you mean to supply a `sep` keyword?")
return n
def _na_map(f, arr, na_result=np.nan, dtype=object):
# should really _check_ for NA
return _map(f, arr, na_mask=True, na_value=na_result, dtype=dtype)
def _map(f, arr, na_mask=False, na_value=np.nan, dtype=object):
if not len(arr):
return np.ndarray(0, dtype=dtype)
if isinstance(arr, ABCSeries):
arr = arr.values
if not isinstance(arr, np.ndarray):
arr = np.asarray(arr, dtype=object)
if na_mask:
mask = isna(arr)
try:
convert = not all(mask)
result = lib.map_infer_mask(arr, f, mask.view(np.uint8), convert)
except (TypeError, AttributeError) as e:
# Reraise the exception if callable `f` got wrong number of args.
# The user may want to be warned by this, instead of getting NaN
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
if len(e.args) >= 1 and re.search(p_err, e.args[0]):
raise e
def g(x):
try:
return f(x)
except (TypeError, AttributeError):
return na_value
return _map(g, arr, dtype=dtype)
if na_value is not np.nan:
np.putmask(result, mask, na_value)
if result.dtype == object:
result = lib.maybe_convert_objects(result)
return result
else:
return lib.map_infer(arr, f)
def str_count(arr, pat, flags=0):
"""
Count occurrences of pattern in each string of the Series/Index.
Parameters
----------
pat : string, valid regular expression
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE
Returns
-------
counts : Series/Index of integer values
"""
regex = re.compile(pat, flags=flags)
f = lambda x: len(regex.findall(x))
return _na_map(f, arr, dtype=int)
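# Illustrative usage sketch (not part of the original module); assumes
# ``import pandas as pd``.
# >>> pd.Series(['apple', 'banana']).str.count('a')
# 0    1
# 1    3
# dtype: int64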
def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True):
"""
Return boolean Series/``array`` indicating whether the given pattern/regex
is contained in each string of the Series/Index.
Parameters
----------
pat : string
Character sequence or regular expression
case : boolean, default True
If True, case sensitive
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE
na : default NaN, fill value for missing values.
regex : bool, default True
If True use re.search, otherwise use Python in operator
Returns
-------
contained : Series/array of boolean values
See Also
--------
match : analogous, but stricter, relying on re.match instead of re.search
"""
if regex:
if not case:
flags |= re.IGNORECASE
regex = re.compile(pat, flags=flags)
if regex.groups > 0:
warnings.warn("This pattern has match groups. To actually get the"
" groups, use str.extract.", UserWarning,
stacklevel=3)
f = lambda x: bool(regex.search(x))
else:
if case:
f = lambda x: pat in x
else:
upper_pat = pat.upper()
f = lambda x: upper_pat in x
uppered = _na_map(lambda x: x.upper(), arr)
return _na_map(f, uppered, na, dtype=bool)
return _na_map(f, arr, na, dtype=bool)
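# Illustrative usage sketch (not part of the original module); assumes
# ``import pandas as pd``.
# >>> pd.Series(['Mouse', 'dog']).str.contains('og')
# 0    False
# 1     True
# dtype: bool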
def str_startswith(arr, pat, na=np.nan):
"""
Return boolean Series/``array`` indicating whether each string in the
Series/Index starts with passed pattern. Equivalent to
:meth:`str.startswith`.
Parameters
----------
pat : string
Character sequence
na : bool, default NaN
Returns
-------
startswith : Series/array of boolean values
"""
f = lambda x: x.startswith(pat)
return _na_map(f, arr, na, dtype=bool)
def str_endswith(arr, pat, na=np.nan):
"""
Return boolean Series indicating whether each string in the
Series/Index ends with passed pattern. Equivalent to
:meth:`str.endswith`.
Parameters
----------
pat : string
Character sequence
na : bool, default NaN
Returns
-------
endswith : Series/array of boolean values
"""
f = lambda x: x.endswith(pat)
return _na_map(f, arr, na, dtype=bool)
def str_replace(arr, pat, repl, n=-1, case=None, flags=0):
"""
Replace occurrences of pattern/regex in the Series/Index with
some other string. Equivalent to :meth:`str.replace` or
:func:`re.sub`.
Parameters
----------
pat : string or compiled regex
String can be a character sequence or regular expression.
.. versionadded:: 0.20.0
`pat` also accepts a compiled regex.
repl : string or callable
Replacement string or a callable. The callable is passed the regex
match object and must return a replacement string to be used.
See :func:`re.sub`.
.. versionadded:: 0.20.0
`repl` also accepts a callable.
n : int, default -1 (all)
Number of replacements to make from start
case : boolean, default None
- If True, case sensitive (the default if `pat` is a string)
- Set to False for case insensitive
- Cannot be set if `pat` is a compiled regex
flags : int, default 0 (no flags)
- re module flags, e.g. re.IGNORECASE
- Cannot be set if `pat` is a compiled regex
Returns
-------
replaced : Series/Index of objects
Notes
-----
When `pat` is a compiled regex, all flags should be included in the
compiled regex. Use of `case` or `flags` with a compiled regex will
raise an error.
Examples
--------
When `repl` is a string, every `pat` is replaced as with
:meth:`str.replace`. NaN value(s) in the Series are left as is.
>>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f', 'b')
0 boo
1 buz
2 NaN
dtype: object
When `repl` is a callable, it is called on every `pat` using
:func:`re.sub`. The callable should expect one positional argument
(a regex object) and return a string.
To get the idea:
>>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr)
0 <_sre.SRE_Match object; span=(0, 1), match='f'>oo
1 <_sre.SRE_Match object; span=(0, 1), match='f'>uz
2 NaN
dtype: object
Reverse every lowercase alphabetic word:
>>> repl = lambda m: m.group(0)[::-1]
>>> pd.Series(['foo 123', 'bar baz', np.nan]).str.replace(r'[a-z]+', repl)
0 oof 123
1 rab zab
2 NaN
dtype: object
Using regex groups (extract second group and swap case):
>>> pat = r"(?P<one>\w+) (?P<two>\w+) (?P<three>\w+)"
>>> repl = lambda m: m.group('two').swapcase()
>>> pd.Series(['One Two Three', 'Foo Bar Baz']).str.replace(pat, repl)
0 tWO
1 bAR
dtype: object
Using a compiled regex with flags
>>> regex_pat = re.compile(r'FUZ', flags=re.IGNORECASE)
>>> pd.Series(['foo', 'fuz', np.nan]).str.replace(regex_pat, 'bar')
0 foo
1 bar
2 NaN
dtype: object
"""
# Check whether repl is valid (GH 13438, GH 15055)
if not (is_string_like(repl) or callable(repl)):
raise TypeError("repl must be a string or callable")
is_compiled_re = is_re(pat)
if is_compiled_re:
if (case is not None) or (flags != 0):
raise ValueError("case and flags cannot be set"
" when pat is a compiled regex")
else:
# not a compiled regex
# set default case
if case is None:
case = True
# add case flag, if provided
if case is False:
flags |= re.IGNORECASE
use_re = is_compiled_re or len(pat) > 1 or flags or callable(repl)
if use_re:
n = n if n >= 0 else 0
regex = re.compile(pat, flags=flags)
f = lambda x: regex.sub(repl=repl, string=x, count=n)
else:
f = lambda x: x.replace(pat, repl, n)
return _na_map(f, arr)
def str_repeat(arr, repeats):
"""
Duplicate each string in the Series/Index by indicated number
of times.
Parameters
----------
repeats : int or array
Same value for all (int) or different value per (array)
Returns
-------
repeated : Series/Index of objects
"""
if is_scalar(repeats):
def rep(x):
try:
return compat.binary_type.__mul__(x, repeats)
except TypeError:
return compat.text_type.__mul__(x, repeats)
return _na_map(rep, arr)
else:
def rep(x, r):
try:
return compat.binary_type.__mul__(x, r)
except TypeError:
return compat.text_type.__mul__(x, r)
repeats = np.asarray(repeats, dtype=object)
result = lib.vec_binop(_values_from_object(arr), repeats, rep)
return result
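# Illustrative usage sketch (not part of the original module); assumes
# ``import pandas as pd``.
# >>> pd.Series(['a', 'b']).str.repeat(3)
# 0    aaa
# 1    bbb
# dtype: object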
def str_match(arr, pat, case=True, flags=0, na=np.nan, as_indexer=None):
"""
Determine if each string matches a regular expression.
Parameters
----------
pat : string
Character sequence or regular expression
case : boolean, default True
If True, case sensitive
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE
na : default NaN, fill value for missing values.
as_indexer : DEPRECATED - Keyword is ignored.
Returns
-------
Series/array of boolean values
See Also
--------
contains : analogous, but less strict, relying on re.search instead of
re.match
extract : extract matched groups
"""
if not case:
flags |= re.IGNORECASE
regex = re.compile(pat, flags=flags)
if (as_indexer is False) and (regex.groups > 0):
raise ValueError("as_indexer=False with a pattern with groups is no "
"longer supported. Use '.str.extract(pat)' instead")
elif as_indexer is not None:
# Previously, this keyword was used to switch away from the default (and
# deprecated) behaviour. The keyword is no longer needed.
warnings.warn("'as_indexer' keyword was specified but is ignored "
"(match now returns a boolean indexer by default), "
"and will be removed in a future version.",
FutureWarning, stacklevel=3)
dtype = bool
f = lambda x: bool(regex.match(x))
return _na_map(f, arr, na, dtype=dtype)
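# Illustrative usage sketch (not part of the original module); assumes
# ``import pandas as pd``. Note that match anchors at the start of each string.
# >>> pd.Series(['dog22', 'cat']).str.match(r'[a-z]+\d')
# 0     True
# 1    False
# dtype: bool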
def _get_single_group_name(rx):
try:
return list(rx.groupindex.keys()).pop()
except IndexError:
return None
def _groups_or_na_fun(regex):
"""Used in both extract_noexpand and extract_frame"""
if regex.groups == 0:
raise ValueError("pattern contains no capture groups")
empty_row = [np.nan] * regex.groups
def f(x):
if not isinstance(x, compat.string_types):
return empty_row
m = regex.search(x)
if m:
return [np.nan if item is None else item for item in m.groups()]
else:
return empty_row
return f
def _str_extract_noexpand(arr, pat, flags=0):
"""
Find groups in each string in the Series using passed regular
expression. This function is called from
str_extract(expand=False), and can return Series, DataFrame, or
Index.
"""
from pandas import DataFrame, Index
regex = re.compile(pat, flags=flags)
groups_or_na = _groups_or_na_fun(regex)
if regex.groups == 1:
result = np.array([groups_or_na(val)[0] for val in arr], dtype=object)
name = _get_single_group_name(regex)
else:
if isinstance(arr, Index):
raise ValueError("only one regex group is supported with Index")
name = None
names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
columns = [names.get(1 + i, i) for i in range(regex.groups)]
if arr.empty:
result = DataFrame(columns=columns, dtype=object)
else:
result = DataFrame(
[groups_or_na(val) for val in arr],
columns=columns,
index=arr.index,
dtype=object)
return result, name
def _str_extract_frame(arr, pat, flags=0):
"""
For each subject string in the Series, extract groups from the
first match of regular expression pat. This function is called from
str_extract(expand=True), and always returns a DataFrame.
"""
from pandas import DataFrame
regex = re.compile(pat, flags=flags)
groups_or_na = _groups_or_na_fun(regex)
names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
columns = [names.get(1 + i, i) for i in range(regex.groups)]
if len(arr) == 0:
return DataFrame(columns=columns, dtype=object)
try:
result_index = arr.index
except AttributeError:
result_index = None
return DataFrame(
[groups_or_na(val) for val in arr],
columns=columns,
index=result_index,
dtype=object)
def str_extract(arr, pat, flags=0, expand=None):
"""
For each subject string in the Series, extract groups from the
first match of regular expression pat.
Parameters
----------
pat : string
Regular expression pattern with capturing groups
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE
expand : bool, default False
* If True, return DataFrame.
* If False, return Series/Index/DataFrame.
.. versionadded:: 0.18.0
Returns
-------
DataFrame with one row for each subject string, and one column for
each group. Any capture group names in regular expression pat will
be used for column names; otherwise capture group numbers will be
used. The dtype of each result column is always object, even when
no match is found. If expand=False and pat has only one capture group,
then return a Series (if subject is a Series) or Index (if subject
is an Index).
See Also
--------
extractall : returns all matches (not just the first match)
Examples
--------
A pattern with two groups will return a DataFrame with two columns.
Non-matches will be NaN.
>>> s = Series(['a1', 'b2', 'c3'])
>>> s.str.extract('([ab])(\d)')
0 1
0 a 1
1 b 2
2 NaN NaN
A pattern may contain optional groups.
>>> s.str.extract('([ab])?(\d)')
0 1
0 a 1
1 b 2
2 NaN 3
Named groups will become column names in the result.
>>> s.str.extract('(?P<letter>[ab])(?P<digit>\d)')
letter digit
0 a 1
1 b 2
2 NaN NaN
A pattern with one group will return a DataFrame with one column
if expand=True.
>>> s.str.extract('[ab](\d)', expand=True)
0
0 1
1 2
2 NaN
A pattern with one group will return a Series if expand=False.
>>> s.str.extract('[ab](\d)', expand=False)
0 1
1 2
2 NaN
dtype: object
"""
if expand is None:
warnings.warn(
"currently extract(expand=None) " +
"means expand=False (return Index/Series/DataFrame) " +
"but in a future version of pandas this will be changed " +
"to expand=True (return DataFrame)",
FutureWarning,
stacklevel=3)
expand = False
if not isinstance(expand, bool):
raise ValueError("expand must be True or False")
if expand:
return _str_extract_frame(arr._orig, pat, flags=flags)
else:
result, name = _str_extract_noexpand(arr._data, pat, flags=flags)
return arr._wrap_result(result, name=name, expand=expand)
def str_extractall(arr, pat, flags=0):
"""
For each subject string in the Series, extract groups from all
matches of regular expression pat. When each subject string in the
Series has exactly one match, extractall(pat).xs(0, level='match')
is the same as extract(pat).
.. versionadded:: 0.18.0
Parameters
----------
pat : string
Regular expression pattern with capturing groups
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE
Returns
-------
A DataFrame with one row for each match, and one column for each
group. Its rows have a MultiIndex with first levels that come from
the subject Series. The last level is named 'match' and indicates
the order in the subject. Any capture group names in regular
expression pat will be used for column names; otherwise capture
group numbers will be used.
See Also
--------
extract : returns first match only (not all matches)
Examples
--------
A pattern with one group will return a DataFrame with one column.
Indices with no matches will not appear in the result.
>>> s = Series(["a1a2", "b1", "c1"], index=["A", "B", "C"])
>>> s.str.extractall("[ab](\d)")
0
match
A 0 1
1 2
B 0 1
Capture group names are used for column names of the result.
>>> s.str.extractall("[ab](?P<digit>\d)")
digit
match
A 0 1
1 2
B 0 1
A pattern with two groups will return a DataFrame with two columns.
>>> s.str.extractall("(?P<letter>[ab])(?P<digit>\d)")
letter digit
match
A 0 a 1
1 a 2
B 0 b 1
Optional groups that do not match are NaN in the result.
>>> s.str.extractall("(?P<letter>[ab])?(?P<digit>\d)")
letter digit
match
A 0 a 1
1 a 2
B 0 b 1
C 0 NaN 1
"""
regex = re.compile(pat, flags=flags)
# the regex must contain capture groups.
if regex.groups == 0:
raise ValueError("pattern contains no capture groups")
if isinstance(arr, ABCIndex):
arr = arr.to_series().reset_index(drop=True)
names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
columns = [names.get(1 + i, i) for i in range(regex.groups)]
match_list = []
index_list = []
is_mi = arr.index.nlevels > 1
for subject_key, subject in arr.iteritems():
if isinstance(subject, compat.string_types):
if not is_mi:
subject_key = (subject_key, )
for match_i, match_tuple in enumerate(regex.findall(subject)):
if isinstance(match_tuple, compat.string_types):
match_tuple = (match_tuple,)
na_tuple = [np.NaN if group == "" else group
for group in match_tuple]
match_list.append(na_tuple)
result_key = tuple(subject_key + (match_i, ))
index_list.append(result_key)
if 0 < len(index_list):
from pandas import MultiIndex
index = MultiIndex.from_tuples(
index_list, names=arr.index.names + ["match"])
else:
index = None
result = arr._constructor_expanddim(match_list, index=index,
columns=columns)
return result
def str_get_dummies(arr, sep='|'):
"""
Split each string in the Series by sep and return a frame of
dummy/indicator variables.
Parameters
----------
sep : string, default "|"
String to split on.
Returns
-------
dummies : DataFrame
Examples
--------
>>> Series(['a|b', 'a', 'a|c']).str.get_dummies()
a b c
0 1 1 0
1 1 0 0
2 1 0 1
>>> Series(['a|b', np.nan, 'a|c']).str.get_dummies()
a b c
0 1 1 0
1 0 0 0
2 1 0 1
See Also
--------
pandas.get_dummies
"""
arr = arr.fillna('')
try:
arr = sep + arr + sep
except TypeError:
arr = sep + arr.astype(str) + sep
tags = set()
for ts in arr.str.split(sep):
tags.update(ts)
tags = sorted(tags - set([""]))
dummies = np.empty((len(arr), len(tags)), dtype=np.int64)
for i, t in enumerate(tags):
pat = sep + t + sep
dummies[:, i] = lib.map_infer(arr.values, lambda x: pat in x)
return dummies, tags
def str_join(arr, sep):
"""
Join lists contained as elements in the Series/Index with
passed delimiter. Equivalent to :meth:`str.join`.
Parameters
----------
sep : string
Delimiter
Returns
-------
joined : Series/Index of objects
"""
return _na_map(sep.join, arr)
def str_findall(arr, pat, flags=0):
"""
Find all occurrences of pattern or regular expression in the
Series/Index. Equivalent to :func:`re.findall`.
Parameters
----------
pat : string
Pattern or regular expression
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE
Returns
-------
matches : Series/Index of lists
See Also
--------
extractall : returns DataFrame with one column per capture group
"""
regex = re.compile(pat, flags=flags)
return _na_map(regex.findall, arr)
def str_find(arr, sub, start=0, end=None, side='left'):
"""
Return indexes in each string in the Series/Index where the
substring is fully contained between [start:end]. Return -1 on failure.
Parameters
----------
sub : str
Substring being searched
start : int
Left edge index
end : int
Right edge index
side : {'left', 'right'}, default 'left'
Specifies a starting side, equivalent to ``find`` or ``rfind``
Returns
-------
found : Series/Index of integer values
"""
if not isinstance(sub, compat.string_types):
msg = 'expected a string object, not {0}'
raise TypeError(msg.format(type(sub).__name__))
if side == 'left':
method = 'find'
elif side == 'right':
method = 'rfind'
else: # pragma: no cover
raise ValueError('Invalid side')
if end is None:
f = lambda x: getattr(x, method)(sub, start)
else:
f = lambda x: getattr(x, method)(sub, start, end)
return _na_map(f, arr, dtype=int)
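# Illustrative usage sketch (not part of the original module); assumes
# ``import pandas as pd``.
# >>> pd.Series(['hayneedlehay']).str.find('needle')
# 0    3
# dtype: int64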
def str_index(arr, sub, start=0, end=None, side='left'):
if not isinstance(sub, compat.string_types):
msg = 'expected a string object, not {0}'
raise TypeError(msg.format(type(sub).__name__))
if side == 'left':
method = 'index'
elif side == 'right':
method = 'rindex'
else: # pragma: no cover
raise ValueError('Invalid side')
if end is None:
f = lambda x: getattr(x, method)(sub, start)
else:
f = lambda x: getattr(x, method)(sub, start, end)
return _na_map(f, arr, dtype=int)
def str_pad(arr, width, side='left', fillchar=' '):
"""
Pad strings in the Series/Index with an additional character to
specified side.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be filled
with ``fillchar``
side : {'left', 'right', 'both'}, default 'left'
fillchar : str
Additional character for filling, default is whitespace
Returns
-------
padded : Series/Index of objects
"""
if not isinstance(fillchar, compat.string_types):
msg = 'fillchar must be a character, not {0}'
raise TypeError(msg.format(type(fillchar).__name__))
if len(fillchar) != 1:
raise TypeError('fillchar must be a character, not str')
if not is_integer(width):
msg = 'width must be of integer type, not {0}'
raise TypeError(msg.format(type(width).__name__))
if side == 'left':
f = lambda x: x.rjust(width, fillchar)
elif side == 'right':
f = lambda x: x.ljust(width, fillchar)
elif side == 'both':
f = lambda x: x.center(width, fillchar)
else: # pragma: no cover
raise ValueError('Invalid side')
return _na_map(f, arr)
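# Illustrative usage sketch (not part of the original module); assumes
# ``import pandas as pd``.
# >>> pd.Series(['7']).str.pad(3, side='left', fillchar='0')
# 0    007
# dtype: object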
def str_split(arr, pat=None, n=None):
"""
Split each string (a la re.split) in the Series/Index by given
pattern, propagating NA values. Equivalent to :meth:`str.split`.
Parameters
----------
pat : string, default None
String or regular expression to split on. If None, splits on whitespace
n : int, default -1 (all)
None, 0 and -1 will be interpreted as return all splits
expand : bool, default False
* If True, return DataFrame/MultiIndex expanding dimensionality.
* If False, return Series/Index.
return_type : deprecated, use `expand`
Returns
-------
split : Series/Index or DataFrame/MultiIndex of objects
"""
if pat is None:
if n is None or n == 0:
n = -1
f = lambda x: x.split(pat, n)
else:
if len(pat) == 1:
if n is None or n == 0:
n = -1
f = lambda x: x.split(pat, n)
else:
if n is None or n == -1:
n = 0
regex = re.compile(pat)
f = lambda x: regex.split(x, maxsplit=n)
res = _na_map(f, arr)
return res
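# Illustrative usage sketch (not part of the original module); assumes
# ``import pandas as pd``.
# >>> pd.Series(['a_b_c']).str.split('_', n=1)
# 0    [a, b_c]
# dtype: object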
def str_rsplit(arr, pat=None, n=None):
"""
Split each string in the Series/Index by the given delimiter
string, starting at the end of the string and working to the front.
Equivalent to :meth:`str.rsplit`.
Parameters
----------
pat : string, default None
Separator to split on. If None, splits on whitespace
n : int, default -1 (all)
None, 0 and -1 will be interpreted as return all splits
expand : bool, default False
* If True, return DataFrame/MultiIndex expanding dimensionality.
* If False, return Series/Index.
Returns
-------
split : Series/Index or DataFrame/MultiIndex of objects
"""
if n is None or n == 0:
n = -1
f = lambda x: x.rsplit(pat, n)
res = _na_map(f, arr)
return res
def str_slice(arr, start=None, stop=None, step=None):
"""
Slice substrings from each element in the Series/Index
Parameters
----------
start : int or None
stop : int or None
step : int or None
Returns
-------
sliced : Series/Index of objects
"""
obj = slice(start, stop, step)
f = lambda x: x[obj]
return _na_map(f, arr)
def str_slice_replace(arr, start=None, stop=None, repl=None):
"""
Replace a slice of each string in the Series/Index with another
string.
Parameters
----------
start : int or None
stop : int or None
repl : str or None
String for replacement
Returns
-------
replaced : Series/Index of objects
"""
if repl is None:
repl = ''
def f(x):
if x[start:stop] == '':
local_stop = start
else:
local_stop = stop
y = ''
if start is not None:
y += x[:start]
y += repl
if stop is not None:
y += x[local_stop:]
return y
return _na_map(f, arr)
def str_strip(arr, to_strip=None, side='both'):
"""
Strip whitespace (including newlines) from each string in the
Series/Index.
Parameters
----------
to_strip : str or unicode
side : {'left', 'right', 'both'}, default 'both'
Returns
-------
stripped : Series/Index of objects
"""
if side == 'both':
f = lambda x: x.strip(to_strip)
elif side == 'left':
f = lambda x: x.lstrip(to_strip)
elif side == 'right':
f = lambda x: x.rstrip(to_strip)
else: # pragma: no cover
raise ValueError('Invalid side')
return _na_map(f, arr)
def str_wrap(arr, width, **kwargs):
r"""
Wrap long strings in the Series/Index to be formatted in
paragraphs with length less than a given width.
This method has the same keyword parameters and defaults as
:class:`textwrap.TextWrapper`.
Parameters
----------
width : int
Maximum line-width
expand_tabs : bool, optional
If true, tab characters will be expanded to spaces (default: True)
replace_whitespace : bool, optional
If true, each whitespace character (as defined by string.whitespace)
remaining after tab expansion will be replaced by a single space
(default: True)
drop_whitespace : bool, optional
If true, whitespace that, after wrapping, happens to end up at the
beginning or end of a line is dropped (default: True)
break_long_words : bool, optional
If true, then words longer than width will be broken in order to ensure
that no lines are longer than width. If it is false, long words will
not be broken, and some lines may be longer than width. (default: True)
break_on_hyphens : bool, optional
If true, wrapping will occur preferably on whitespace and right after
hyphens in compound words, as it is customary in English. If false,
only whitespaces will be considered as potentially good places for line
breaks, but you need to set break_long_words to false if you want truly
insecable words. (default: True)
Returns
-------
wrapped : Series/Index of objects
Notes
-----
Internally, this method uses a :class:`textwrap.TextWrapper` instance with
default settings. To achieve behavior matching R's stringr library str_wrap
function, use the arguments:
- expand_tabs = False
- replace_whitespace = True
- drop_whitespace = True
- break_long_words = False
- break_on_hyphens = False
Examples
--------
>>> s = pd.Series(['line to be wrapped', 'another line to be wrapped'])
>>> s.str.wrap(12)
0 line to be\nwrapped
1 another line\nto be\nwrapped
"""
kwargs['width'] = width
tw = textwrap.TextWrapper(**kwargs)
return _na_map(lambda s: '\n'.join(tw.wrap(s)), arr)
def str_translate(arr, table, deletechars=None):
"""
Map all characters in the string through the given mapping table.
Equivalent to standard :meth:`str.translate`. Note that the optional
argument deletechars is only valid if you are using python 2. For python 3,
character deletion should be specified via the table argument.
Parameters
----------
table : dict (python 3), str or None (python 2)
In python 3, table is a mapping of Unicode ordinals to Unicode
ordinals, strings, or None. Unmapped characters are left untouched.
Characters mapped to None are deleted. :meth:`str.maketrans` is a
helper function for making translation tables.
In python 2, table is either a string of length 256 or None. If the
table argument is None, no translation is applied and the operation
simply removes the characters in deletechars. :func:`string.maketrans`
is a helper function for making translation tables.
deletechars : str, optional (python 2)
A string of characters to delete. This argument is only valid
in python 2.
Returns
-------
translated : Series/Index of objects
"""
if deletechars is None:
f = lambda x: x.translate(table)
else:
from pandas import compat
if compat.PY3:
raise ValueError("deletechars is not a valid argument for "
"str.translate in python 3. You should simply "
"specify character deletions in the table "
"argument")
f = lambda x: x.translate(table, deletechars)
return _na_map(f, arr)
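# Illustrative usage sketch (not part of the original module); assumes
# ``import pandas as pd`` and Python 3, where the table is built with
# str.maketrans as described above.
# >>> table = str.maketrans({'a': 'b', 'c': None})
# >>> pd.Series(['abc', 'aca']).str.translate(table)
# 0    bb
# 1    bb
# dtype: object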
def str_get(arr, i):
"""
Extract the element at position `i` from lists, tuples, or strings in
each element of the Series/Index.
Parameters
----------
i : int
Integer index (location)
Returns
-------
items : Series/Index of objects
"""
f = lambda x: x[i] if len(x) > i else np.nan
return _na_map(f, arr)
def str_decode(arr, encoding, errors="strict"):
"""
Decode character string in the Series/Index using indicated encoding.
Equivalent to :meth:`str.decode` in python2 and :meth:`bytes.decode` in
python3.
Parameters
----------
encoding : str
errors : str, optional
Returns
-------
decoded : Series/Index of objects
"""
if encoding in _cpython_optimized_decoders:
# CPython optimized implementation
f = lambda x: x.decode(encoding, errors)
else:
decoder = codecs.getdecoder(encoding)
f = lambda x: decoder(x, errors)[0]
return _na_map(f, arr)
def str_encode(arr, encoding, errors="strict"):
"""
Encode character string in the Series/Index using indicated encoding.
Equivalent to :meth:`str.encode`.
Parameters
----------
encoding : str
errors : str, optional
Returns
-------
encoded : Series/Index of objects
"""
if encoding in _cpython_optimized_encoders:
# CPython optimized implementation
f = lambda x: x.encode(encoding, errors)
else:
encoder = codecs.getencoder(encoding)
f = lambda x: encoder(x, errors)[0]
return _na_map(f, arr)
def _noarg_wrapper(f, docstring=None, **kargs):
def wrapper(self):
result = _na_map(f, self._data, **kargs)
return self._wrap_result(result)
wrapper.__name__ = f.__name__
if docstring is not None:
wrapper.__doc__ = docstring
else:
raise ValueError('Provide docstring')
return wrapper
def _pat_wrapper(f, flags=False, na=False, **kwargs):
def wrapper1(self, pat):
result = f(self._data, pat)
return self._wrap_result(result)
def wrapper2(self, pat, flags=0, **kwargs):
result = f(self._data, pat, flags=flags, **kwargs)
return self._wrap_result(result)
def wrapper3(self, pat, na=np.nan):
result = f(self._data, pat, na=na)
return self._wrap_result(result)
wrapper = wrapper3 if na else wrapper2 if flags else wrapper1
wrapper.__name__ = f.__name__
if f.__doc__:
wrapper.__doc__ = f.__doc__
return wrapper
def copy(source):
"Copy a docstring from another source function (if present)"
def do_copy(target):
if source.__doc__:
target.__doc__ = source.__doc__
return target
return do_copy
class StringMethods(NoNewAttributesMixin):
"""
Vectorized string functions for Series and Index. NAs stay NA unless
handled otherwise by a particular method. Patterned after Python's string
methods, with some inspiration from R's stringr package.
Examples
--------
>>> s.str.split('_')
>>> s.str.replace('_', '')
"""
def __init__(self, data):
self._is_categorical = is_categorical_dtype(data)
self._data = data.cat.categories if self._is_categorical else data
# save orig to blow up categoricals to the right type
self._orig = data
self._freeze()
def __getitem__(self, key):
if isinstance(key, slice):
return self.slice(start=key.start, stop=key.stop, step=key.step)
else:
return self.get(key)
def __iter__(self):
i = 0
g = self.get(i)
while g.notna().any():
yield g
i += 1
g = self.get(i)
def _wrap_result(self, result, use_codes=True,
name=None, expand=None):
from pandas.core.index import Index, MultiIndex
# for category, we do the stuff on the categories, so blow it up
# to the full series again
# But for some operations, we have to do the stuff on the full values,
# so make it possible to skip this step as the method already did this
# before the transformation...
if use_codes and self._is_categorical:
result = take_1d(result, self._orig.cat.codes)
if not hasattr(result, 'ndim') or not hasattr(result, 'dtype'):
return result
assert result.ndim < 3
if expand is None:
# infer from ndim if expand is not specified
expand = False if result.ndim == 1 else True
elif expand is True and not isinstance(self._orig, Index):
# required when expand=True is explicitly specified
# not needed when inferred
def cons_row(x):
if is_list_like(x):
return x
else:
return [x]
result = [cons_row(x) for x in result]
if not isinstance(expand, bool):
raise ValueError("expand must be True or False")
if expand is False:
# if expand is False, result should keep the same name as the
# original unless a name was explicitly specified
if name is None:
name = getattr(result, 'name', None)
if name is None:
# do not use logical or, _orig may be a DataFrame
# which has "name" column
name = self._orig.name
# Wait until we are sure result is a Series or Index before
# checking attributes (GH 12180)
if isinstance(self._orig, Index):
# if result is a boolean np.array, return the np.array
# instead of wrapping it into a boolean Index (GH 8875)
if is_bool_dtype(result):
return result
if expand:
result = list(result)
out = MultiIndex.from_tuples(result, names=name)
if out.nlevels == 1:
# We had all tuples of length-one, which are
# better represented as a regular Index.
out = out.get_level_values(0)
return out
else:
return Index(result, name=name)
else:
index = self._orig.index
if expand:
cons = self._orig._constructor_expanddim
return cons(result, columns=name, index=index)
else:
# Must be a Series
cons = self._orig._constructor
return cons(result, name=name, index=index)
@copy(str_cat)
def cat(self, others=None, sep=None, na_rep=None):
data = self._orig if self._is_categorical else self._data
result = str_cat(data, others=others, sep=sep, na_rep=na_rep)
return self._wrap_result(result, use_codes=(not self._is_categorical))
@copy(str_split)
def split(self, pat=None, n=-1, expand=False):
result = str_split(self._data, pat, n=n)
return self._wrap_result(result, expand=expand)
@copy(str_rsplit)
def rsplit(self, pat=None, n=-1, expand=False):
result = str_rsplit(self._data, pat, n=n)
return self._wrap_result(result, expand=expand)
_shared_docs['str_partition'] = ("""
Split the string at the %(side)s occurrence of `sep`, and return 3 elements
containing the part before the separator, the separator itself,
and the part after the separator.
If the separator is not found, return %(return)s.
Parameters
----------
pat : string, default whitespace
String to split on.
expand : bool, default True
* If True, return DataFrame/MultiIndex expanding dimensionality.
* If False, return Series/Index.
Returns
-------
split : DataFrame/MultiIndex or Series/Index of objects
See Also
--------
%(also)s
Examples
--------
>>> s = Series(['A_B_C', 'D_E_F', 'X'])
0 A_B_C
1 D_E_F
2 X
dtype: object
>>> s.str.partition('_')
0 1 2
0 A _ B_C
1 D _ E_F
2 X
>>> s.str.rpartition('_')
0 1 2
0 A_B _ C
1 D_E _ F
2 X
""")
@Appender(_shared_docs['str_partition'] % {
'side': 'first',
'return': '3 elements containing the string itself, followed by two '
'empty strings',
'also': 'rpartition : Split the string at the last occurrence of `sep`'
})
def partition(self, pat=' ', expand=True):
f = lambda x: x.partition(pat)
result = _na_map(f, self._data)
return self._wrap_result(result, expand=expand)
@Appender(_shared_docs['str_partition'] % {
'side': 'last',
'return': '3 elements containing two empty strings, followed by the '
'string itself',
'also': 'partition : Split the string at the first occurrence of `sep`'
})
def rpartition(self, pat=' ', expand=True):
f = lambda x: x.rpartition(pat)
result = _na_map(f, self._data)
return self._wrap_result(result, expand=expand)
@copy(str_get)
def get(self, i):
result = str_get(self._data, i)
return self._wrap_result(result)
@copy(str_join)
def join(self, sep):
result = str_join(self._data, sep)
return self._wrap_result(result)
@copy(str_contains)
def contains(self, pat, case=True, flags=0, na=np.nan, regex=True):
result = str_contains(self._data, pat, case=case, flags=flags, na=na,
regex=regex)
return self._wrap_result(result)
@copy(str_match)
def match(self, pat, case=True, flags=0, na=np.nan, as_indexer=None):
result = str_match(self._data, pat, case=case, flags=flags, na=na,
as_indexer=as_indexer)
return self._wrap_result(result)
@copy(str_replace)
def replace(self, pat, repl, n=-1, case=None, flags=0):
result = str_replace(self._data, pat, repl, n=n, case=case,
flags=flags)
return self._wrap_result(result)
@copy(str_repeat)
def repeat(self, repeats):
result = str_repeat(self._data, repeats)
return self._wrap_result(result)
@copy(str_pad)
def pad(self, width, side='left', fillchar=' '):
result = str_pad(self._data, width, side=side, fillchar=fillchar)
return self._wrap_result(result)
_shared_docs['str_pad'] = ("""
Filling %(side)s side of strings in the Series/Index with an
additional character. Equivalent to :meth:`str.%(method)s`.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be filled
with ``fillchar``
fillchar : str
Additional character for filling, default is whitespace
Returns
-------
filled : Series/Index of objects
""")
@Appender(_shared_docs['str_pad'] % dict(side='left and right',
method='center'))
def center(self, width, fillchar=' '):
return self.pad(width, side='both', fillchar=fillchar)
@Appender(_shared_docs['str_pad'] % dict(side='right', method='ljust'))
def ljust(self, width, fillchar=' '):
return self.pad(width, side='right', fillchar=fillchar)
@Appender(_shared_docs['str_pad'] % dict(side='left', method='rjust'))
def rjust(self, width, fillchar=' '):
return self.pad(width, side='left', fillchar=fillchar)
def zfill(self, width):
"""
Filling left side of strings in the Series/Index with 0.
Equivalent to :meth:`str.zfill`.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be
filled with 0
Returns
-------
filled : Series/Index of objects
"""
result = str_pad(self._data, width, side='left', fillchar='0')
return self._wrap_result(result)
@copy(str_slice)
def slice(self, start=None, stop=None, step=None):
result = str_slice(self._data, start, stop, step)
return self._wrap_result(result)
@copy(str_slice_replace)
def slice_replace(self, start=None, stop=None, repl=None):
result = str_slice_replace(self._data, start, stop, repl)
return self._wrap_result(result)
@copy(str_decode)
def decode(self, encoding, errors="strict"):
result = str_decode(self._data, encoding, errors)
return self._wrap_result(result)
@copy(str_encode)
def encode(self, encoding, errors="strict"):
result = str_encode(self._data, encoding, errors)
return self._wrap_result(result)
_shared_docs['str_strip'] = ("""
Strip whitespace (including newlines) from each string in the
Series/Index from %(side)s. Equivalent to :meth:`str.%(method)s`.
Returns
-------
stripped : Series/Index of objects
""")
@Appender(_shared_docs['str_strip'] % dict(side='left and right sides',
method='strip'))
def strip(self, to_strip=None):
result = str_strip(self._data, to_strip, side='both')
return self._wrap_result(result)
@Appender(_shared_docs['str_strip'] % dict(side='left side',
method='lstrip'))
def lstrip(self, to_strip=None):
result = str_strip(self._data, to_strip, side='left')
return self._wrap_result(result)
@Appender(_shared_docs['str_strip'] % dict(side='right side',
method='rstrip'))
def rstrip(self, to_strip=None):
result = str_strip(self._data, to_strip, side='right')
return self._wrap_result(result)
@copy(str_wrap)
def wrap(self, width, **kwargs):
result = str_wrap(self._data, width, **kwargs)
return self._wrap_result(result)
@copy(str_get_dummies)
def get_dummies(self, sep='|'):
# we need to cast to Series of strings as only that has all
# methods available for making the dummies...
data = self._orig.astype(str) if self._is_categorical else self._data
result, name = str_get_dummies(data, sep)
return self._wrap_result(result, use_codes=(not self._is_categorical),
name=name, expand=True)
@copy(str_translate)
def translate(self, table, deletechars=None):
result = str_translate(self._data, table, deletechars)
return self._wrap_result(result)
count = _pat_wrapper(str_count, flags=True)
startswith = _pat_wrapper(str_startswith, na=True)
endswith = _pat_wrapper(str_endswith, na=True)
findall = _pat_wrapper(str_findall, flags=True)
@copy(str_extract)
def extract(self, pat, flags=0, expand=None):
return str_extract(self, pat, flags=flags, expand=expand)
@copy(str_extractall)
def extractall(self, pat, flags=0):
return str_extractall(self._orig, pat, flags=flags)
_shared_docs['find'] = ("""
Return %(side)s indexes in each string in the Series/Index
where the substring is fully contained between [start:end].
Return -1 on failure. Equivalent to standard :meth:`str.%(method)s`.
Parameters
----------
sub : str
Substring being searched
start : int
Left edge index
end : int
Right edge index
Returns
-------
found : Series/Index of integer values
See Also
--------
%(also)s
""")
@Appender(_shared_docs['find'] %
dict(side='lowest', method='find',
also='rfind : Return highest indexes in each string'))
def find(self, sub, start=0, end=None):
result = str_find(self._data, sub, start=start, end=end, side='left')
return self._wrap_result(result)
@Appender(_shared_docs['find'] %
dict(side='highest', method='rfind',
also='find : Return lowest indexes in each string'))
def rfind(self, sub, start=0, end=None):
result = str_find(self._data, sub, start=start, end=end, side='right')
return self._wrap_result(result)
def normalize(self, form):
"""Return the Unicode normal form for the strings in the Series/Index.
For more information on the forms, see
:func:`unicodedata.normalize`.
Parameters
----------
form : {'NFC', 'NFKC', 'NFD', 'NFKD'}
Unicode form
Returns
-------
normalized : Series/Index of objects
"""
import unicodedata
f = lambda x: unicodedata.normalize(form, compat.u_safe(x))
result = _na_map(f, self._data)
return self._wrap_result(result)
_shared_docs['index'] = ("""
Return %(side)s indexes in each string where the substring is
fully contained between [start:end]. This is the same as
``str.%(similar)s`` except instead of returning -1, it raises a ValueError
when the substring is not found. Equivalent to standard ``str.%(method)s``.
Parameters
----------
sub : str
Substring being searched
start : int
Left edge index
end : int
Right edge index
Returns
-------
found : Series/Index of objects
See Also
--------
%(also)s
""")
@Appender(_shared_docs['index'] %
dict(side='lowest', similar='find', method='index',
also='rindex : Return highest indexes in each string'))
def index(self, sub, start=0, end=None):
result = str_index(self._data, sub, start=start, end=end, side='left')
return self._wrap_result(result)
@Appender(_shared_docs['index'] %
dict(side='highest', similar='rfind', method='rindex',
also='index : Return lowest indexes in each string'))
def rindex(self, sub, start=0, end=None):
result = str_index(self._data, sub, start=start, end=end, side='right')
return self._wrap_result(result)
_shared_docs['len'] = ("""
Compute length of each string in the Series/Index.
Returns
-------
lengths : Series/Index of integer values
""")
len = _noarg_wrapper(len, docstring=_shared_docs['len'], dtype=int)
_shared_docs['casemethods'] = ("""
Convert strings in the Series/Index to %(type)s.
Equivalent to :meth:`str.%(method)s`.
Returns
-------
converted : Series/Index of objects
""")
_shared_docs['lower'] = dict(type='lowercase', method='lower')
_shared_docs['upper'] = dict(type='uppercase', method='upper')
_shared_docs['title'] = dict(type='titlecase', method='title')
_shared_docs['capitalize'] = dict(type='be capitalized',
method='capitalize')
_shared_docs['swapcase'] = dict(type='be swapcased', method='swapcase')
lower = _noarg_wrapper(lambda x: x.lower(),
docstring=_shared_docs['casemethods'] %
_shared_docs['lower'])
upper = _noarg_wrapper(lambda x: x.upper(),
docstring=_shared_docs['casemethods'] %
_shared_docs['upper'])
title = _noarg_wrapper(lambda x: x.title(),
docstring=_shared_docs['casemethods'] %
_shared_docs['title'])
capitalize = _noarg_wrapper(lambda x: x.capitalize(),
docstring=_shared_docs['casemethods'] %
_shared_docs['capitalize'])
swapcase = _noarg_wrapper(lambda x: x.swapcase(),
docstring=_shared_docs['casemethods'] %
_shared_docs['swapcase'])
_shared_docs['ismethods'] = ("""
Check whether all characters in each string in the Series/Index
are %(type)s. Equivalent to :meth:`str.%(method)s`.
Returns
-------
is : Series/array of boolean values
""")
_shared_docs['isalnum'] = dict(type='alphanumeric', method='isalnum')
_shared_docs['isalpha'] = dict(type='alphabetic', method='isalpha')
_shared_docs['isdigit'] = dict(type='digits', method='isdigit')
_shared_docs['isspace'] = dict(type='whitespace', method='isspace')
_shared_docs['islower'] = dict(type='lowercase', method='islower')
_shared_docs['isupper'] = dict(type='uppercase', method='isupper')
_shared_docs['istitle'] = dict(type='titlecase', method='istitle')
_shared_docs['isnumeric'] = dict(type='numeric', method='isnumeric')
_shared_docs['isdecimal'] = dict(type='decimal', method='isdecimal')
isalnum = _noarg_wrapper(lambda x: x.isalnum(),
docstring=_shared_docs['ismethods'] %
_shared_docs['isalnum'])
isalpha = _noarg_wrapper(lambda x: x.isalpha(),
docstring=_shared_docs['ismethods'] %
_shared_docs['isalpha'])
isdigit = _noarg_wrapper(lambda x: x.isdigit(),
docstring=_shared_docs['ismethods'] %
_shared_docs['isdigit'])
isspace = _noarg_wrapper(lambda x: x.isspace(),
docstring=_shared_docs['ismethods'] %
_shared_docs['isspace'])
islower = _noarg_wrapper(lambda x: x.islower(),
docstring=_shared_docs['ismethods'] %
_shared_docs['islower'])
isupper = _noarg_wrapper(lambda x: x.isupper(),
docstring=_shared_docs['ismethods'] %
_shared_docs['isupper'])
istitle = _noarg_wrapper(lambda x: x.istitle(),
docstring=_shared_docs['ismethods'] %
_shared_docs['istitle'])
isnumeric = _noarg_wrapper(lambda x: compat.u_safe(x).isnumeric(),
docstring=_shared_docs['ismethods'] %
_shared_docs['isnumeric'])
isdecimal = _noarg_wrapper(lambda x: compat.u_safe(x).isdecimal(),
docstring=_shared_docs['ismethods'] %
_shared_docs['isdecimal'])
@classmethod
def _make_accessor(cls, data):
from pandas.core.index import Index
if (isinstance(data, ABCSeries) and
not ((is_categorical_dtype(data.dtype) and
is_object_dtype(data.values.categories)) or
(is_object_dtype(data.dtype)))):
# it's neither a string series nor a categorical series with
# strings inside the categories.
# this really should exclude all series with any non-string values
# (instead of testing for object dtype), but that isn't practical for
# performance reasons until we have a str dtype (GH 9343)
raise AttributeError("Can only use .str accessor with string "
"values, which use np.object_ dtype in "
"pandas")
elif isinstance(data, Index):
# can't use ABCIndex to exclude non-str
# see src/inference.pyx, which can contain string values
allowed_types = ('string', 'unicode', 'mixed', 'mixed-integer')
if data.inferred_type not in allowed_types:
message = ("Can only use .str accessor with string values "
"(i.e. inferred_type is 'string', 'unicode' or "
"'mixed')")
raise AttributeError(message)
if data.nlevels > 1:
message = ("Can only use .str accessor with Index, not "
"MultiIndex")
raise AttributeError(message)
return cls(data)
|
bsd-3-clause
|
csxeba/brainforge
|
xperiments/xp_sin.py
|
1
|
1670
|
import numpy as np
from matplotlib import pyplot as plt
from brainforge.learner import Backpropagation
from brainforge.layers import Dense
np.random.seed(1234)
rX = np.linspace(-6., 6., 200)[:, None]
rY = np.sin(rX)
arg = np.arange(len(rX))
np.random.shuffle(arg)
targ, varg = arg[:100], arg[100:]
targ.sort()
varg.sort()
tX, tY = rX[targ], rY[targ]
vX, vY = rX[varg], rY[varg]
tX += np.random.randn(*tX.shape) / np.sqrt(tX.size*0.25)
net = Backpropagation([Dense(120, activation="tanh"),
Dense(120, activation="tanh"),
Dense(1, activation="linear")],
input_shape=1, optimizer="adam")
tpred = net.predict(tX)
vpred = net.predict(vX)
plt.ion()
plt.plot(tX, tY, "b--", alpha=0.5, label="Training data (noisy)")
plt.plot(rX, rY, "r--", alpha=0.5, label="Validation data (clean)")
plt.ylim(-2, 2)
plt.plot(rX, np.ones_like(rX), c="black", linestyle="--")
plt.plot(rX, -np.ones_like(rX), c="black", linestyle="--")
plt.plot(rX, np.zeros_like(rX), c="grey", linestyle="--")
tobj, = plt.plot(tX, tpred, "bo", markersize=3, alpha=0.5, label="Training pred")
vobj, = plt.plot(vX, vpred, "ro", markersize=3, alpha=0.5, label="Validation pred")
templ = "Batch: {:>5}, tMSE: {:>.4f}, vMSE: {:>.4f}"
t = plt.title(templ.format(0, 0., 0.))
plt.legend()
batchno = 1
while 1:
tmetrics = net.learn_batch(tX, tY)
tpred = net.predict(tX)
vpred = net.predict(vX)
vcost = net.cost(vpred, vY) / len(vpred)
tobj.set_data(tX, tpred)
vobj.set_data(vX, vpred)
plt.pause(0.1)
t.set_text(templ.format(batchno, tmetrics["cost"], vcost))
batchno += 1
|
gpl-3.0
|
gfyoung/scipy
|
scipy/spatial/_plotutils.py
|
1
|
6244
|
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.decorator import decorator as _decorator
__all__ = ['delaunay_plot_2d', 'convex_hull_plot_2d', 'voronoi_plot_2d']
@_decorator
def _held_figure(func, obj, ax=None, **kw):
import matplotlib.pyplot as plt
if ax is None:
fig = plt.figure()
ax = fig.gca()
return func(obj, ax=ax, **kw)
# As of matplotlib 2.0, the "hold" mechanism is deprecated.
# When matplotlib 1.x is no longer supported, this check can be removed.
was_held = getattr(ax, 'ishold', lambda: True)()
if was_held:
return func(obj, ax=ax, **kw)
try:
ax.hold(True)
return func(obj, ax=ax, **kw)
finally:
ax.hold(was_held)
def _adjust_bounds(ax, points):
margin = 0.1 * points.ptp(axis=0)
xy_min = points.min(axis=0) - margin
xy_max = points.max(axis=0) + margin
ax.set_xlim(xy_min[0], xy_max[0])
ax.set_ylim(xy_min[1], xy_max[1])
@_held_figure
def delaunay_plot_2d(tri, ax=None):
"""
Plot the given Delaunay triangulation in 2-D
Parameters
----------
tri : scipy.spatial.Delaunay instance
Triangulation to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
Delaunay
matplotlib.pyplot.triplot
Notes
-----
Requires Matplotlib.
"""
if tri.points.shape[1] != 2:
raise ValueError("Delaunay triangulation is not 2-D")
x, y = tri.points.T
ax.plot(x, y, 'o')
ax.triplot(x, y, tri.simplices.copy())
_adjust_bounds(ax, tri.points)
return ax.figure
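# Illustrative usage sketch (not part of the original module); assumes
# scipy.spatial and matplotlib are available and uses arbitrary random points.
# >>> import numpy as np
# >>> from scipy.spatial import Delaunay, delaunay_plot_2d
# >>> pts = np.random.rand(30, 2)
# >>> fig = delaunay_plot_2d(Delaunay(pts))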
@_held_figure
def convex_hull_plot_2d(hull, ax=None):
"""
Plot the given convex hull diagram in 2-D
Parameters
----------
hull : scipy.spatial.ConvexHull instance
Convex hull to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
ConvexHull
Notes
-----
Requires Matplotlib.
"""
from matplotlib.collections import LineCollection
if hull.points.shape[1] != 2:
raise ValueError("Convex hull is not 2-D")
ax.plot(hull.points[:,0], hull.points[:,1], 'o')
line_segments = [hull.points[simplex] for simplex in hull.simplices]
ax.add_collection(LineCollection(line_segments,
colors='k',
linestyle='solid'))
_adjust_bounds(ax, hull.points)
return ax.figure
@_held_figure
def voronoi_plot_2d(vor, ax=None, **kw):
"""
Plot the given Voronoi diagram in 2-D
Parameters
----------
vor : scipy.spatial.Voronoi instance
Diagram to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
show_points: bool, optional
Add the Voronoi points to the plot.
show_vertices : bool, optional
Add the Voronoi vertices to the plot.
line_colors : string, optional
Specifies the line color for polygon boundaries
line_width : float, optional
Specifies the line width for polygon boundaries
line_alpha: float, optional
Specifies the line alpha for polygon boundaries
point_size: float, optional
Specifies the size of points
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
Voronoi
Notes
-----
Requires Matplotlib.
Examples
--------
    Create a set of points:
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> points = np.random.rand(10, 2)  # random
    Compute the Voronoi diagram of the points:
    >>> from scipy.spatial import Voronoi, voronoi_plot_2d
    >>> vor = Voronoi(points)
    Plot it using `voronoi_plot_2d`:
    >>> fig = voronoi_plot_2d(vor)
    Plot it again, customizing the line colors, width, transparency and point size:
>>> fig = voronoi_plot_2d(vor, show_vertices=False, line_colors='orange',
... line_width=2, line_alpha=0.6, point_size=2)
>>> plt.show()
"""
from matplotlib.collections import LineCollection
if vor.points.shape[1] != 2:
raise ValueError("Voronoi diagram is not 2-D")
if kw.get('show_points', True):
point_size = kw.get('point_size', None)
ax.plot(vor.points[:,0], vor.points[:,1], '.', markersize=point_size)
if kw.get('show_vertices', True):
ax.plot(vor.vertices[:,0], vor.vertices[:,1], 'o')
line_colors = kw.get('line_colors', 'k')
line_width = kw.get('line_width', 1.0)
line_alpha = kw.get('line_alpha', 1.0)
center = vor.points.mean(axis=0)
ptp_bound = vor.points.ptp(axis=0)
finite_segments = []
infinite_segments = []
for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):
simplex = np.asarray(simplex)
if np.all(simplex >= 0):
finite_segments.append(vor.vertices[simplex])
else:
i = simplex[simplex >= 0][0] # finite end Voronoi vertex
t = vor.points[pointidx[1]] - vor.points[pointidx[0]] # tangent
t /= np.linalg.norm(t)
n = np.array([-t[1], t[0]]) # normal
midpoint = vor.points[pointidx].mean(axis=0)
direction = np.sign(np.dot(midpoint - center, n)) * n
far_point = vor.vertices[i] + direction * ptp_bound.max()
infinite_segments.append([vor.vertices[i], far_point])
ax.add_collection(LineCollection(finite_segments,
colors=line_colors,
lw=line_width,
alpha=line_alpha,
linestyle='solid'))
ax.add_collection(LineCollection(infinite_segments,
colors=line_colors,
lw=line_width,
alpha=line_alpha,
linestyle='dashed'))
_adjust_bounds(ax, vor.points)
return ax.figure
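# Illustrative usage sketch (not part of the upstream scipy module): exercises
# the three plotting helpers above on a small random point set. Assumes
# scipy.spatial and matplotlib are importable.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    from scipy.spatial import Delaunay, ConvexHull, Voronoi
    rng = np.random.RandomState(0)
    pts = rng.rand(12, 2)
    delaunay_plot_2d(Delaunay(pts))       # triangulation of the points
    convex_hull_plot_2d(ConvexHull(pts))  # boundary of the convex hull
    voronoi_plot_2d(Voronoi(pts), show_vertices=False)
    plt.show()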
|
bsd-3-clause
|
HolgerPeters/scikit-learn
|
sklearn/utils/tests/test_linear_assignment.py
|
421
|
1349
|
# Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
matrices = [
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
850 # expected cost
),
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
452 # expected cost
),
# Square
([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
18
),
# Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
15
),
# n == 2, m == 0 matrix
([[], []],
0
),
]
for cost_matrix, expected_total in matrices:
cost_matrix = np.array(cost_matrix)
indexes = _hungarian(cost_matrix)
total_cost = 0
for r, c in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
indexes = _hungarian(cost_matrix.T)
total_cost = 0
for c, r in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
|
bsd-3-clause
|
HolgerPeters/scikit-learn
|
benchmarks/bench_covertype.py
|
57
|
7378
|
"""
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <[email protected]>
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
# Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=random_state)
X = check_array(data['data'], dtype=dtype, order=order)
y = (data['target'] != 1).astype(np.int)
# Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
# Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
ESTIMATORS = {
'GBRT': GradientBoostingClassifier(n_estimators=250),
'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
'RandomForest': RandomForestClassifier(n_estimators=20),
'CART': DecisionTreeClassifier(min_samples_split=5),
'SGD': SGDClassifier(alpha=0.001, n_iter=2),
'GaussianNB': GaussianNB(),
'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
tol=1e-3),
'SAG': LogisticRegression(solver='sag', max_iter=2, C=1000)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=13, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s"
% ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print("%s %s %s %s" % (name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10)))
print()
|
bsd-3-clause
|
dandxy89/rf_helicopter
|
Plot_results.py
|
1
|
4253
|
# Purpose: Main Plotting Results Script
#
# Info: Plotting the Results by Case and Model (1, 2 or 3)
#
# Developed as part of the Software Agents Course at City University
#
# Dev: Dan Dixey and Enrico Lopedoto
#
# Updated: 5/3/2016
#
import json
import logging
import os
from random import choice
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import normalize
from Model import World as W
from Model.Utils import moving_average
plt.style.use('ggplot')
# Logging Controls Level of Printing
logging.basicConfig(format='[%(asctime)s] : [%(levelname)s] : [%(message)s]',
level=logging.DEBUG)
case_name = 'case_four'
model = str(3)
directory = os.path.join(os.getcwd(), 'Results',
case_name)
data = json.loads(open(directory + '/Model{}.json'.format(model), 'r').read())
HeliWorld = W.helicopter_world(file_name="Track_1.npy")
xlim_val = int(data['model_names'][0]['trials'])
nb_action = int(data['model_names'][0]['nb_actions'])
n_items = len(data['best_test'])
# Plotting Colors
colors = ['coral', 'green', 'red', 'cyan', 'magenta',
'yellow', 'blue', 'white', 'fuchsia', 'orangered', 'steelblue']
fig = plt.figure()
fig.canvas.draw()
plt.subplot(2, 2, 1)
plt.title('Post Training Path - Epsilon = 0',
fontsize=10)
plt.xlabel('Track Length', fontsize=8)
plt.ylabel('Track Width', fontsize=8)
my_axis = plt.gca()
my_axis.set_xlim(0, HeliWorld.track_width)
my_axis.set_ylim(0, HeliWorld.track_height)
im1 = plt.imshow(HeliWorld.track,
cmap=plt.get_cmap('gray'),
interpolation='nearest',
vmin=-1,
vmax=8)
plt.colorbar(im1, fraction=0.01, pad=0.01)
# For each set of results in dictionary
for i in range(n_items):
x, y = [], []
for each_item in data['best_test'][i]:
x.append(each_item[0])
y.append(each_item[1])
# Plot Scatter
plt.scatter(x=x,
y=y,
s=np.pi * (1 * 1) ** 2,
c=colors[i])
logging.info('Plotting the Q-Matrix')
plt.subplot(2, 2, 2)
selection = choice(range(len(data['best_test'])))
plt.title('Final Q Matrix', fontsize=10)
plt.xlabel('Track Length', fontsize=8)
plt.ylabel('Track Width', fontsize=8)
my_axis = plt.gca()
my_axis.set_xlim(0, HeliWorld.track_width)
my_axis.set_ylim(0, HeliWorld.track_height)
q_data = np.array(data['q_plot'][selection])
im2 = plt.imshow(normalize(q_data))
plt.colorbar(im2, fraction=0.01, pad=0.01)
logging.info('Completion Chart - Time per Trial')
plt.subplot(2, 2, 3)
plt.title('Completion Chart - Time per Trial', fontsize=10)
plt.xlabel('Trial Numbers', fontsize=8)
plt.ylabel('LOG(Seconds Per Trial)', fontsize=8)
my_axis = plt.gca()
my_axis.set_xlim(0, xlim_val)
# For each set of results in dictionary
for i in range(n_items):
x, y = [], []
for each_item in data['time_chart'][i]:
x.append(each_item[0])
y.append(each_item[1])
# Plot Scatter
plt.scatter(x=x,
y=np.log(y),
s=np.pi * (1 * 1) ** 2,
c=colors[i])
plt.subplot(2, 2, 4)
plt.title('Learning Chart - Averaged Trial Plot', fontsize=10)
plt.xlabel('Trial Numbers', fontsize=8)
plt.ylabel('End Location', fontsize=8)
# For each set of results in dictionary
for i in range(n_items):
x, y = [], []
for each_item in data['final_location'][i]:
x.append(each_item[0])
y.append(each_item[1])
y = moving_average(y, 60)
plt.plot(x, y, linewidth=1, c=colors[i])
logging.info('Plotting Figure Label')
title_text = '|| Case - {} | Number of Trials - {} | Model - {} | Number of Actions - {} ||\n\
|| TRACK | Width - {} | Height - {} ||'.format(case_name,
xlim_val,
model,
nb_action,
HeliWorld.track_width,
HeliWorld.track_height)
fig.suptitle(title_text)
logging.info('Saved Figure of the Plot')
fig.savefig(directory + '/Plot/Final_Plot_{}.png'.format(model))
|
mit
|
mcneela/Retina
|
retina/nldr/sammon.py
|
1
|
1676
|
import numpy as np
from sklearn.datasets import make_swiss_roll
from sklearn.metrics.pairwise import euclidean_distances
def sammon(data, target_dim=2, max_iterations=250, max_halves=10):
"""
    Adapted from the Matlab implementation by Dr. Gavin C. Cawley.
Matlab source can be found here:
https://people.sc.fsu.edu/~jburkardt/m_src/profile/sammon_test.m
"""
TolFun = 1 * 10 ** (-9)
D = euclidean_distances(data, data)
N = data.shape[0]
scale = np.sum(D.flatten('F'))
D = D + np.identity(N)
D_inv = np.linalg.inv(D)
y = np.random.randn(N, target_dim)
one = np.ones((N, target_dim))
d = euclidean_distances(y, y) + np.identity(N)
d_inv = np.linalg.inv(d)
delta = D - d
E = np.sum(np.sum(np.power(delta, 2) * D_inv))
for i in range(max_iterations):
delta = d_inv - D_inv
deltaone = np.dot(delta, one)
g = np.dot(delta, y) - y * deltaone
dinv3 = np.power(d_inv, 3)
y2 = np.power(y, 2)
H = np.dot(dinv3, y2) - deltaone - 2 * np.multiply(y, np.dot(dinv3, y)) + np.multiply(y2, np.dot(dinv3, one))
s = np.divide(-np.transpose(g.flatten('F')), np.transpose(np.abs(H.flatten('F'))))
y_old = y
for j in range(max_halves):
[rows, columns] = y.shape
y = y_old.flatten('F') + s
y = y.reshape(rows, columns)
d = euclidean_distances(y, y) + np.identity(N)
d_inv = np.linalg.inv(d)
delta = D - d
E_new = np.sum(np.sum(np.power(delta, 2) * D_inv))
if E_new < E:
break
else:
s = 0.5 * s
E = E_new
E = E * scale
return (y, E)
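# Illustrative usage sketch (not part of the original module): embed a small
# swiss-roll sample into 2-D with the sammon() function defined above; the
# make_swiss_roll import at the top of the file is otherwise unused.
if __name__ == '__main__':
    data, _ = make_swiss_roll(n_samples=200, noise=0.05, random_state=0)
    embedding, stress = sammon(data, target_dim=2, max_iterations=50)
    print("embedding shape:", embedding.shape)
    print("final (scaled) Sammon stress:", stress)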
|
bsd-3-clause
|
pombredanne/vera
|
setup.py
|
1
|
1898
|
from os.path import join, dirname
from setuptools import setup
LONG_DESCRIPTION = """
Reference implementation of the ERAV model, an extension to EAV with support for maintaining multiple versions of an entity with different provenance.
"""
def parse_markdown_readme():
"""
Convert README.md to RST via pandoc, and load into memory
(fallback to LONG_DESCRIPTION on failure)
"""
# Attempt to run pandoc on markdown file
import subprocess
try:
subprocess.call(
['pandoc', '-t', 'rst', '-o', 'README.rst', 'README.md']
)
except OSError:
return LONG_DESCRIPTION
# Attempt to load output
try:
readme = open(join(dirname(__file__), 'README.rst'))
except IOError:
return LONG_DESCRIPTION
return readme.read()
setup(
name='vera',
version='0.8.0-dev',
author='S. Andrew Sheppard',
author_email='[email protected]',
url='http://wq.io/vera',
license='MIT',
description='Reference implementation of the ERAV data integration model',
long_description=parse_markdown_readme(),
packages=[
'vera',
'vera.migrations',
],
install_requires=[
'rest-pandas',
'wq.db>=0.7.0',
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: JavaScript',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Framework :: Django',
'Topic :: Scientific/Engineering :: GIS',
'Topic :: Database :: Database Engines/Servers',
],
test_suite='tests',
tests_require=[
'psycopg2',
],
)
|
mit
|
nguyentu1602/statsmodels
|
examples/python/glm_formula.py
|
33
|
1547
|
## Generalized Linear Models (Formula)
# This notebook illustrates how you can use R-style formulas to fit Generalized Linear Models.
#
# To begin, we load the ``Star98`` dataset, construct a formula, and pre-process the data:
from __future__ import print_function
import statsmodels.api as sm
import statsmodels.formula.api as smf
star98 = sm.datasets.star98.load_pandas().data
formula = 'SUCCESS ~ LOWINC + PERASIAN + PERBLACK + PERHISP + PCTCHRT + PCTYRRND + PERMINTE*AVYRSEXP*AVSALK + PERSPENK*PTRATIO*PCTAF'
dta = star98[['NABOVE', 'NBELOW', 'LOWINC', 'PERASIAN', 'PERBLACK', 'PERHISP',
'PCTCHRT', 'PCTYRRND', 'PERMINTE', 'AVYRSEXP', 'AVSALK',
'PERSPENK', 'PTRATIO', 'PCTAF']]
endog = dta['NABOVE'] / (dta['NABOVE'] + dta.pop('NBELOW'))
del dta['NABOVE']
dta['SUCCESS'] = endog
# Then, we fit the GLM model:
mod1 = smf.glm(formula=formula, data=dta, family=sm.families.Binomial()).fit()
mod1.summary()
# Finally, we define a function to perform a customized data transformation within the formula framework:
def double_it(x):
return 2 * x
formula = 'SUCCESS ~ double_it(LOWINC) + PERASIAN + PERBLACK + PERHISP + PCTCHRT + PCTYRRND + PERMINTE*AVYRSEXP*AVSALK + PERSPENK*PTRATIO*PCTAF'
mod2 = smf.glm(formula=formula, data=dta, family=sm.families.Binomial()).fit()
mod2.summary()
# As expected, the coefficient for ``double_it(LOWINC)`` in the second model is half the size of the ``LOWINC`` coefficient from the first model:
print(mod1.params[1])
print(mod2.params[1] * 2)
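# A quick numerical check (not in the original example): the two estimates
# printed above should agree up to floating point noise.
import numpy as np
print(np.allclose(mod1.params[1], mod2.params[1] * 2))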
|
bsd-3-clause
|
lmallin/coverage_test
|
python_venv/lib/python2.7/site-packages/pandas/tests/dtypes/test_io.py
|
14
|
4606
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas._libs.lib as lib
import pandas.util.testing as tm
from pandas.compat import long, u
class TestParseSQL(object):
def test_convert_sql_column_floats(self):
arr = np.array([1.5, None, 3, 4.2], dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array([1.5, np.nan, 3, 4.2], dtype='f8')
tm.assert_numpy_array_equal(result, expected)
def test_convert_sql_column_strings(self):
arr = np.array(['1.5', None, '3', '4.2'], dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array(['1.5', np.nan, '3', '4.2'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_convert_sql_column_unicode(self):
arr = np.array([u('1.5'), None, u('3'), u('4.2')],
dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array([u('1.5'), np.nan, u('3'), u('4.2')],
dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_convert_sql_column_ints(self):
arr = np.array([1, 2, 3, 4], dtype='O')
arr2 = np.array([1, 2, 3, 4], dtype='i4').astype('O')
result = lib.convert_sql_column(arr)
result2 = lib.convert_sql_column(arr2)
expected = np.array([1, 2, 3, 4], dtype='i8')
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result2, expected)
arr = np.array([1, 2, 3, None, 4], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([1, 2, 3, np.nan, 4], dtype='f8')
tm.assert_numpy_array_equal(result, expected)
def test_convert_sql_column_longs(self):
arr = np.array([long(1), long(2), long(3), long(4)], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([1, 2, 3, 4], dtype='i8')
tm.assert_numpy_array_equal(result, expected)
arr = np.array([long(1), long(2), long(3), None, long(4)], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([1, 2, 3, np.nan, 4], dtype='f8')
tm.assert_numpy_array_equal(result, expected)
def test_convert_sql_column_bools(self):
arr = np.array([True, False, True, False], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([True, False, True, False], dtype=bool)
tm.assert_numpy_array_equal(result, expected)
arr = np.array([True, False, None, False], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([True, False, np.nan, False], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_convert_sql_column_decimals(self):
from decimal import Decimal
arr = np.array([Decimal('1.5'), None, Decimal('3'), Decimal('4.2')])
result = lib.convert_sql_column(arr)
expected = np.array([1.5, np.nan, 3, 4.2], dtype='f8')
tm.assert_numpy_array_equal(result, expected)
def test_convert_downcast_int64(self):
from pandas._libs.parsers import na_values
arr = np.array([1, 2, 7, 8, 10], dtype=np.int64)
expected = np.array([1, 2, 7, 8, 10], dtype=np.int8)
# default argument
result = lib.downcast_int64(arr, na_values)
tm.assert_numpy_array_equal(result, expected)
result = lib.downcast_int64(arr, na_values, use_unsigned=False)
tm.assert_numpy_array_equal(result, expected)
expected = np.array([1, 2, 7, 8, 10], dtype=np.uint8)
result = lib.downcast_int64(arr, na_values, use_unsigned=True)
tm.assert_numpy_array_equal(result, expected)
# still cast to int8 despite use_unsigned=True
# because of the negative number as an element
arr = np.array([1, 2, -7, 8, 10], dtype=np.int64)
expected = np.array([1, 2, -7, 8, 10], dtype=np.int8)
result = lib.downcast_int64(arr, na_values, use_unsigned=True)
tm.assert_numpy_array_equal(result, expected)
arr = np.array([1, 2, 7, 8, 300], dtype=np.int64)
expected = np.array([1, 2, 7, 8, 300], dtype=np.int16)
result = lib.downcast_int64(arr, na_values)
tm.assert_numpy_array_equal(result, expected)
int8_na = na_values[np.int8]
int64_na = na_values[np.int64]
arr = np.array([int64_na, 2, 3, 10, 15], dtype=np.int64)
expected = np.array([int8_na, 2, 3, 10, 15], dtype=np.int8)
result = lib.downcast_int64(arr, na_values)
tm.assert_numpy_array_equal(result, expected)
|
mit
|
hugobowne/scikit-learn
|
sklearn/svm/setup.py
|
321
|
3157
|
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
bsd-3-clause
|
fengzhyuan/scikit-learn
|
examples/bicluster/plot_spectral_biclustering.py
|
403
|
2011
|
"""
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
|
bsd-3-clause
|
rahuldhote/scikit-learn
|
examples/svm/plot_separating_hyperplane.py
|
294
|
1273
|
"""
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
FUSED-Wind/fusedwind
|
src/fusedwind/lib/environment.py
|
2
|
11963
|
#!/usr/bin/env python
# encoding: utf-8
"""
environment.py
Created by Andrew Ning on 2012-01-20.
Copyright (c) NREL. All rights reserved.
"""
import math
import numpy as np
from scipy.optimize import brentq
from openmdao.main.api import Component
from openmdao.main.datatypes.api import Float, Array
from utilities import hstack, vstack
# -----------------
# Base Components
# -----------------
class WindBase(Component):
"""base component for wind speed/direction"""
# TODO: if I put required=True here for Uref there is another bug
# variables
Uref = Float(iotype='in', units='m/s', desc='reference wind speed (usually at hub height)')
zref = Float(iotype='in', units='m', desc='corresponding reference height')
z = Array(iotype='in', units='m', desc='heights where wind speed should be computed')
# parameters
z0 = Float(0.0, iotype='in', units='m', desc='bottom of wind profile (height of ground/sea)')
# out
U = Array(iotype='out', units='m/s', desc='magnitude of wind speed at each z location')
beta = Array(iotype='out', units='deg', desc='corresponding wind angles relative to inertial coordinate system')
missing_deriv_policy = 'assume_zero' # TODO: for now OpenMDAO issue
class WaveBase(Component):
"""base component for wave speed/direction"""
# variables
z = Array(iotype='in', units='m', desc='heights where wave speed should be computed')
# out
U = Array(iotype='out', units='m/s', desc='magnitude of wave speed at each z location')
A = Array(iotype='out', units='m/s**2', desc='magnitude of wave acceleration at each z location')
beta = Array(iotype='out', units='deg', desc='corresponding wave angles relative to inertial coordinate system')
U0 = Float(iotype='out', units='m/s', desc='magnitude of wave speed at z=MSL')
A0 = Float(iotype='out', units='m/s**2', desc='magnitude of wave acceleration at z=MSL')
beta0 = Float(iotype='out', units='deg', desc='corresponding wave angles relative to inertial coordinate system at z=MSL')
missing_deriv_policy = 'assume_zero'
def execute(self):
"""default to no waves"""
n = len(self.z)
self.U = np.zeros(n)
self.A = np.zeros(n)
self.beta = np.zeros(n)
self.U0 = 0.
self.A0 = 0.
self.beta0 = 0.
class SoilBase(Component):
"""base component for soil stiffness"""
# out
k = Array(iotype='out', units='N/m', required=True, desc='spring stiffness. rigid directions should use \
``float(''inf'')``. order: (x, theta_x, y, theta_y, z, theta_z)')
missing_deriv_policy = 'assume_zero' # TODO: for now OpenMDAO issue
# -----------------------
# Subclassed Components
# -----------------------
class PowerWind(WindBase):
"""power-law profile wind. any nodes must not cross z0, and if a node is at z0
it must stay at that point. otherwise gradients crossing the boundary will be wrong."""
# parameters
shearExp = Float(0.2, iotype='in', desc='shear exponent')
betaWind = Float(0.0, iotype='in', units='deg', desc='wind angle relative to inertial coordinate system')
missing_deriv_policy = 'assume_zero'
def execute(self):
# rename
z = self.z
zref = self.zref
z0 = self.z0
# velocity
idx = z > z0
n = len(z)
self.U = np.zeros(n)
self.U[idx] = self.Uref*((z[idx] - z0)/(zref - z0))**self.shearExp
self.beta = self.betaWind*np.ones_like(z)
# # add small cubic spline to allow continuity in gradient
# k = 0.01 # fraction of profile with cubic spline
# zsmall = z0 + k*(zref - z0)
# self.spline = CubicSpline(x1=z0, x2=zsmall, f1=0.0, f2=Uref*k**shearExp,
# g1=0.0, g2=Uref*k**shearExp*shearExp/(zsmall - z0))
# idx = np.logical_and(z > z0, z < zsmall)
# self.U[idx] = self.spline.eval(z[idx])
# self.zsmall = zsmall
# self.k = k
def list_deriv_vars(self):
inputs = ('Uref', 'z', 'zref')
outputs = ('U',)
return inputs, outputs
def provideJ(self):
# rename
z = self.z
zref = self.zref
z0 = self.z0
shearExp = self.shearExp
U = self.U
Uref = self.Uref
# gradients
n = len(z)
dU_dUref = np.zeros(n)
dU_dz = np.zeros(n)
dU_dzref = np.zeros(n)
idx = z > z0
dU_dUref[idx] = U[idx]/Uref
dU_dz[idx] = U[idx]*shearExp/(z[idx] - z0)
dU_dzref[idx] = -U[idx]*shearExp/(zref - z0)
# # cubic spline region
# idx = np.logical_and(z > z0, z < zsmall)
# # d w.r.t z
# dU_dz[idx] = self.spline.eval_deriv(z[idx])
# # d w.r.t. Uref
# df2_dUref = k**shearExp
# dg2_dUref = k**shearExp*shearExp/(zsmall - z0)
# dU_dUref[idx] = self.spline.eval_deriv_params(z[idx], 0.0, 0.0, 0.0, df2_dUref, 0.0, dg2_dUref)
# # d w.r.t. zref
# dx2_dzref = k
# dg2_dzref = -Uref*k**shearExp*shearExp/k/(zref - z0)**2
# dU_dzref[idx] = self.spline.eval_deriv_params(z[idx], 0.0, dx2_dzref, 0.0, 0.0, 0.0, dg2_dzref)
J = hstack([dU_dUref, np.diag(dU_dz), dU_dzref])
return J
class LogWind(WindBase):
"""logarithmic-profile wind"""
# parameters
z_roughness = Float(10.0, iotype='in', units='mm', desc='surface roughness length')
betaWind = Float(0.0, iotype='in', units='deg', desc='wind angle relative to inertial coordinate system')
missing_deriv_policy = 'assume_zero'
def execute(self):
# rename
z = self.z
zref = self.zref
z0 = self.z0
z_roughness = self.z_roughness/1e3 # convert to m
# find velocity
        idx = z - z0 > z_roughness
self.U = np.zeros_like(z)
self.U[idx] = self.Uref*np.log((z[idx] - z0)/z_roughness) / math.log((zref - z0)/z_roughness)
self.beta = self.betaWind*np.ones_like(z)
def list_deriv_vars(self):
inputs = ('Uref', 'z', 'zref')
outputs = ('U',)
return inputs, outputs
def provideJ(self):
# rename
z = self.z
zref = self.zref
z0 = self.z0
z_roughness = self.z_roughness/1e3
Uref = self.Uref
n = len(z)
dU_dUref = np.zeros(n)
dU_dz_diag = np.zeros(n)
dU_dzref = np.zeros(n)
        idx = z - z0 > z_roughness
lt = np.log((z[idx] - z0)/z_roughness)
lb = math.log((zref - z0)/z_roughness)
dU_dUref[idx] = lt/lb
dU_dz_diag[idx] = Uref/lb / (z[idx] - z0)
dU_dzref[idx] = -Uref*lt / math.log((zref - z0)/z_roughness)**2 / (zref - z0)
J = hstack([dU_dUref, np.diag(dU_dz_diag), dU_dzref])
return J
class LinearWaves(WaveBase):
"""linear (Airy) wave theory"""
# variables
Uc = Float(iotype='in', units='m/s', desc='mean current speed')
# parameters
z_surface = Float(iotype='in', units='m', desc='vertical location of water surface')
hs = Float(iotype='in', units='m', desc='significant wave height (crest-to-trough)')
T = Float(iotype='in', units='s', desc='period of waves')
z_floor = Float(0.0, iotype='in', units='m', desc='vertical location of sea floor')
g = Float(9.81, iotype='in', units='m/s**2', desc='acceleration of gravity')
betaWave = Float(0.0, iotype='in', units='deg', desc='wave angle relative to inertial coordinate system')
def execute(self):
# water depth
d = self.z_surface - self.z_floor
# design wave height
h = 1.1*self.hs
# circular frequency
omega = 2.0*math.pi/self.T
# compute wave number from dispersion relationship
k = brentq(lambda k: omega**2 - self.g*k*math.tanh(d*k), 0, 10*omega**2/self.g)
# zero at surface
z_rel = self.z - self.z_surface
# maximum velocity
self.U = h/2.0*omega*np.cosh(k*(z_rel + d))/math.sinh(k*d) + self.Uc
self.U0 = h/2.0*omega*np.cosh(k*(0. + d))/math.sinh(k*d) + self.Uc
# check heights
self.U[np.logical_or(self.z < self.z_floor, self.z > self.z_surface)] = 0.
# acceleration
self.A = self.U * omega
self.A0 = self.U0 * omega
# angles
self.beta = self.betaWave*np.ones_like(self.z)
        self.beta0 = self.betaWave
# derivatives
dU_dz = h/2.0*omega*np.sinh(k*(z_rel + d))/math.sinh(k*d)*k
dU_dUc = np.ones_like(self.z)
idx = np.logical_or(self.z < self.z_floor, self.z > self.z_surface)
dU_dz[idx] = 0.0
dU_dUc[idx] = 0.0
dA_dz = omega*dU_dz
dA_dUc = omega*dU_dUc
self.J = vstack([hstack([np.diag(dU_dz), dU_dUc]), hstack([np.diag(dA_dz), dA_dUc])])
def list_deriv_vars(self):
inputs = ('z', 'Uc')
outputs = ('U', 'A', 'U0', 'A0')
return inputs, outputs
def provideJ(self):
return self.J
class TowerSoil(SoilBase):
"""textbook soil stiffness method"""
# variable
r0 = Float(1.0, iotype='in', units='m', desc='radius of base of tower')
depth = Float(1.0, iotype='in', units='m', desc='depth of foundation in the soil')
# parameter
G = Float(140e6, iotype='in', units='Pa', desc='shear modulus of soil')
    nu = Float(0.4, iotype='in', desc="Poisson's ratio of soil")
rigid = Array(iotype='in', dtype=np.bool, desc='directions that should be considered infinitely rigid\
order is x, theta_x, y, theta_y, z, theta_z')
missing_deriv_policy = 'assume_zero'
def execute(self):
G = self.G
nu = self.nu
h = self.depth
r0 = self.r0
# vertical
eta = 1.0 + 0.6*(1.0-nu)*h/r0
k_z = 4*G*r0*eta/(1.0-nu)
# horizontal
eta = 1.0 + 0.55*(2.0-nu)*h/r0
k_x = 32.0*(1.0-nu)*G*r0*eta/(7.0-8.0*nu)
# rocking
eta = 1.0 + 1.2*(1.0-nu)*h/r0 + 0.2*(2.0-nu)*(h/r0)**3
k_thetax = 8.0*G*r0**3*eta/(3.0*(1.0-nu))
# torsional
k_phi = 16.0*G*r0**3/3.0
self.k = np.array([k_x, k_thetax, k_x, k_thetax, k_z, k_phi])
self.k[self.rigid] = float('inf')
def list_deriv_vars(self):
inputs = ('r0', 'depth')
outputs = ('k',)
return inputs, outputs
def provideJ(self):
G = self.G
nu = self.nu
h = self.depth
r0 = self.r0
# vertical
eta = 1.0 + 0.6*(1.0-nu)*h/r0
deta_dr0 = -0.6*(1.0-nu)*h/r0**2
dkz_dr0 = 4*G/(1.0-nu)*(eta + r0*deta_dr0)
deta_dh = 0.6*(1.0-nu)/r0
dkz_dh = 4*G*r0/(1.0-nu)*deta_dh
# horizontal
eta = 1.0 + 0.55*(2.0-nu)*h/r0
deta_dr0 = -0.55*(2.0-nu)*h/r0**2
dkx_dr0 = 32.0*(1.0-nu)*G/(7.0-8.0*nu)*(eta + r0*deta_dr0)
deta_dh = 0.55*(2.0-nu)/r0
dkx_dh = 32.0*(1.0-nu)*G*r0/(7.0-8.0*nu)*deta_dh
# rocking
eta = 1.0 + 1.2*(1.0-nu)*h/r0 + 0.2*(2.0-nu)*(h/r0)**3
deta_dr0 = -1.2*(1.0-nu)*h/r0**2 - 3*0.2*(2.0-nu)*(h/r0)**3/r0
dkthetax_dr0 = 8.0*G/(3.0*(1.0-nu))*(3*r0**2*eta + r0**3*deta_dr0)
deta_dh = 1.2*(1.0-nu)/r0 + 3*0.2*(2.0-nu)*(1.0/r0)**3*h**2
dkthetax_dh = 8.0*G*r0**3/(3.0*(1.0-nu))*deta_dh
# torsional
dkphi_dr0 = 16.0*G*3*r0**2/3.0
dkphi_dh = 0.0
dk_dr0 = np.array([dkx_dr0, dkthetax_dr0, dkx_dr0, dkthetax_dr0, dkz_dr0, dkphi_dr0])
dk_dr0[self.rigid] = 0.0
dk_dh = np.array([dkx_dh, dkthetax_dh, dkx_dh, dkthetax_dh, dkz_dh, dkphi_dh])
dk_dh[self.rigid] = 0.0
J = hstack((dk_dr0, dk_dh))
return J
if __name__ == '__main__':
p = LogWind()
p.Uref = 10.0
p.zref = 100.0
p.z0 = 1.0
p.z = np.linspace(1.0, 5, 20)
p.shearExp = 0.2
p.betaWind = 0.0
p.run()
import matplotlib.pyplot as plt
plt.plot(p.z, p.U)
plt.show()
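    # The same quick check for the power-law profile defined above, for
    # comparison with the logarithmic profile just plotted (illustrative only).
    pw = PowerWind()
    pw.Uref = 10.0
    pw.zref = 100.0
    pw.z0 = 1.0
    pw.shearExp = 0.2
    pw.z = np.linspace(1.0, 5, 20)
    pw.run()
    plt.plot(pw.z, pw.U)
    plt.show()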
|
apache-2.0
|
JackKelly/neuralnilm_prototype
|
scripts/e344.py
|
2
|
6585
|
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 5000
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
one_target_per_seq=False,
n_seq_per_batch=16,
subsample_target=2,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs = True,
standardise_input=True,
unit_variance_targets=True,
input_padding=8,
lag=0,
reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
loss_function=partial(scaled_cost3, ignore_inactive=False, loss_func=mdn_nll),
updates_func=momentum,
learning_rate=5e-3,
learning_rate_changes_by_iteration={
50: 1e-3,
200: 5e-4,
400: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
auto_reshape=False,
plotter=MDNPlotter
)
"""
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
12345678901234567890
"""
def exp_a(name):
global source
# source_dict_copy = deepcopy(source_dict)
# source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 1024
NUM_FILTERS = 50
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_length': 10,
'stride': 2,
'nonlinearity': rectify,
'W': Normal(std=1/sqrt(source.seq_length))
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': N,
'W': Normal(std=1/sqrt(N * NUM_FILTERS)),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': N,
'W': Normal(std=1/sqrt(N)),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': source.output_shape()[1] * source.output_shape()[2],
'W': Normal(std=1/sqrt(N)),
'nonlinearity': rectify
},
{
'type': ReshapeLayer,
'shape': (16 * 256, 5)
},
# {
# 'type': DenseLayer,
# 'num_units': source.output_shape()[1] * source.output_shape()[2],
# 'W': Normal(std=1/sqrt(N)),
# 'nonlinearity': T.nnet.softplus
# }
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 1,
'nonlinearity_mu': T.nnet.softplus
}
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
|
mit
|
krez13/scikit-learn
|
sklearn/cluster/spectral.py
|
233
|
18153
|
# -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux [email protected]
# Brian Cheung
# Wei LI <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
    n_iter_max : int, optional, default: 20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization
        of the rotation matrix
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
try:
U, S, Vh = np.linalg.svd(t_svd)
svd_restarts += 1
except LinAlgError:
print("SVD did not converge, randomizing and trying again")
break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
n_components : integer, optional, default is n_clusters
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
    The graph should contain only one connected component; otherwise
    the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
    When calling ``fit``, an affinity matrix is constructed using either a
    kernel function such as the Gaussian (aka RBF) kernel of the euclidean
    distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
gamma : float
Scaling factor of RBF, polynomial, exponential chi^2 and
sigmoid affinity kernel. Ignored for
``affinity='nearest_neighbors'``.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
        ``fit``.
labels_ :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
for which 0 means identical elements, and high values means
very dissimilar elements, it can be transformed in a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- X ** 2 / (2. * delta ** 2))
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
kernel_params=None):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def fit(self, X, y=None):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
warnings.warn("The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
            connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors,
                                            include_self=True)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
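# Hedged usage sketch (editor's addition): this illustrates the Notes section of
# the docstring above -- turning a distance matrix into a similarity matrix with
# a Gaussian kernel and clustering it with ``affinity='precomputed'``. The import
# assumes the public ``sklearn.cluster.SpectralClustering`` entry point; ``delta``
# is an illustrative bandwidth choice, not a recommended default.
if __name__ == "__main__":
    import numpy as np
    from sklearn.cluster import SpectralClustering
    from sklearn.metrics import pairwise_distances
    rng = np.random.RandomState(0)
    points = np.vstack([rng.normal(0., .3, (10, 2)),
                        rng.normal(3., .3, (10, 2))])
    distances = pairwise_distances(points)  # 0 means identical points
    delta = distances.mean()
    similarity = np.exp(-distances ** 2 / (2. * delta ** 2))
    model = SpectralClustering(n_clusters=2, affinity='precomputed',
                               random_state=0)
    print(model.fit(similarity).labels_)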
|
bsd-3-clause
|
Fougere87/pysingcells
|
pysingcells/counting/featurecounts.py
|
1
|
3551
|
# -*- coding: utf-8 -*-
# std import
import os
import subprocess
# pandas import
from pandas import DataFrame, read_csv
# project import
from ..logger import log
from . import abccounting
from .abccounting import AbcCounting
# tested import
try:
from os import scandir
except ImportError:
from scandir import scandir
class FeatureCounts(AbcCounting):
""" Class for run Feature Counts """
def __init__(self):
""" Intialize featureCounts runner object """
super().__init__()
# command flag
self.annotation_flag = "-a"
self.input_flag = ""
self.output_flag = "-o"
def _run_counting(self):
""" Run count effectively """
log.info(self.get_name() + " run raw count")
raw_output = self.out_path + "raw_counts.tsv"
list_file = [input_file.path for input_file in scandir(self.in_path)
if input_file.is_file() and
input_file.name.endswith(".sam")]
base_command = [self.bin_path]
base_command += self.options.split(" ")
base_command += [self.annotation_flag, self.annotation_path,
self.output_flag, raw_output, self.input_flag]
base_command += list_file
process = subprocess.Popen(base_command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
self._write_process(base_command, self.get_name(),
*process.communicate())
return process.returncode == 0
def _convert_output(self):
""" Convert output in useful format """
log.info(self.get_name() + " convert raw count in tpm ")
# reading
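        # A one-row chunk is read first only to discover the column dtypes;
        # integer count columns are mapped to float, presumably so that the
        # per-cell normalised values written below are not truncated to int.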
header = read_csv(self.out_path + "raw_counts.tsv",
header=1, sep='\t', chunksize=1)
col_type = {index: float if val == int else val
for index, val in header.read().dtypes.iteritems()}
raw_data = read_csv(self.out_path + "raw_counts.tsv",
header=1, sep='\t', dtype=col_type)
        # read the summary to get the number of mapped reads per sample
summary = read_csv(self.out_path + "raw_counts.tsv.summary", header=0,
sep='\t')
summary.columns = [label.replace(";", "")
for label in summary.columns]
# cleaning
delete_col = ["Chr", "Start", "End", "Strand"]
for colname in delete_col:
raw_data.drop(colname, inplace=True, axis=1)
# get normalisation function
compute_new_val = getattr(abccounting, self.compute_norm)
for sample in raw_data.columns[2:]:
log.info("\t for sample " + sample)
print(sample)
nb_heat = summary[[sample]].sum(numeric_only=True)
for index, count in raw_data[sample].iteritems():
if count != 0:
new_val = compute_new_val(count, raw_data["Length"][index],
nb_heat)
raw_data.set_value(index, sample, new_val)
# more cleaning
raw_data.columns = [os.path.basename(name).split('.')[0] for name
in raw_data.columns]
raw_data.drop("Length", inplace=True, axis=1)
# writing
raw_data.to_csv(self.out_path + "clean_" + self.compute_norm
+ ".csv", index=False)
return True
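# Hedged sketch (editor's addition): the normalisation helpers resolved through
# ``getattr(abccounting, self.compute_norm)`` live elsewhere and are not shown in
# this file. The function below only illustrates the (count, length, nb_mapped)
# call signature used in ``_convert_output``; ``_example_rpkm`` is a hypothetical
# name and the real helpers may compute something different (e.g. TPM).
def _example_rpkm(count, length, nb_mapped):
    """ Reads per kilobase per million mapped reads, for a single feature. """
    return count * 1e9 / (float(length) * float(nb_mapped))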
|
mit
|
del680202/MachineLearning-memo
|
src/lr/multiple-features-linear-regression-sample.py
|
1
|
1390
|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
x = [45.9, 41.3, 10.8, 48.9, 32.8, 19.6, 2.1, 2.6, 5.8, 24, 35.1, 7.6, 32.9, 39.6, 20.5, 23.9, 27.7, 5.1, 15.9, 16.9, 12.6, 3.5, 29.3, 16.7, 27.1, 16, 28.3, 17.4, 1.5, 20, 1.4, 4.1, 43.8, 49.4, 26.7, 37.7, 22.3, 33.4, 27.7, 8.4, 25.7, 22.5, 9.9, 41.5, 15.8, 11.7]
y = [69.3, 58.5, 58.4, 75, 23.5, 11.6, 1, 21.2, 24.2, 4, 65.9, 7.2, 46, 55.8, 18.3, 19.1, 53.4, 23.5, 49.6, 26.2, 18.3, 19.5, 12.6, 22.9, 22.9, 40.8, 43.2, 38.6, 30, 0.3, 7.4, 8.5, 5, 45.7, 35.1, 32, 31.6, 38.7, 1.8, 26.4, 43.3, 31.5, 35.7, 18.5, 49.9, 36.8]
z = [9.3, 18.5, 12.9, 7.2, 11.8, 13.2, 4.8, 10.6, 8.6, 17.4, 9.2, 9.7, 19, 24.4, 11.3, 14.6, 18, 12.5, 5.6, 15.5, 9.7, 12, 15, 15.9, 18.9, 10.5, 21.4, 11.9, 9.6, 17.4, 9.5, 12.8, 25.4, 14.7, 10.1, 21.5, 16.6, 17.1, 20.7, 12.9, 8.5, 14.9, 10.6, 23.2, 14.8, 9.7]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
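# Fit the plane z = w0 + w1*x + w2*y: build the design matrix [1, x, y] and solve
# the normal equations. X.I is the Moore-Penrose pseudo-inverse here (X is not
# square), so W is the least-squares estimate of the coefficients.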
X = np.mat([x, y]).T
X = np.insert(X, 0, np.ones(46), 1)
Y = np.mat([z]).T
#np.linalg.lstsq(X,Y)
W = X.I * Y
LR = lambda x, y: W[0, 0] + W[1, 0] * x + W[2, 0] * y
x2 = y2 = np.arange(0, 60.0)
X, Y = np.meshgrid(x2, y2)
zs = np.array([LR(x2,y2) for x2,y2 in zip(np.ravel(X), np.ravel(Y))])
Z = zs.reshape(X.shape)
ax.plot_surface(X, Y, Z)
ax.scatter(x, y, z)
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.show()
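# Hedged cross-check (editor's addition, not part of the original sample): the
# commented-out np.linalg.lstsq hint above should recover the same coefficients
# as the pseudo-inverse solve. The design matrix is rebuilt because X and Y were
# reassigned by np.meshgrid for the surface plot.
A = np.insert(np.mat([x, y]).T, 0, np.ones(46), 1)
b = np.mat([z]).T
W_lstsq = np.linalg.lstsq(A, b, rcond=None)[0]
print(W.T, W_lstsq.T)  # both should print (approximately) the same coefficients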
|
apache-2.0
|
xodus7/tensorflow
|
tensorflow/python/estimator/canned/linear_testing_utils.py
|
2
|
86121
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for testing linear estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import shutil
import tempfile
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import run_config
from tensorflow.python.estimator.canned import linear
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import optimizer as optimizer_lib
from tensorflow.python.training import queue_runner
from tensorflow.python.training import saver
from tensorflow.python.training import session_run_hook
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
# pylint rules which are disabled by default for test files.
# pylint: disable=invalid-name,protected-access,missing-docstring
# Names of variables created by model.
AGE_WEIGHT_NAME = 'linear/linear_model/age/weights'
HEIGHT_WEIGHT_NAME = 'linear/linear_model/height/weights'
OCCUPATION_WEIGHT_NAME = 'linear/linear_model/occupation/weights'
BIAS_NAME = 'linear/linear_model/bias_weights'
LANGUAGE_WEIGHT_NAME = 'linear/linear_model/language/weights'
def assert_close(expected, actual, rtol=1e-04, name='assert_close'):
with ops.name_scope(name, 'assert_close', (expected, actual, rtol)) as scope:
expected = ops.convert_to_tensor(expected, name='expected')
actual = ops.convert_to_tensor(actual, name='actual')
rdiff = math_ops.abs(expected - actual, 'diff') / math_ops.abs(expected)
rtol = ops.convert_to_tensor(rtol, name='rtol')
return check_ops.assert_less(
rdiff,
rtol,
        data=('Condition expected =~ actual did not hold element-wise: '
'expected = ', expected, 'actual = ', actual, 'rdiff = ', rdiff,
'rtol = ', rtol,),
name=scope)
def save_variables_to_ckpt(model_dir):
init_all_op = [variables_lib.global_variables_initializer()]
with tf_session.Session() as sess:
sess.run(init_all_op)
saver.Saver().save(sess, os.path.join(model_dir, 'model.ckpt'))
def queue_parsed_features(feature_map):
tensors_to_enqueue = []
keys = []
for key, tensor in six.iteritems(feature_map):
keys.append(key)
tensors_to_enqueue.append(tensor)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = data_flow_ops.FIFOQueue(capacity=100, dtypes=queue_dtypes)
queue_runner.add_queue_runner(
queue_runner.QueueRunner(input_queue,
[input_queue.enqueue(tensors_to_enqueue)]))
dequeued_tensors = input_queue.dequeue()
return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
def sorted_key_dict(unsorted_dict):
return {k: unsorted_dict[k] for k in sorted(unsorted_dict)}
def sigmoid(x):
return 1 / (1 + np.exp(-1.0 * x))
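# Editor's note (hedged): several expected-loss constants asserted in the
# classifier tests below follow directly from this sigmoid, e.g. (rounded):
#   -log(sigmoid(0))     ~= 0.69315  (all-zero initial weights, label 1)
#   -log(sigmoid(-1))    ~= 1.3133   (logit -1, label 1)
#   -log(1 - sigmoid(2)) ~= 2.1269   (logit 2, label 0)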
class CheckPartitionerVarHook(session_run_hook.SessionRunHook):
"""A `SessionRunHook` to check a partitioned variable."""
def __init__(self, test_case, var_name, var_dim, partitions):
self._test_case = test_case
self._var_name = var_name
self._var_dim = var_dim
self._partitions = partitions
def begin(self):
with variable_scope.variable_scope(
variable_scope.get_variable_scope()) as scope:
scope.reuse_variables()
partitioned_weight = variable_scope.get_variable(
self._var_name, shape=(self._var_dim, 1))
self._test_case.assertTrue(
isinstance(partitioned_weight, variables_lib.PartitionedVariable))
for part in partitioned_weight:
self._test_case.assertEqual(self._var_dim // self._partitions,
part.get_shape()[0])
class BaseLinearRegressorPartitionerTest(object):
def __init__(self, linear_regressor_fn):
self._linear_regressor_fn = linear_regressor_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def testPartitioner(self):
x_dim = 64
partitions = 4
def _partitioner(shape, dtype):
del dtype # unused; required by Fn signature.
# Only partition the embedding tensor.
return [partitions, 1] if shape[0] == x_dim else [1]
regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.categorical_column_with_hash_bucket(
'language', hash_bucket_size=x_dim),),
partitioner=_partitioner,
model_dir=self._model_dir)
def _input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english', 'spanish'],
indices=[[0, 0], [0, 1]],
dense_shape=[1, 2])
}, [[10.]]
hook = CheckPartitionerVarHook(self, LANGUAGE_WEIGHT_NAME, x_dim,
partitions)
regressor.train(input_fn=_input_fn, steps=1, hooks=[hook])
def testDefaultPartitionerWithMultiplePsReplicas(self):
partitions = 2
# This results in weights larger than the default partition size of 64M,
# so partitioned weights are created (each weight uses 4 bytes).
x_dim = 32 << 20
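    # Worked out: 32 << 20 = 33,554,432 weights * 4 bytes = 128 MB, i.e. two
    # 64 MB partitions, which matches ``partitions = 2`` above.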
class FakeRunConfig(run_config.RunConfig):
@property
def num_ps_replicas(self):
return partitions
# Mock the device setter as ps is not available on test machines.
with test.mock.patch.object(
estimator,
'_get_replica_device_setter',
return_value=lambda _: '/cpu:0'):
linear_regressor = self._linear_regressor_fn(
feature_columns=(
feature_column_lib.categorical_column_with_hash_bucket(
'language', hash_bucket_size=x_dim),),
config=FakeRunConfig(),
model_dir=self._model_dir)
def _input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english', 'spanish'],
indices=[[0, 0], [0, 1]],
dense_shape=[1, 2])
}, [[10.]]
hook = CheckPartitionerVarHook(self, LANGUAGE_WEIGHT_NAME, x_dim,
partitions)
linear_regressor.train(input_fn=_input_fn, steps=1, hooks=[hook])
# TODO(b/36813849): Add tests with dynamic shape inputs using placeholders.
class BaseLinearRegressorEvaluationTest(object):
def __init__(self, linear_regressor_fn):
self._linear_regressor_fn = linear_regressor_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def test_evaluation_for_simple_data(self):
with ops.Graph().as_default():
variables_lib.Variable([[11.0]], name=AGE_WEIGHT_NAME)
variables_lib.Variable([2.0], name=BIAS_NAME)
variables_lib.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir)
eval_metrics = linear_regressor.evaluate(
input_fn=lambda: ({'age': ((1,),)}, ((10.,),)), steps=1)
# Logit is (1. * 11.0 + 2.0) = 13, while label is 10. Loss is 3**2 = 9.
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 9.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
metric_keys.MetricKeys.LABEL_MEAN: 10.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_batch(self):
"""Tests evaluation for batch_size==2."""
with ops.Graph().as_default():
variables_lib.Variable([[11.0]], name=AGE_WEIGHT_NAME)
variables_lib.Variable([2.0], name=BIAS_NAME)
variables_lib.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir)
eval_metrics = linear_regressor.evaluate(
input_fn=lambda: ({'age': ((1,), (1,))}, ((10.,), (10.,))), steps=1)
# Logit is (1. * 11.0 + 2.0) = 13, while label is 10.
# Loss per example is 3**2 = 9.
# Training loss is the sum over batch = 9 + 9 = 18
# Average loss is the average over batch = 9
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 18.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
metric_keys.MetricKeys.LABEL_MEAN: 10.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_weights(self):
"""Tests evaluation with weights."""
with ops.Graph().as_default():
variables_lib.Variable([[11.0]], name=AGE_WEIGHT_NAME)
variables_lib.Variable([2.0], name=BIAS_NAME)
variables_lib.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
def _input_fn():
features = {'age': ((1,), (1,)), 'weights': ((1.,), (2.,))}
labels = ((10.,), (10.,))
return features, labels
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
weight_column='weights',
model_dir=self._model_dir)
eval_metrics = linear_regressor.evaluate(input_fn=_input_fn, steps=1)
# Logit is (1. * 11.0 + 2.0) = 13, while label is 10.
# Loss per example is 3**2 = 9.
# Training loss is the weighted sum over batch = 9 + 2*9 = 27
# average loss is the weighted average = 9 + 2*9 / (1 + 2) = 9
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 27.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
metric_keys.MetricKeys.LABEL_MEAN: 10.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_for_multi_dimensions(self):
x_dim = 3
label_dim = 2
with ops.Graph().as_default():
variables_lib.Variable(
[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name=AGE_WEIGHT_NAME)
variables_lib.Variable([7.0, 8.0], name=BIAS_NAME)
variables_lib.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column(
'age', shape=(x_dim,)),),
label_dimension=label_dim,
model_dir=self._model_dir)
input_fn = numpy_io.numpy_input_fn(
x={
'age': np.array([[2., 4., 5.]]),
},
y=np.array([[46., 58.]]),
batch_size=1,
num_epochs=None,
shuffle=False)
eval_metrics = linear_regressor.evaluate(input_fn=input_fn, steps=1)
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
metric_keys.MetricKeys.PREDICTION_MEAN,
metric_keys.MetricKeys.LABEL_MEAN, ops.GraphKeys.GLOBAL_STEP),
eval_metrics.keys())
# Logit is
# [2., 4., 5.] * [1.0, 2.0] + [7.0, 8.0] = [39, 50] + [7.0, 8.0]
# [3.0, 4.0]
# [5.0, 6.0]
# which is [46, 58]
self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
def test_evaluation_for_multiple_feature_columns(self):
with ops.Graph().as_default():
variables_lib.Variable([[10.0]], name=AGE_WEIGHT_NAME)
variables_lib.Variable([[2.0]], name=HEIGHT_WEIGHT_NAME)
variables_lib.Variable([5.0], name=BIAS_NAME)
variables_lib.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
batch_size = 2
feature_columns = [
feature_column_lib.numeric_column('age'),
feature_column_lib.numeric_column('height')
]
input_fn = numpy_io.numpy_input_fn(
x={'age': np.array([20, 40]),
'height': np.array([4, 8])},
y=np.array([[213.], [421.]]),
batch_size=batch_size,
num_epochs=None,
shuffle=False)
est = self._linear_regressor_fn(
feature_columns=feature_columns, model_dir=self._model_dir)
eval_metrics = est.evaluate(input_fn=input_fn, steps=1)
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
metric_keys.MetricKeys.PREDICTION_MEAN,
metric_keys.MetricKeys.LABEL_MEAN, ops.GraphKeys.GLOBAL_STEP),
eval_metrics.keys())
# Logit is [(20. * 10.0 + 4 * 2.0 + 5.0), (40. * 10.0 + 8 * 2.0 + 5.0)] =
# [213.0, 421.0], while label is [213., 421.]. Loss = 0.
self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
class BaseLinearRegressorPredictTest(object):
def __init__(self, linear_regressor_fn):
self._linear_regressor_fn = linear_regressor_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def test_1d(self):
"""Tests predict when all variables are one-dimensional."""
with ops.Graph().as_default():
variables_lib.Variable([[10.]], name='linear/linear_model/x/weights')
variables_lib.Variable([.2], name=BIAS_NAME)
variables_lib.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('x'),),
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': np.array([[2.]])},
y=None,
batch_size=1,
num_epochs=1,
shuffle=False)
predictions = linear_regressor.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
# x * weight + bias = 2. * 10. + .2 = 20.2
self.assertAllClose([[20.2]], predicted_scores)
def testMultiDim(self):
"""Tests predict when all variables are multi-dimenstional."""
batch_size = 2
label_dimension = 3
x_dim = 4
feature_columns = (feature_column_lib.numeric_column('x', shape=(x_dim,)),)
with ops.Graph().as_default():
variables_lib.Variable( # shape=[x_dim, label_dimension]
[[1., 2., 3.], [2., 3., 4.], [3., 4., 5.], [4., 5., 6.]],
name='linear/linear_model/x/weights')
variables_lib.Variable( # shape=[label_dimension]
[.2, .4, .6], name=BIAS_NAME)
variables_lib.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
# x shape=[batch_size, x_dim]
x={'x': np.array([[1., 2., 3., 4.], [5., 6., 7., 8.]])},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predictions = linear_regressor.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
# score = x * weight + bias, shape=[batch_size, label_dimension]
self.assertAllClose([[30.2, 40.4, 50.6], [70.2, 96.4, 122.6]],
predicted_scores)
def testTwoFeatureColumns(self):
"""Tests predict with two feature columns."""
with ops.Graph().as_default():
variables_lib.Variable([[10.]], name='linear/linear_model/x0/weights')
variables_lib.Variable([[20.]], name='linear/linear_model/x1/weights')
variables_lib.Variable([.2], name=BIAS_NAME)
variables_lib.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('x0'),
feature_column_lib.numeric_column('x1')),
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x0': np.array([[2.]]),
'x1': np.array([[3.]])},
y=None,
batch_size=1,
num_epochs=1,
shuffle=False)
predictions = linear_regressor.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
# x0 * weight0 + x1 * weight1 + bias = 2. * 10. + 3. * 20 + .2 = 80.2
self.assertAllClose([[80.2]], predicted_scores)
def testSparseCombiner(self):
w_a = 2.0
w_b = 3.0
w_c = 5.0
bias = 5.0
with ops.Graph().as_default():
variables_lib.Variable([[w_a], [w_b], [w_c]], name=LANGUAGE_WEIGHT_NAME)
variables_lib.Variable([bias], name=BIAS_NAME)
variables_lib.Variable(1, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
def _input_fn():
return dataset_ops.Dataset.from_tensors({
'language': sparse_tensor.SparseTensor(
values=['a', 'c', 'b', 'c'],
indices=[[0, 0], [0, 1], [1, 0], [1, 1]],
dense_shape=[2, 2]),
})
feature_columns = (
feature_column_lib.categorical_column_with_vocabulary_list(
'language', vocabulary_list=['a', 'b', 'c']),)
# Check prediction for each sparse_combiner.
# With sparse_combiner = 'sum', we have
# logits_1 = w_a + w_c + bias
# = 2.0 + 5.0 + 5.0 = 12.0
# logits_2 = w_b + w_c + bias
# = 3.0 + 5.0 + 5.0 = 13.0
linear_regressor = self._linear_regressor_fn(
feature_columns=feature_columns,
model_dir=self._model_dir)
predictions = linear_regressor.predict(input_fn=_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
self.assertAllClose([[12.0], [13.0]], predicted_scores)
# With sparse_combiner = 'mean', we have
# logits_1 = 1/2 * (w_a + w_c) + bias
# = 1/2 * (2.0 + 5.0) + 5.0 = 8.5
# logits_2 = 1/2 * (w_b + w_c) + bias
# = 1/2 * (3.0 + 5.0) + 5.0 = 9.0
linear_regressor = self._linear_regressor_fn(
feature_columns=feature_columns,
model_dir=self._model_dir,
sparse_combiner='mean')
predictions = linear_regressor.predict(input_fn=_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
self.assertAllClose([[8.5], [9.0]], predicted_scores)
# With sparse_combiner = 'sqrtn', we have
# logits_1 = sqrt(2)/2 * (w_a + w_c) + bias
# = sqrt(2)/2 * (2.0 + 5.0) + 5.0 = 9.94974
# logits_2 = sqrt(2)/2 * (w_b + w_c) + bias
# = sqrt(2)/2 * (3.0 + 5.0) + 5.0 = 10.65685
linear_regressor = self._linear_regressor_fn(
feature_columns=feature_columns,
model_dir=self._model_dir,
sparse_combiner='sqrtn')
predictions = linear_regressor.predict(input_fn=_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
self.assertAllClose([[9.94974], [10.65685]], predicted_scores)
class BaseLinearRegressorIntegrationTest(object):
def __init__(self, linear_regressor_fn):
self._linear_regressor_fn = linear_regressor_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
input_dimension, label_dimension, prediction_length):
feature_columns = [
feature_column_lib.numeric_column('x', shape=(input_dimension,))
]
est = self._linear_regressor_fn(
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
# learn y = x
est.train(train_input_fn, steps=200)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(200, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))
# PREDICT
predictions = np.array(
[x['predictions'] for x in est.predict(predict_input_fn)])
self.assertAllEqual((prediction_length, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column_lib.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
input_dimension = label_dimension
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
    # Pandas DataFrame naturally supports 1 dim data only.
label_dimension = 1
input_dimension = label_dimension
batch_size = 10
data = np.array([1., 2., 3., 4.], dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
prediction_length = 4
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
input_dimension = label_dimension
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum)),
'y':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum[:label_dimension])),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
class BaseLinearRegressorTrainingTest(object):
def __init__(self, linear_regressor_fn):
self._linear_regressor_fn = linear_regressor_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, expected_loss=None):
expected_var_names = [
'%s/part_0:0' % AGE_WEIGHT_NAME,
'%s/part_0:0' % BIAS_NAME
]
def _minimize(loss, global_step=None, var_list=None):
trainable_vars = var_list or ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertItemsEqual(expected_var_names,
[var.name for var in trainable_vars])
# Verify loss. We can't check the value directly, so we add an assert op.
self.assertEquals(0, loss.shape.ndims)
if expected_loss is None:
if global_step is not None:
return state_ops.assign_add(global_step, 1).op
return control_flow_ops.no_op()
assert_loss = assert_close(
math_ops.to_float(expected_loss, name='expected'),
loss,
name='assert_loss')
with ops.control_dependencies((assert_loss,)):
if global_step is not None:
return state_ops.assign_add(global_step, 1).op
return control_flow_ops.no_op()
mock_optimizer = test.mock.NonCallableMock(
spec=optimizer_lib.Optimizer,
wraps=optimizer_lib.Optimizer(use_locking=False, name='my_optimizer'))
mock_optimizer.minimize = test.mock.MagicMock(wraps=_minimize)
# NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
# So, return mock_optimizer itself for deepcopy.
mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
return mock_optimizer
def _assert_checkpoint(self,
expected_global_step,
expected_age_weight=None,
expected_bias=None):
shapes = {
name: shape
for (name, shape) in checkpoint_utils.list_variables(self._model_dir)
}
self.assertEqual([], shapes[ops.GraphKeys.GLOBAL_STEP])
self.assertEqual(expected_global_step,
checkpoint_utils.load_variable(self._model_dir,
ops.GraphKeys.GLOBAL_STEP))
self.assertEqual([1, 1], shapes[AGE_WEIGHT_NAME])
if expected_age_weight is not None:
self.assertEqual(expected_age_weight,
checkpoint_utils.load_variable(self._model_dir,
AGE_WEIGHT_NAME))
self.assertEqual([1], shapes[BIAS_NAME])
if expected_bias is not None:
self.assertEqual(expected_bias,
checkpoint_utils.load_variable(self._model_dir,
BIAS_NAME))
def testFromScratchWithDefaultOptimizer(self):
# Create LinearRegressor.
label = 5.
age = 17
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir)
# Train for a few steps, and validate final checkpoint.
num_steps = 10
linear_regressor.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self._assert_checkpoint(num_steps)
def testTrainWithOneDimLabel(self):
label_dimension = 1
batch_size = 20
feature_columns = [feature_column_lib.numeric_column('age', shape=(1,))]
est = self._linear_regressor_fn(
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
self.assertEqual((batch_size,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(200)
def testTrainWithOneDimWeight(self):
label_dimension = 1
batch_size = 20
feature_columns = [feature_column_lib.numeric_column('age', shape=(1,))]
est = self._linear_regressor_fn(
feature_columns=feature_columns,
label_dimension=label_dimension,
weight_column='w',
model_dir=self._model_dir)
data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
self.assertEqual((batch_size,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1,
'w': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(200)
def testFromScratch(self):
# Create LinearRegressor.
label = 5.
age = 17
# loss = (logits - label)^2 = (0 - 5.)^2 = 25.
mock_optimizer = self._mock_optimizer(expected_loss=25.)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
linear_regressor.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
expected_global_step=num_steps,
expected_age_weight=0.,
expected_bias=0.)
def testFromCheckpoint(self):
# Create initial checkpoint.
age_weight = 10.0
bias = 5.0
initial_global_step = 100
with ops.Graph().as_default():
variables_lib.Variable([[age_weight]], name=AGE_WEIGHT_NAME)
variables_lib.Variable([bias], name=BIAS_NAME)
variables_lib.Variable(
initial_global_step,
name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = age * age_weight + bias = 17 * 10. + 5. = 175
# loss = (logits - label)^2 = (175 - 5)^2 = 28900
mock_optimizer = self._mock_optimizer(expected_loss=28900.)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
linear_regressor.train(
input_fn=lambda: ({'age': ((17,),)}, ((5.,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
expected_global_step=initial_global_step + num_steps,
expected_age_weight=age_weight,
expected_bias=bias)
def testFromCheckpointMultiBatch(self):
# Create initial checkpoint.
age_weight = 10.0
bias = 5.0
initial_global_step = 100
with ops.Graph().as_default():
variables_lib.Variable([[age_weight]], name=AGE_WEIGHT_NAME)
variables_lib.Variable([bias], name=BIAS_NAME)
variables_lib.Variable(
initial_global_step,
name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = age * age_weight + bias
# logits[0] = 17 * 10. + 5. = 175
# logits[1] = 15 * 10. + 5. = 155
# loss = sum(logits - label)^2 = (175 - 5)^2 + (155 - 3)^2 = 52004
mock_optimizer = self._mock_optimizer(expected_loss=52004.)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
linear_regressor.train(
input_fn=lambda: ({'age': ((17,), (15,))}, ((5.,), (3.,))),
steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
expected_global_step=initial_global_step + num_steps,
expected_age_weight=age_weight,
expected_bias=bias)
class BaseLinearClassifierTrainingTest(object):
def __init__(self, linear_classifier_fn):
self._linear_classifier_fn = linear_classifier_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, expected_loss=None):
expected_var_names = [
'%s/part_0:0' % AGE_WEIGHT_NAME,
'%s/part_0:0' % BIAS_NAME
]
def _minimize(loss, global_step):
trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertItemsEqual(
expected_var_names,
[var.name for var in trainable_vars])
# Verify loss. We can't check the value directly, so we add an assert op.
self.assertEquals(0, loss.shape.ndims)
if expected_loss is None:
return state_ops.assign_add(global_step, 1).op
assert_loss = assert_close(
math_ops.to_float(expected_loss, name='expected'),
loss,
name='assert_loss')
with ops.control_dependencies((assert_loss,)):
return state_ops.assign_add(global_step, 1).op
mock_optimizer = test.mock.NonCallableMock(
spec=optimizer_lib.Optimizer,
wraps=optimizer_lib.Optimizer(use_locking=False, name='my_optimizer'))
mock_optimizer.minimize = test.mock.MagicMock(wraps=_minimize)
# NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
# So, return mock_optimizer itself for deepcopy.
mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
return mock_optimizer
def _assert_checkpoint(
self, n_classes, expected_global_step, expected_age_weight=None,
expected_bias=None):
logits_dimension = n_classes if n_classes > 2 else 1
shapes = {
name: shape for (name, shape) in
checkpoint_utils.list_variables(self._model_dir)
}
self.assertEqual([], shapes[ops.GraphKeys.GLOBAL_STEP])
self.assertEqual(
expected_global_step,
checkpoint_utils.load_variable(
self._model_dir, ops.GraphKeys.GLOBAL_STEP))
self.assertEqual([1, logits_dimension],
shapes[AGE_WEIGHT_NAME])
if expected_age_weight is not None:
self.assertAllEqual(expected_age_weight,
checkpoint_utils.load_variable(
self._model_dir,
AGE_WEIGHT_NAME))
self.assertEqual([logits_dimension], shapes[BIAS_NAME])
if expected_bias is not None:
self.assertAllEqual(expected_bias,
checkpoint_utils.load_variable(
self._model_dir, BIAS_NAME))
def _testFromScratchWithDefaultOptimizer(self, n_classes):
label = 0
age = 17
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
# Train for a few steps, and validate final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self._assert_checkpoint(n_classes, num_steps)
def testBinaryClassesFromScratchWithDefaultOptimizer(self):
self._testFromScratchWithDefaultOptimizer(n_classes=2)
def testMultiClassesFromScratchWithDefaultOptimizer(self):
self._testFromScratchWithDefaultOptimizer(n_classes=4)
def _testTrainWithTwoDimsLabel(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
data_rank_2 = np.array([[0], [1]])
self.assertEqual((2,), data_rank_1.shape)
self.assertEqual((2, 1), data_rank_2.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_2,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithTwoDimsLabel(self):
self._testTrainWithTwoDimsLabel(n_classes=2)
def testMultiClassesTrainWithTwoDimsLabel(self):
self._testTrainWithTwoDimsLabel(n_classes=4)
def _testTrainWithOneDimLabel(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
self.assertEqual((2,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithOneDimLabel(self):
self._testTrainWithOneDimLabel(n_classes=2)
def testMultiClassesTrainWithOneDimLabel(self):
self._testTrainWithOneDimLabel(n_classes=4)
def _testTrainWithTwoDimsWeight(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
weight_column='w',
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
data_rank_2 = np.array([[0], [1]])
self.assertEqual((2,), data_rank_1.shape)
self.assertEqual((2, 1), data_rank_2.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1, 'w': data_rank_2}, y=data_rank_1,
batch_size=batch_size, num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithTwoDimsWeight(self):
self._testTrainWithTwoDimsWeight(n_classes=2)
def testMultiClassesTrainWithTwoDimsWeight(self):
self._testTrainWithTwoDimsWeight(n_classes=4)
def _testTrainWithOneDimWeight(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
weight_column='w',
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
self.assertEqual((2,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1, 'w': data_rank_1}, y=data_rank_1,
batch_size=batch_size, num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithOneDimWeight(self):
self._testTrainWithOneDimWeight(n_classes=2)
def testMultiClassesTrainWithOneDimWeight(self):
self._testTrainWithOneDimWeight(n_classes=4)
def _testFromScratch(self, n_classes):
label = 1
age = 17
# For binary classifier:
# loss = sigmoid_cross_entropy(logits, label) where logits=0 (weights are
# all zero initially) and label = 1 so,
# loss = 1 * -log ( sigmoid(logits) ) = 0.69315
# For multi class classifier:
# loss = cross_entropy(logits, label) where logits are all 0s (weights are
# all zero initially) and label = 1 so,
# loss = 1 * -log ( 1.0 / n_classes )
    # For this particular test case, as the logits are all equal, the formula
# 1 * -log ( 1.0 / n_classes ) covers both binary and multi class cases.
mock_optimizer = self._mock_optimizer(
expected_loss=-1 * math.log(1.0/n_classes))
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=num_steps,
expected_age_weight=[[0.]] if n_classes == 2 else [[0.] * n_classes],
expected_bias=[0.] if n_classes == 2 else [.0] * n_classes)
def testBinaryClassesFromScratch(self):
self._testFromScratch(n_classes=2)
def testMultiClassesFromScratch(self):
self._testFromScratch(n_classes=4)
def _testFromCheckpoint(self, n_classes):
# Create initial checkpoint.
label = 1
age = 17
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[2.0]] if n_classes == 2 else (
np.reshape(2.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables_lib.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables_lib.Variable(bias, name=BIAS_NAME)
variables_lib.Variable(
initial_global_step,
name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# For binary classifier:
# logits = age * age_weight + bias = 17 * 2. - 35. = -1.
# loss = sigmoid_cross_entropy(logits, label)
# so, loss = 1 * -log ( sigmoid(-1) ) = 1.3133
# For multi class classifier:
# loss = cross_entropy(logits, label)
# where logits = 17 * age_weight + bias and label = 1
# so, loss = 1 * -log ( soft_max(logits)[1] )
if n_classes == 2:
expected_loss = 1.3133
else:
logits = age_weight * age + bias
logits_exp = np.exp(logits)
softmax = logits_exp / logits_exp.sum()
expected_loss = -1 * math.log(softmax[0, label])
mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=initial_global_step + num_steps,
expected_age_weight=age_weight,
expected_bias=bias)
def testBinaryClassesFromCheckpoint(self):
self._testFromCheckpoint(n_classes=2)
def testMultiClassesFromCheckpoint(self):
self._testFromCheckpoint(n_classes=4)
def _testFromCheckpointFloatLabels(self, n_classes):
"""Tests float labels for binary classification."""
# Create initial checkpoint.
if n_classes > 2:
return
label = 0.8
age = 17
age_weight = [[2.0]]
bias = [-35.0]
initial_global_step = 100
with ops.Graph().as_default():
variables_lib.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables_lib.Variable(bias, name=BIAS_NAME)
variables_lib.Variable(
initial_global_step,
name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = age * age_weight + bias = 17 * 2. - 35. = -1.
# loss = sigmoid_cross_entropy(logits, label)
# => loss = -0.8 * log(sigmoid(-1)) -0.2 * log(sigmoid(+1)) = 1.1132617
mock_optimizer = self._mock_optimizer(expected_loss=1.1132617)
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
def testBinaryClassesFromCheckpointFloatLabels(self):
self._testFromCheckpointFloatLabels(n_classes=2)
def testMultiClassesFromCheckpointFloatLabels(self):
self._testFromCheckpointFloatLabels(n_classes=4)
def _testFromCheckpointMultiBatch(self, n_classes):
# Create initial checkpoint.
label = [1, 0]
age = [17, 18.5]
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[2.0]] if n_classes == 2 else (
np.reshape(2.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables_lib.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables_lib.Variable(bias, name=BIAS_NAME)
variables_lib.Variable(
initial_global_step,
name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# For binary classifier:
# logits = age * age_weight + bias
# logits[0] = 17 * 2. - 35. = -1.
# logits[1] = 18.5 * 2. - 35. = 2.
# loss = sigmoid_cross_entropy(logits, label)
# so, loss[0] = 1 * -log ( sigmoid(-1) ) = 1.3133
# loss[1] = (1 - 0) * -log ( 1- sigmoid(2) ) = 2.1269
# For multi class classifier:
# loss = cross_entropy(logits, label)
# where logits = [17, 18.5] * age_weight + bias and label = [1, 0]
# so, loss = 1 * -log ( soft_max(logits)[label] )
if n_classes == 2:
expected_loss = (1.3133 + 2.1269)
else:
logits = age_weight * np.reshape(age, (2, 1)) + bias
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
expected_loss = expected_loss_0 + expected_loss_1
mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': (age)}, (label)),
steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=initial_global_step + num_steps,
expected_age_weight=age_weight,
expected_bias=bias)
def testBinaryClassesFromCheckpointMultiBatch(self):
self._testFromCheckpointMultiBatch(n_classes=2)
def testMultiClassesFromCheckpointMultiBatch(self):
self._testFromCheckpointMultiBatch(n_classes=4)
class BaseLinearClassifierEvaluationTest(object):
def __init__(self, linear_classifier_fn):
self._linear_classifier_fn = linear_classifier_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _test_evaluation_for_simple_data(self, n_classes):
label = 1
age = 1.
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[-11.0]] if n_classes == 2 else (
np.reshape(-11.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-30.0] if n_classes == 2 else [-30.0] * n_classes
with ops.Graph().as_default():
variables_lib.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables_lib.Variable(bias, name=BIAS_NAME)
variables_lib.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = self._linear_classifier_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=1)
if n_classes == 2:
      # Binary classes: loss = sigmoid_cross_entropy(logits=-41, label=1) ~= 41.
expected_metrics = {
metric_keys.MetricKeys.LOSS: 41.,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: 41.,
metric_keys.MetricKeys.ACCURACY: 0.,
metric_keys.MetricKeys.PRECISION: 0.,
metric_keys.MetricKeys.RECALL: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: 0.,
metric_keys.MetricKeys.LABEL_MEAN: 1.,
metric_keys.MetricKeys.ACCURACY_BASELINE: 1,
metric_keys.MetricKeys.AUC: 0.,
metric_keys.MetricKeys.AUC_PR: 1.,
}
else:
# Multi classes: loss = 1 * -log ( soft_max(logits)[label] )
logits = age_weight * age + bias
logits_exp = np.exp(logits)
softmax = logits_exp / logits_exp.sum()
expected_loss = -1 * math.log(softmax[0, label])
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss,
metric_keys.MetricKeys.ACCURACY: 0.,
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_for_simple_data(self):
self._test_evaluation_for_simple_data(n_classes=2)
def test_multi_classes_evaluation_for_simple_data(self):
self._test_evaluation_for_simple_data(n_classes=4)
def _test_evaluation_batch(self, n_classes):
"""Tests evaluation for batch_size==2."""
label = [1, 0]
age = [17., 18.]
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[2.0]] if n_classes == 2 else (
np.reshape(2.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables_lib.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables_lib.Variable(bias, name=BIAS_NAME)
variables_lib.Variable(
initial_global_step,
name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = self._linear_classifier_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': (age)}, (label)), steps=1)
if n_classes == 2:
      # Logits are (-1., 1.), labels are (1, 0).
# Loss is
# loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
# loss for row 2: (1 - 0) * -log(1 - sigmoid(1)) = 1.3133
expected_loss = 1.3133 * 2
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
metric_keys.MetricKeys.ACCURACY: 0.,
metric_keys.MetricKeys.PRECISION: 0.,
metric_keys.MetricKeys.RECALL: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: 0.5,
metric_keys.MetricKeys.LABEL_MEAN: 0.5,
metric_keys.MetricKeys.ACCURACY_BASELINE: 0.5,
metric_keys.MetricKeys.AUC: 0.,
metric_keys.MetricKeys.AUC_PR: 0.25,
}
else:
# Multi classes: loss = 1 * -log ( soft_max(logits)[label] )
logits = age_weight * np.reshape(age, (2, 1)) + bias
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
expected_loss = expected_loss_0 + expected_loss_1
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
metric_keys.MetricKeys.ACCURACY: 0.,
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_batch(self):
self._test_evaluation_batch(n_classes=2)
def test_multi_classes_evaluation_batch(self):
self._test_evaluation_batch(n_classes=4)
def _test_evaluation_weights(self, n_classes):
"""Tests evaluation with weights."""
label = [1, 0]
age = [17., 18.]
weights = [1., 2.]
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[2.0]] if n_classes == 2 else (
np.reshape(2.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables_lib.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables_lib.Variable(bias, name=BIAS_NAME)
variables_lib.Variable(
initial_global_step,
name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = self._linear_classifier_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
weight_column='w',
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': (age), 'w': (weights)}, (label)), steps=1)
if n_classes == 2:
      # Logits are (-1., 1.), labels are (1, 0).
# Loss is
# loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
# loss for row 2: (1 - 0) * -log(1 - sigmoid(1)) = 1.3133
# weights = [1., 2.]
expected_loss = 1.3133 * (1. + 2.)
loss_mean = expected_loss / (1.0 + 2.0)
label_mean = np.average(label, weights=weights)
logits = [-1, 1]
logistics = sigmoid(np.array(logits))
predictions_mean = np.average(logistics, weights=weights)
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
metric_keys.MetricKeys.ACCURACY: 0.,
metric_keys.MetricKeys.PRECISION: 0.,
metric_keys.MetricKeys.RECALL: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: predictions_mean,
metric_keys.MetricKeys.LABEL_MEAN: label_mean,
metric_keys.MetricKeys.ACCURACY_BASELINE: (
max(label_mean, 1-label_mean)),
metric_keys.MetricKeys.AUC: 0.,
metric_keys.MetricKeys.AUC_PR: 0.1668,
}
else:
# Multi classes: unweighted_loss = 1 * -log ( soft_max(logits)[label] )
logits = age_weight * np.reshape(age, (2, 1)) + bias
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
loss_mean = np.average([expected_loss_0, expected_loss_1],
weights=weights)
expected_loss = loss_mean * np.sum(weights)
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
metric_keys.MetricKeys.ACCURACY: 0.,
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_weights(self):
self._test_evaluation_weights(n_classes=2)
def test_multi_classes_evaluation_weights(self):
self._test_evaluation_weights(n_classes=4)
class BaseLinearClassifierPredictTest(object):
def __init__(self, linear_classifier_fn):
self._linear_classifier_fn = linear_classifier_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _testPredictions(self, n_classes, label_vocabulary, label_output_fn):
"""Tests predict when all variables are one-dimensional."""
age = 1.
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[-11.0]] if n_classes == 2 else (
np.reshape(-11.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [10.0] if n_classes == 2 else [10.0] * n_classes
with ops.Graph().as_default():
variables_lib.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables_lib.Variable(bias, name=BIAS_NAME)
variables_lib.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = self._linear_classifier_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
label_vocabulary=label_vocabulary,
n_classes=n_classes,
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
x={'age': np.array([[age]])},
y=None,
batch_size=1,
num_epochs=1,
shuffle=False)
predictions = list(est.predict(input_fn=predict_input_fn))
if n_classes == 2:
scalar_logits = np.asscalar(
np.reshape(np.array(age_weight) * age + bias, (1,)))
two_classes_logits = [0, scalar_logits]
two_classes_logits_exp = np.exp(two_classes_logits)
softmax = two_classes_logits_exp / two_classes_logits_exp.sum()
expected_predictions = {
'class_ids': [0],
'classes': [label_output_fn(0)],
'logistic': [sigmoid(np.array(scalar_logits))],
'logits': [scalar_logits],
'probabilities': softmax,
}
else:
onedim_logits = np.reshape(np.array(age_weight) * age + bias, (-1,))
class_ids = onedim_logits.argmax()
logits_exp = np.exp(onedim_logits)
softmax = logits_exp / logits_exp.sum()
expected_predictions = {
'class_ids': [class_ids],
'classes': [label_output_fn(class_ids)],
'logits': onedim_logits,
'probabilities': softmax,
}
self.assertEqual(1, len(predictions))
# assertAllClose cannot handle byte type.
self.assertEqual(expected_predictions['classes'], predictions[0]['classes'])
expected_predictions.pop('classes')
predictions[0].pop('classes')
self.assertAllClose(sorted_key_dict(expected_predictions),
sorted_key_dict(predictions[0]))
def testBinaryClassesWithoutLabelVocabulary(self):
n_classes = 2
self._testPredictions(n_classes,
label_vocabulary=None,
label_output_fn=lambda x: ('%s' % x).encode())
def testBinaryClassesWithLabelVocabulary(self):
n_classes = 2
self._testPredictions(
n_classes,
label_vocabulary=['class_vocab_{}'.format(i)
for i in range(n_classes)],
label_output_fn=lambda x: ('class_vocab_%s' % x).encode())
def testMultiClassesWithoutLabelVocabulary(self):
n_classes = 4
self._testPredictions(
n_classes,
label_vocabulary=None,
label_output_fn=lambda x: ('%s' % x).encode())
def testMultiClassesWithLabelVocabulary(self):
n_classes = 4
self._testPredictions(
n_classes,
label_vocabulary=['class_vocab_{}'.format(i)
for i in range(n_classes)],
label_output_fn=lambda x: ('class_vocab_%s' % x).encode())
def testSparseCombiner(self):
w_a = 2.0
w_b = 3.0
w_c = 5.0
bias = 5.0
with ops.Graph().as_default():
variables_lib.Variable([[w_a], [w_b], [w_c]], name=LANGUAGE_WEIGHT_NAME)
variables_lib.Variable([bias], name=BIAS_NAME)
variables_lib.Variable(1, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
def _input_fn():
return dataset_ops.Dataset.from_tensors({
'language': sparse_tensor.SparseTensor(
values=['a', 'c', 'b', 'c'],
indices=[[0, 0], [0, 1], [1, 0], [1, 1]],
dense_shape=[2, 2]),
})
feature_columns = (
feature_column_lib.categorical_column_with_vocabulary_list(
'language', vocabulary_list=['a', 'b', 'c']),)
# Check prediction for each sparse_combiner.
# With sparse_combiner = 'sum', we have
# logits_1 = w_a + w_c + bias
# = 2.0 + 5.0 + 5.0 = 12.0
# logits_2 = w_b + w_c + bias
# = 3.0 + 5.0 + 5.0 = 13.0
linear_classifier = self._linear_classifier_fn(
feature_columns=feature_columns,
model_dir=self._model_dir)
predictions = linear_classifier.predict(input_fn=_input_fn)
predicted_scores = list([x['logits'] for x in predictions])
self.assertAllClose([[12.0], [13.0]], predicted_scores)
# With sparse_combiner = 'mean', we have
# logits_1 = 1/2 * (w_a + w_c) + bias
# = 1/2 * (2.0 + 5.0) + 5.0 = 8.5
# logits_2 = 1/2 * (w_b + w_c) + bias
# = 1/2 * (3.0 + 5.0) + 5.0 = 9.0
linear_classifier = self._linear_classifier_fn(
feature_columns=feature_columns,
model_dir=self._model_dir,
sparse_combiner='mean')
predictions = linear_classifier.predict(input_fn=_input_fn)
predicted_scores = list([x['logits'] for x in predictions])
self.assertAllClose([[8.5], [9.0]], predicted_scores)
# With sparse_combiner = 'sqrtn', we have
# logits_1 = sqrt(2)/2 * (w_a + w_c) + bias
# = sqrt(2)/2 * (2.0 + 5.0) + 5.0 = 9.94974
# logits_2 = sqrt(2)/2 * (w_b + w_c) + bias
# = sqrt(2)/2 * (3.0 + 5.0) + 5.0 = 10.65685
linear_classifier = self._linear_classifier_fn(
feature_columns=feature_columns,
model_dir=self._model_dir,
sparse_combiner='sqrtn')
predictions = linear_classifier.predict(input_fn=_input_fn)
predicted_scores = list([x['logits'] for x in predictions])
self.assertAllClose([[9.94974], [10.65685]], predicted_scores)
class BaseLinearClassifierIntegrationTest(object):
def __init__(self, linear_classifier_fn):
self._linear_classifier_fn = linear_classifier_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, n_classes, train_input_fn, eval_input_fn,
predict_input_fn, input_dimension, prediction_length):
feature_columns = [
feature_column_lib.numeric_column('x', shape=(input_dimension,))
]
est = self._linear_classifier_fn(
feature_columns=feature_columns,
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
# learn y = x
est.train(train_input_fn, steps=200)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(200, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))
# PREDICT
predictions = np.array(
[x['classes'] for x in est.predict(predict_input_fn)])
self.assertAllEqual((prediction_length, 1), predictions.shape)
# EXPORT
feature_spec = feature_column_lib.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def _test_numpy_input_fn(self, n_classes):
"""Tests complete flow with numpy_input_fn."""
input_dimension = 4
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
target = np.array([1] * batch_size)
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=target,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=target,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_numpy_input_fn(self):
self._test_numpy_input_fn(n_classes=2)
def test_multi_classes_numpy_input_fn(self):
self._test_numpy_input_fn(n_classes=4)
def _test_pandas_input_fn(self, n_classes):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
    # Pandas DataFrame naturally supports 1-dim data only.
input_dimension = 1
batch_size = 10
data = np.array([1., 2., 3., 4.], dtype=np.float32)
target = np.array([1, 0, 1, 0], dtype=np.int32)
x = pd.DataFrame({'x': data})
y = pd.Series(target)
prediction_length = 4
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_pandas_input_fn(self):
self._test_pandas_input_fn(n_classes=2)
def test_multi_classes_pandas_input_fn(self):
self._test_pandas_input_fn(n_classes=4)
def _test_input_fn_from_parse_example(self, n_classes):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
target = np.array([1] * batch_size, dtype=np.int64)
serialized_examples = []
for x, y in zip(data, target):
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=x)),
'y':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=[y])),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([1], dtypes.int64),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_input_fn_from_parse_example(self):
self._test_input_fn_from_parse_example(n_classes=2)
def test_multi_classes_input_fn_from_parse_example(self):
self._test_input_fn_from_parse_example(n_classes=4)
class BaseLinearLogitFnTest(object):
def test_basic_logit_correctness(self):
"""linear_logit_fn simply wraps feature_column_lib.linear_model."""
age = feature_column_lib.numeric_column('age')
with ops.Graph().as_default():
logit_fn = linear._linear_logit_fn_builder(units=2, feature_columns=[age])
logits = logit_fn(features={'age': [[23.], [31.]]})
bias_var = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
'linear_model/bias_weights')[0]
age_var = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
'linear_model/age')[0]
with tf_session.Session() as sess:
sess.run([variables_lib.global_variables_initializer()])
self.assertAllClose([[0., 0.], [0., 0.]], logits.eval())
sess.run(bias_var.assign([10., 5.]))
self.assertAllClose([[10., 5.], [10., 5.]], logits.eval())
sess.run(age_var.assign([[2.0, 3.0]]))
# [2 * 23 + 10, 3 * 23 + 5] = [56, 74].
# [2 * 31 + 10, 3 * 31 + 5] = [72, 98]
self.assertAllClose([[56., 74.], [72., 98.]], logits.eval())
def test_compute_fraction_of_zero(self):
"""Tests the calculation of sparsity."""
age = feature_column_lib.numeric_column('age')
occupation = feature_column_lib.categorical_column_with_hash_bucket(
'occupation', hash_bucket_size=5)
with ops.Graph().as_default():
cols_to_vars = {}
feature_column_lib.linear_model(
features={
'age': [[23.], [31.]],
'occupation': [['doctor'], ['engineer']]
},
feature_columns=[age, occupation],
units=3,
cols_to_vars=cols_to_vars)
cols_to_vars.pop('bias')
fraction_zero = linear._compute_fraction_of_zero(cols_to_vars)
age_var = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
'linear_model/age')[0]
with tf_session.Session() as sess:
sess.run([variables_lib.global_variables_initializer()])
# Upon initialization, all variables will be zero.
self.assertAllClose(1, fraction_zero.eval())
sess.run(age_var.assign([[2.0, 0.0, -1.0]]))
        # 1 of the 3 age weights is zero, and all 15 occupation weights (5 hash
        # buckets x 3-dim output) are zero, so 16 of the 18 weights are zero.
self.assertAllClose(16. / 18., fraction_zero.eval())
class BaseLinearWarmStartingTest(object):
def __init__(self, _linear_classifier_fn, _linear_regressor_fn):
self._linear_classifier_fn = _linear_classifier_fn
self._linear_regressor_fn = _linear_regressor_fn
def setUp(self):
# Create a directory to save our old checkpoint and vocabularies to.
self._ckpt_and_vocab_dir = tempfile.mkdtemp()
# Make a dummy input_fn.
def _input_fn():
features = {
'age': [[23.], [31.]],
'age_in_years': [[23.], [31.]],
'occupation': [['doctor'], ['consultant']]
}
return features, [0, 1]
self._input_fn = _input_fn
def tearDown(self):
# Clean up checkpoint / vocab dir.
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._ckpt_and_vocab_dir)
def test_classifier_basic_warm_starting(self):
"""Tests correctness of LinearClassifier default warm-start."""
age = feature_column_lib.numeric_column('age')
# Create a LinearClassifier and train to save a checkpoint.
linear_classifier = self._linear_classifier_fn(
feature_columns=[age],
model_dir=self._ckpt_and_vocab_dir,
n_classes=4,
optimizer='SGD')
linear_classifier.train(input_fn=self._input_fn, max_steps=1)
# Create a second LinearClassifier, warm-started from the first. Use a
# learning_rate = 0.0 optimizer to check values (use SGD so we don't have
# accumulator values that change).
warm_started_linear_classifier = self._linear_classifier_fn(
feature_columns=[age],
n_classes=4,
optimizer=gradient_descent.GradientDescentOptimizer(learning_rate=0.0),
warm_start_from=linear_classifier.model_dir)
warm_started_linear_classifier.train(input_fn=self._input_fn, max_steps=1)
for variable_name in warm_started_linear_classifier.get_variable_names():
self.assertAllClose(
linear_classifier.get_variable_value(variable_name),
warm_started_linear_classifier.get_variable_value(variable_name))
def test_regressor_basic_warm_starting(self):
"""Tests correctness of LinearRegressor default warm-start."""
age = feature_column_lib.numeric_column('age')
# Create a LinearRegressor and train to save a checkpoint.
linear_regressor = self._linear_regressor_fn(
feature_columns=[age],
model_dir=self._ckpt_and_vocab_dir,
optimizer='SGD')
linear_regressor.train(input_fn=self._input_fn, max_steps=1)
# Create a second LinearRegressor, warm-started from the first. Use a
# learning_rate = 0.0 optimizer to check values (use SGD so we don't have
# accumulator values that change).
warm_started_linear_regressor = self._linear_regressor_fn(
feature_columns=[age],
optimizer=gradient_descent.GradientDescentOptimizer(learning_rate=0.0),
warm_start_from=linear_regressor.model_dir)
warm_started_linear_regressor.train(input_fn=self._input_fn, max_steps=1)
for variable_name in warm_started_linear_regressor.get_variable_names():
self.assertAllClose(
linear_regressor.get_variable_value(variable_name),
warm_started_linear_regressor.get_variable_value(variable_name))
def test_warm_starting_selective_variables(self):
"""Tests selecting variables to warm-start."""
age = feature_column_lib.numeric_column('age')
# Create a LinearClassifier and train to save a checkpoint.
linear_classifier = self._linear_classifier_fn(
feature_columns=[age],
model_dir=self._ckpt_and_vocab_dir,
n_classes=4,
optimizer='SGD')
linear_classifier.train(input_fn=self._input_fn, max_steps=1)
# Create a second LinearClassifier, warm-started from the first. Use a
# learning_rate = 0.0 optimizer to check values (use SGD so we don't have
# accumulator values that change).
warm_started_linear_classifier = self._linear_classifier_fn(
feature_columns=[age],
n_classes=4,
optimizer=gradient_descent.GradientDescentOptimizer(learning_rate=0.0),
# The provided regular expression will only warm-start the age variable
# and not the bias.
warm_start_from=estimator.WarmStartSettings(
ckpt_to_initialize_from=linear_classifier.model_dir,
vars_to_warm_start='.*(age).*'))
warm_started_linear_classifier.train(input_fn=self._input_fn, max_steps=1)
self.assertAllClose(
linear_classifier.get_variable_value(AGE_WEIGHT_NAME),
warm_started_linear_classifier.get_variable_value(AGE_WEIGHT_NAME))
# Bias should still be zero from initialization.
self.assertAllClose(
[0.0] * 4, warm_started_linear_classifier.get_variable_value(BIAS_NAME))
def test_warm_starting_with_vocab_remapping_and_partitioning(self):
"""Tests warm-starting with vocab remapping and partitioning."""
vocab_list = ['doctor', 'lawyer', 'consultant']
vocab_file = os.path.join(self._ckpt_and_vocab_dir, 'occupation_vocab')
with open(vocab_file, 'w') as f:
f.write('\n'.join(vocab_list))
occupation = feature_column_lib.categorical_column_with_vocabulary_file(
'occupation',
vocabulary_file=vocab_file,
vocabulary_size=len(vocab_list))
# Create a LinearClassifier and train to save a checkpoint.
partitioner = partitioned_variables.fixed_size_partitioner(num_shards=2)
linear_classifier = self._linear_classifier_fn(
feature_columns=[occupation],
model_dir=self._ckpt_and_vocab_dir,
n_classes=4,
optimizer='SGD',
partitioner=partitioner)
linear_classifier.train(input_fn=self._input_fn, max_steps=1)
# Create a second LinearClassifier, warm-started from the first. Use a
# learning_rate = 0.0 optimizer to check values (use SGD so we don't have
# accumulator values that change). Use a new FeatureColumn with a
# different vocabulary for occupation.
new_vocab_list = ['doctor', 'consultant', 'engineer']
new_vocab_file = os.path.join(self._ckpt_and_vocab_dir,
'new_occupation_vocab')
with open(new_vocab_file, 'w') as f:
f.write('\n'.join(new_vocab_list))
new_occupation = feature_column_lib.categorical_column_with_vocabulary_file(
'occupation',
vocabulary_file=new_vocab_file,
vocabulary_size=len(new_vocab_list))
# We can create our VocabInfo object from the new and old occupation
# FeatureColumn's.
occupation_vocab_info = estimator.VocabInfo(
new_vocab=new_occupation.vocabulary_file,
new_vocab_size=new_occupation.vocabulary_size,
num_oov_buckets=new_occupation.num_oov_buckets,
old_vocab=occupation.vocabulary_file,
old_vocab_size=occupation.vocabulary_size,
# Can't use constant_initializer with load_and_remap. In practice,
# use a truncated normal initializer.
backup_initializer=init_ops.random_uniform_initializer(
minval=0.39, maxval=0.39))
warm_started_linear_classifier = self._linear_classifier_fn(
feature_columns=[occupation],
n_classes=4,
optimizer=gradient_descent.GradientDescentOptimizer(learning_rate=0.0),
warm_start_from=estimator.WarmStartSettings(
ckpt_to_initialize_from=linear_classifier.model_dir,
var_name_to_vocab_info={
OCCUPATION_WEIGHT_NAME: occupation_vocab_info
},
# Explicitly providing None here will only warm-start variables
# referenced in var_name_to_vocab_info (the bias will not be
# warm-started).
vars_to_warm_start=None),
partitioner=partitioner)
warm_started_linear_classifier.train(input_fn=self._input_fn, max_steps=1)
# 'doctor' was ID-0 and still ID-0.
self.assertAllClose(
linear_classifier.get_variable_value(OCCUPATION_WEIGHT_NAME)[0, :],
warm_started_linear_classifier.get_variable_value(
OCCUPATION_WEIGHT_NAME)[0, :])
# 'consultant' was ID-2 and now ID-1.
self.assertAllClose(
linear_classifier.get_variable_value(OCCUPATION_WEIGHT_NAME)[2, :],
warm_started_linear_classifier.get_variable_value(
OCCUPATION_WEIGHT_NAME)[1, :])
# 'engineer' is a new entry and should be initialized with the
# backup_initializer in VocabInfo.
self.assertAllClose([0.39] * 4,
warm_started_linear_classifier.get_variable_value(
OCCUPATION_WEIGHT_NAME)[2, :])
# Bias should still be zero (from initialization logic).
self.assertAllClose(
[0.0] * 4, warm_started_linear_classifier.get_variable_value(BIAS_NAME))
def test_warm_starting_with_naming_change(self):
"""Tests warm-starting with a Tensor name remapping."""
age_in_years = feature_column_lib.numeric_column('age_in_years')
# Create a LinearClassifier and train to save a checkpoint.
linear_classifier = self._linear_classifier_fn(
feature_columns=[age_in_years],
model_dir=self._ckpt_and_vocab_dir,
n_classes=4,
optimizer='SGD')
linear_classifier.train(input_fn=self._input_fn, max_steps=1)
# Create a second LinearClassifier, warm-started from the first. Use a
# learning_rate = 0.0 optimizer to check values (use SGD so we don't have
# accumulator values that change).
warm_started_linear_classifier = self._linear_classifier_fn(
feature_columns=[feature_column_lib.numeric_column('age')],
n_classes=4,
optimizer=gradient_descent.GradientDescentOptimizer(learning_rate=0.0),
        # The 'age' variable corresponds to the 'age_in_years' variable in the
        # previous model.
warm_start_from=estimator.WarmStartSettings(
ckpt_to_initialize_from=linear_classifier.model_dir,
var_name_to_prev_var_name={
AGE_WEIGHT_NAME: AGE_WEIGHT_NAME.replace('age', 'age_in_years')
}))
warm_started_linear_classifier.train(input_fn=self._input_fn, max_steps=1)
self.assertAllClose(
linear_classifier.get_variable_value(
AGE_WEIGHT_NAME.replace('age', 'age_in_years')),
warm_started_linear_classifier.get_variable_value(AGE_WEIGHT_NAME))
# The bias is also warm-started (with no name remapping).
self.assertAllClose(
linear_classifier.get_variable_value(BIAS_NAME),
warm_started_linear_classifier.get_variable_value(BIAS_NAME))
|
apache-2.0
|
ueshin/apache-spark
|
python/pyspark/pandas/tests/test_repr.py
|
15
|
7832
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from pyspark import pandas as ps
from pyspark.pandas.config import set_option, reset_option, option_context
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class ReprTest(PandasOnSparkTestCase):
max_display_count = 23
@classmethod
def setUpClass(cls):
super().setUpClass()
set_option("display.max_rows", ReprTest.max_display_count)
@classmethod
def tearDownClass(cls):
reset_option("display.max_rows")
super().tearDownClass()
def test_repr_dataframe(self):
psdf = ps.range(ReprTest.max_display_count)
self.assertTrue("Showing only the first" not in repr(psdf))
self.assert_eq(repr(psdf), repr(psdf.to_pandas()))
psdf = ps.range(ReprTest.max_display_count + 1)
self.assertTrue("Showing only the first" in repr(psdf))
self.assertTrue(
repr(psdf).startswith(repr(psdf.to_pandas().head(ReprTest.max_display_count)))
)
with option_context("display.max_rows", None):
psdf = ps.range(ReprTest.max_display_count + 1)
self.assert_eq(repr(psdf), repr(psdf.to_pandas()))
def test_repr_series(self):
psser = ps.range(ReprTest.max_display_count).id
self.assertTrue("Showing only the first" not in repr(psser))
self.assert_eq(repr(psser), repr(psser.to_pandas()))
psser = ps.range(ReprTest.max_display_count + 1).id
self.assertTrue("Showing only the first" in repr(psser))
self.assertTrue(
repr(psser).startswith(repr(psser.to_pandas().head(ReprTest.max_display_count)))
)
with option_context("display.max_rows", None):
psser = ps.range(ReprTest.max_display_count + 1).id
self.assert_eq(repr(psser), repr(psser.to_pandas()))
psser = ps.range(ReprTest.max_display_count).id.rename()
self.assertTrue("Showing only the first" not in repr(psser))
self.assert_eq(repr(psser), repr(psser.to_pandas()))
psser = ps.range(ReprTest.max_display_count + 1).id.rename()
self.assertTrue("Showing only the first" in repr(psser))
self.assertTrue(
repr(psser).startswith(repr(psser.to_pandas().head(ReprTest.max_display_count)))
)
with option_context("display.max_rows", None):
psser = ps.range(ReprTest.max_display_count + 1).id.rename()
self.assert_eq(repr(psser), repr(psser.to_pandas()))
psser = ps.MultiIndex.from_tuples(
[(100 * i, i) for i in range(ReprTest.max_display_count)]
).to_series()
self.assertTrue("Showing only the first" not in repr(psser))
self.assert_eq(repr(psser), repr(psser.to_pandas()))
psser = ps.MultiIndex.from_tuples(
[(100 * i, i) for i in range(ReprTest.max_display_count + 1)]
).to_series()
self.assertTrue("Showing only the first" in repr(psser))
self.assertTrue(
repr(psser).startswith(repr(psser.to_pandas().head(ReprTest.max_display_count)))
)
with option_context("display.max_rows", None):
psser = ps.MultiIndex.from_tuples(
[(100 * i, i) for i in range(ReprTest.max_display_count + 1)]
).to_series()
self.assert_eq(repr(psser), repr(psser.to_pandas()))
def test_repr_indexes(self):
psidx = ps.range(ReprTest.max_display_count).index
self.assertTrue("Showing only the first" not in repr(psidx))
self.assert_eq(repr(psidx), repr(psidx.to_pandas()))
psidx = ps.range(ReprTest.max_display_count + 1).index
self.assertTrue("Showing only the first" in repr(psidx))
self.assertTrue(
repr(psidx).startswith(
repr(psidx.to_pandas().to_series().head(ReprTest.max_display_count).index)
)
)
with option_context("display.max_rows", None):
psidx = ps.range(ReprTest.max_display_count + 1).index
self.assert_eq(repr(psidx), repr(psidx.to_pandas()))
psidx = ps.MultiIndex.from_tuples([(100 * i, i) for i in range(ReprTest.max_display_count)])
self.assertTrue("Showing only the first" not in repr(psidx))
self.assert_eq(repr(psidx), repr(psidx.to_pandas()))
psidx = ps.MultiIndex.from_tuples(
[(100 * i, i) for i in range(ReprTest.max_display_count + 1)]
)
self.assertTrue("Showing only the first" in repr(psidx))
self.assertTrue(
repr(psidx).startswith(
repr(psidx.to_pandas().to_frame().head(ReprTest.max_display_count).index)
)
)
with option_context("display.max_rows", None):
psidx = ps.MultiIndex.from_tuples(
[(100 * i, i) for i in range(ReprTest.max_display_count + 1)]
)
self.assert_eq(repr(psidx), repr(psidx.to_pandas()))
def test_html_repr(self):
psdf = ps.range(ReprTest.max_display_count)
self.assertTrue("Showing only the first" not in psdf._repr_html_())
self.assertEqual(psdf._repr_html_(), psdf.to_pandas()._repr_html_())
psdf = ps.range(ReprTest.max_display_count + 1)
self.assertTrue("Showing only the first" in psdf._repr_html_())
with option_context("display.max_rows", None):
psdf = ps.range(ReprTest.max_display_count + 1)
self.assertEqual(psdf._repr_html_(), psdf.to_pandas()._repr_html_())
def test_repr_float_index(self):
psdf = ps.DataFrame(
{"a": np.random.rand(ReprTest.max_display_count)},
index=np.random.rand(ReprTest.max_display_count),
)
self.assertTrue("Showing only the first" not in repr(psdf))
self.assert_eq(repr(psdf), repr(psdf.to_pandas()))
self.assertTrue("Showing only the first" not in repr(psdf.a))
self.assert_eq(repr(psdf.a), repr(psdf.a.to_pandas()))
self.assertTrue("Showing only the first" not in repr(psdf.index))
self.assert_eq(repr(psdf.index), repr(psdf.index.to_pandas()))
self.assertTrue("Showing only the first" not in psdf._repr_html_())
self.assertEqual(psdf._repr_html_(), psdf.to_pandas()._repr_html_())
psdf = ps.DataFrame(
{"a": np.random.rand(ReprTest.max_display_count + 1)},
index=np.random.rand(ReprTest.max_display_count + 1),
)
self.assertTrue("Showing only the first" in repr(psdf))
self.assertTrue("Showing only the first" in repr(psdf.a))
self.assertTrue("Showing only the first" in repr(psdf.index))
self.assertTrue("Showing only the first" in psdf._repr_html_())
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_repr import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
apache-2.0
|
mblondel/scikit-learn
|
sklearn/tests/test_lda.py
|
11
|
5671
|
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_blobs
from sklearn import lda
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
"""Test LDA classification.
This checks that LDA implements fit and predict and returns correct values
for simple toy data.
"""
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = lda.LDA(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = lda.LDA(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = lda.LDA(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = lda.LDA(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = lda.LDA(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_coefs():
"""Test if the coefficients of the solvers are approximately the same.
"""
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = lda.LDA(solver="svd")
clf_lda_lsqr = lda.LDA(solver="lsqr")
clf_lda_eigen = lda.LDA(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
"""Test LDA transform.
"""
clf = lda.LDA(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = lda.LDA(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = lda.LDA(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1**2))
d2 /= np.sqrt(np.sum(d2**2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
"""Test if classification works correctly with differently scaled features.
"""
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = lda.LDA(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
|
bsd-3-clause
|
dancingdan/tensorflow
|
tensorflow/tools/dist_test/python/census_widendeep.py
|
48
|
11896
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distributed training and evaluation of a wide and deep model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import sys
from six.moves import urllib
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn.estimators import run_config
# Constants: Data download URLs
TRAIN_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.data"
TEST_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.test"
# Define features for the model
def census_model_config():
"""Configuration for the census Wide & Deep model.
Returns:
columns: Column names to retrieve from the data source
label_column: Name of the label column
wide_columns: List of wide columns
deep_columns: List of deep columns
categorical_column_names: Names of the categorical columns
continuous_column_names: Names of the continuous columns
"""
# 1. Categorical base columns.
gender = tf.contrib.layers.sparse_column_with_keys(
column_name="gender", keys=["female", "male"])
race = tf.contrib.layers.sparse_column_with_keys(
column_name="race",
keys=["Amer-Indian-Eskimo",
"Asian-Pac-Islander",
"Black",
"Other",
"White"])
education = tf.contrib.layers.sparse_column_with_hash_bucket(
"education", hash_bucket_size=1000)
marital_status = tf.contrib.layers.sparse_column_with_hash_bucket(
"marital_status", hash_bucket_size=100)
relationship = tf.contrib.layers.sparse_column_with_hash_bucket(
"relationship", hash_bucket_size=100)
workclass = tf.contrib.layers.sparse_column_with_hash_bucket(
"workclass", hash_bucket_size=100)
occupation = tf.contrib.layers.sparse_column_with_hash_bucket(
"occupation", hash_bucket_size=1000)
native_country = tf.contrib.layers.sparse_column_with_hash_bucket(
"native_country", hash_bucket_size=1000)
# 2. Continuous base columns.
age = tf.contrib.layers.real_valued_column("age")
age_buckets = tf.contrib.layers.bucketized_column(
age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
education_num = tf.contrib.layers.real_valued_column("education_num")
capital_gain = tf.contrib.layers.real_valued_column("capital_gain")
capital_loss = tf.contrib.layers.real_valued_column("capital_loss")
hours_per_week = tf.contrib.layers.real_valued_column("hours_per_week")
wide_columns = [
gender, native_country, education, occupation, workclass,
marital_status, relationship, age_buckets,
tf.contrib.layers.crossed_column([education, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column([native_country, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column([age_buckets, race, occupation],
hash_bucket_size=int(1e6))]
deep_columns = [
tf.contrib.layers.embedding_column(workclass, dimension=8),
tf.contrib.layers.embedding_column(education, dimension=8),
tf.contrib.layers.embedding_column(marital_status, dimension=8),
tf.contrib.layers.embedding_column(gender, dimension=8),
tf.contrib.layers.embedding_column(relationship, dimension=8),
tf.contrib.layers.embedding_column(race, dimension=8),
tf.contrib.layers.embedding_column(native_country, dimension=8),
tf.contrib.layers.embedding_column(occupation, dimension=8),
age, education_num, capital_gain, capital_loss, hours_per_week]
# Define the column names for the data sets.
columns = ["age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week",
"native_country", "income_bracket"]
label_column = "label"
categorical_columns = ["workclass", "education", "marital_status",
"occupation", "relationship", "race", "gender",
"native_country"]
continuous_columns = ["age", "education_num", "capital_gain",
"capital_loss", "hours_per_week"]
return (columns, label_column, wide_columns, deep_columns,
categorical_columns, continuous_columns)
class CensusDataSource(object):
"""Source of census data."""
def __init__(self, data_dir, train_data_url, test_data_url,
columns, label_column,
categorical_columns, continuous_columns):
"""Constructor of CensusDataSource.
Args:
data_dir: Directory to save/load the data files
train_data_url: URL from which the training data can be downloaded
test_data_url: URL from which the test data can be downloaded
columns: Columns to retrieve from the data files (A list of strings)
label_column: Name of the label column
categorical_columns: Names of the categorical columns (A list of strings)
continuous_columns: Names of the continuous columns (A list of strings)
"""
# Retrieve data from disk (if available) or download from the web.
train_file_path = os.path.join(data_dir, "adult.data")
if os.path.isfile(train_file_path):
print("Loading training data from file: %s" % train_file_path)
train_file = open(train_file_path)
    else:
      urllib.urlretrieve(train_data_url, train_file_path)
      train_file = open(train_file_path)
test_file_path = os.path.join(data_dir, "adult.test")
if os.path.isfile(test_file_path):
print("Loading test data from file: %s" % test_file_path)
test_file = open(test_file_path)
    else:
      urllib.urlretrieve(test_data_url, test_file_path)
      test_file = open(test_file_path)
# Read the training and testing data sets into Pandas DataFrame.
import pandas # pylint: disable=g-import-not-at-top
self._df_train = pandas.read_csv(train_file, names=columns,
skipinitialspace=True)
self._df_test = pandas.read_csv(test_file, names=columns,
skipinitialspace=True, skiprows=1)
# Remove the NaN values in the last rows of the tables
self._df_train = self._df_train[:-1]
self._df_test = self._df_test[:-1]
# Apply the threshold to get the labels.
income_thresh = lambda x: ">50K" in x
self._df_train[label_column] = (
self._df_train["income_bracket"].apply(income_thresh)).astype(int)
self._df_test[label_column] = (
self._df_test["income_bracket"].apply(income_thresh)).astype(int)
self.label_column = label_column
self.categorical_columns = categorical_columns
self.continuous_columns = continuous_columns
def input_train_fn(self):
return self._input_fn(self._df_train)
def input_test_fn(self):
return self._input_fn(self._df_test)
# TODO(cais): Turn into minibatch feeder
def _input_fn(self, df):
"""Input data function.
Creates a dictionary mapping from each continuous feature column name
(k) to the values of that column stored in a constant Tensor.
Args:
df: data feed
Returns:
feature columns and labels
"""
continuous_cols = {k: tf.constant(df[k].values)
for k in self.continuous_columns}
# Creates a dictionary mapping from each categorical feature column name (k)
# to the values of that column stored in a tf.SparseTensor.
categorical_cols = {
k: tf.SparseTensor(
indices=[[i, 0] for i in range(df[k].size)],
values=df[k].values,
dense_shape=[df[k].size, 1])
for k in self.categorical_columns}
# Merges the two dictionaries into one.
feature_cols = dict(continuous_cols.items() + categorical_cols.items())
# Converts the label column into a constant Tensor.
label = tf.constant(df[self.label_column].values)
# Returns the feature columns and the label.
return feature_cols, label
def _create_experiment_fn(output_dir): # pylint: disable=unused-argument
"""Experiment creation function."""
(columns, label_column, wide_columns, deep_columns, categorical_columns,
continuous_columns) = census_model_config()
census_data_source = CensusDataSource(FLAGS.data_dir,
TRAIN_DATA_URL, TEST_DATA_URL,
columns, label_column,
categorical_columns,
continuous_columns)
os.environ["TF_CONFIG"] = json.dumps({
"cluster": {
tf.contrib.learn.TaskType.PS: ["fake_ps"] *
FLAGS.num_parameter_servers
},
"task": {
"index": FLAGS.worker_index
}
})
config = run_config.RunConfig(master=FLAGS.master_grpc_url)
estimator = tf.contrib.learn.DNNLinearCombinedClassifier(
model_dir=FLAGS.model_dir,
linear_feature_columns=wide_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[5],
config=config)
return tf.contrib.learn.Experiment(
estimator=estimator,
train_input_fn=census_data_source.input_train_fn,
eval_input_fn=census_data_source.input_test_fn,
train_steps=FLAGS.train_steps,
eval_steps=FLAGS.eval_steps
)
def main(unused_argv):
print("Worker index: %d" % FLAGS.worker_index)
learn_runner.run(experiment_fn=_create_experiment_fn,
output_dir=FLAGS.output_dir,
schedule=FLAGS.schedule)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--data_dir",
type=str,
default="/tmp/census-data",
help="Directory for storing the census data")
parser.add_argument(
"--model_dir",
type=str,
default="/tmp/census_wide_and_deep_model",
help="Directory for storing the model"
)
parser.add_argument(
"--output_dir",
type=str,
default="",
help="Base output directory."
)
parser.add_argument(
"--schedule",
type=str,
default="local_run",
help="Schedule to run for this experiment."
)
parser.add_argument(
"--master_grpc_url",
type=str,
default="",
help="URL to master GRPC tensorflow server, e.g.,grpc://127.0.0.1:2222"
)
parser.add_argument(
"--num_parameter_servers",
type=int,
default=0,
help="Number of parameter servers"
)
parser.add_argument(
"--worker_index",
type=int,
default=0,
help="Worker index (>=0)"
)
parser.add_argument(
"--train_steps",
type=int,
default=1000,
help="Number of training steps"
)
parser.add_argument(
"--eval_steps",
type=int,
default=1,
help="Number of evaluation steps"
)
global FLAGS # pylint:disable=global-at-module-level
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
apache-2.0
|
lthurlow/Network-Grapher
|
proj/external/matplotlib-1.2.1/examples/animation/histogram.py
|
6
|
1702
|
"""
This example shows how to use a path patch to draw a bunch of
rectangles for an animated histogram
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.path as path
import matplotlib.animation as animation
fig = plt.figure()
ax = fig.add_subplot(111)
# histogram our data with numpy
data = np.random.randn(1000)
n, bins = np.histogram(data, 100)
# get the corners of the rectangles for the histogram
left = np.array(bins[:-1])
right = np.array(bins[1:])
bottom = np.zeros(len(left))
top = bottom + n
nrects = len(left)
# here comes the tricky part -- we have to set up the vertex and path
# codes arrays using moveto, lineto and closepoly
# for each rect: 1 for the MOVETO, 3 for the LINETO, 1 for the
# CLOSEPOLY; the vert for the closepoly is ignored but we still need
# it to keep the codes aligned with the vertices
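# For one rectangle the five (vertex, code) pairs built below are:
#   (left, bottom)  MOVETO
#   (left, top)     LINETO
#   (right, top)    LINETO
#   (right, bottom) LINETO
#   (0, 0)          CLOSEPOLY  (vertex ignored)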
nverts = nrects*(1+3+1)
verts = np.zeros((nverts, 2))
codes = np.ones(nverts, int) * path.Path.LINETO
codes[0::5] = path.Path.MOVETO
codes[4::5] = path.Path.CLOSEPOLY
verts[0::5,0] = left
verts[0::5,1] = bottom
verts[1::5,0] = left
verts[1::5,1] = top
verts[2::5,0] = right
verts[2::5,1] = top
verts[3::5,0] = right
verts[3::5,1] = bottom
barpath = path.Path(verts, codes)
patch = patches.PathPatch(barpath, facecolor='green', edgecolor='yellow', alpha=0.5)
ax.add_patch(patch)
ax.set_xlim(left[0], right[-1])
ax.set_ylim(bottom.min(), top.max())
def animate(i):
# simulate new data coming in
data = np.random.randn(1000)
n, bins = np.histogram(data, 100)
top = bottom + n
verts[1::5,1] = top
verts[2::5,1] = top
ani = animation.FuncAnimation(fig, animate, 100, repeat=False)
plt.show()
|
mit
|
cjayb/kingjr_natmeg_arhus
|
JR_toolbox/skl_ica.py
|
2
|
16639
|
print("######################################################################")
print("# Parallel n-split k-stratified-fold continuous SVM Scikitlearn MVPA #")
print("# (c) Jean-Remi King 2012, jeanremi.king [at] gmail [dot] com #")
print("######################################################################")
# Implementation of a multivariate pattern analysis based on the scikit-learn
# toolbox (http://scikit-learn.org/stable/). It reads two .mat files
# (filenameX, filenamey) created by 'jr_classify.m'
#
# Function:
# skl_king_parallel.py filenameX filenamey [number_of_cores]
#
# Inputs:
# in filenameX:
# Xm: samples x features x classification matrix (e.g. trials x
# chans x time)
# in filenamey:
# y: vector indicating the class of each sample. Negative values
# will be used for generalization only. 0 indicates to-be-
# ignored samples.
# y2: cost/weights applied on each sample
# path: export directory
# nameX: export filename X
# namey: export filename y
# folding: type of folding (e.g. stratified)
# n_splits: number of splits
# n_folds: number of folds
# C: SVM penalization parameter
# compute_probas: compute logit fit
# compute_predict: compute traditional SVM
# fs_n: number of univariate features selected for classification
# dims: classification performed on dims dimensions
# dims_tg: classification generalized on dims_tg dimensions
#
# Outputs:
# predict: prediction matrix (split x samples x dims x dimsg)
# predictg: same as predict for generalized samples
# probas: probas matrix (split x samples x dims x dimsg x class)
# probasg: same as probas for generalized samples
# coef: weight hyperplane vector
# all_folds: folding report (split x fold x samples)
# y_all: original y
# y: training y
# yg: generalized y
# filenameX:
# filenamey:
#
# Results are reported in: path + nameX + '_' + namey + "_results.mat"
###############################################################################
# (c) Jean-Remi King: jeanremi.king [at] gmail [dot] com
###############################################################################
# update 2012 11 29: fix 3rd dimension issue
# update 2012 11 13: fix bug str output on some python versions
# update 2012 11 02: change stratified kfolding y by y2
# update 2012 11 02: add np.copy to Xtrain and Xtest
# update 2012 11 01: correct feature selection coef bug when at 100 %
# update 2012 10 23: correct leaveoneout bug
# update 2012 10 23: correct major n_split new_order error
# update 2012 10 18: correct python/matlab dim incompatibility
# update 2012 10 18: correct error fs between 99 and 100 && remove Kbest
# update 2012 10 17: correct error n_features shape and add nice
# update 2012 10 01: correct prediction error+change loading results option
# update 2012 09 14: handle fs float error
# update 2012 09 14: pass n_cores to sys.arg
# version 2012 09 13: implementation of parallelization
###############################################################################
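# ---------------------------------------------------------------------------
# Illustrative sketch only: one possible way to build the two .mat inputs
# described above directly from Python instead of 'jr_classify.m'. The field
# names follow the header; the array sizes and toy values are invented, and
# this helper is not part of the original toolbox.
def _example_make_inputs(filenameX='X.mat', filenamey='y.mat'):
    import numpy as np
    import scipy.io as sio
    n_trials, n_chans, n_times = 100, 32, 50
    Xm = np.random.randn(n_trials, n_chans, n_times)  # samples x features x time
    y = np.repeat([1., 2.], n_trials // 2)  # >0: train/test, <0: generalization, 0: ignored
    dims = np.arange(1, n_times + 1)  # 1-based indices, as produced by Matlab
    sio.savemat(filenameX, {'Xm': Xm})
    sio.savemat(filenamey, {'y': y, 'y2': y, 'path': './', 'nameX': 'X',
                            'namey': 'y', 'folding': 'stratified',
                            'n_splits': 1, 'n_folds': 5, 'C': 1.0,
                            'compute_probas': 1, 'compute_predict': 0,
                            'fs': 100.0, 'dims': dims,
                            'dims_tg': np.tile(dims, (n_times, 1))})
# ---------------------------------------------------------------------------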
print("LIBRARY")
import sys as sys
import numpy as np
from scipy import stats
from sklearn import svm
from sklearn.cross_validation import StratifiedKFold, LeaveOneOut, KFold
from sklearn.feature_selection import SelectPercentile, SelectKBest, f_classif
from sklearn.externals.joblib import Parallel, delayed
import scipy.io as sio
from sklearn.preprocessing import Scaler
import cudaica as ci # GPU
###############################################################################
print("INPUT DATA")
#-- get argument to load specific file
filenameX = str(sys.argv[1])
filenamey = str(sys.argv[2])
if len(sys.argv) <= 3:
n_cores = -1
else:
n_cores = int(sys.argv[3])
print("cores: " + str(n_cores))
print(filenameX)
print(filenamey)
#-- Load data into python
mat = sio.loadmat(filenameX)
Xm_all = mat["Xm"] # data
if np.size(Xm_all.shape) == 2: # fix 3rd dimension issue
X = np.zeros(np.append(Xm_all.shape, 1))
X[:, :, 0] = Xm_all
Xm_all = X
#-- load classification parameters
mat = sio.loadmat(filenamey)
dims = mat["dims"] # select time windows to compute
dims = np.reshape(dims, dims.size) - 1 # reshape for skl compatibility
dims_tg = mat["dims_tg"] - 1 # dims on which to generalize (converted to 0-based)
mat = sio.loadmat(filenamey, squeeze_me=True)
path = mat["path"]
nameX = mat["nameX"]
namey = mat["namey"]
folding = mat["folding"]
n_splits = mat["n_splits"] # number of splits
n_folds = mat["n_folds"] # fold number
svm_C = mat["C"] # svm penalization parameter
compute_probas = mat["compute_probas"] # compute logit fit
compute_predict = mat["compute_predict"] # compute traditional SVM predictions
fs_n = mat["fs"] # feature selection
y_all = mat["y"] # class used for train and test
print(Xm_all.shape)
print(y_all.shape)
y2_all = mat["y2"] # class used for sample weights
#-- build training and generalizing classes
Xm = Xm_all[y_all > 0, :, :] # training categories
Xmg = Xm_all[y_all < 0, :, :] # generalization categories
y = y_all[y_all > 0]
yg = y_all[y_all < 0]
y2 = y2_all[y_all > 0]
n_samples, n_features, unused = Xm.shape
n_samplesg, unused, unused = Xmg.shape
n_featuresg = n_features
n_dims = dims.shape[0]
n_dimsg = n_dims
n_dims_tg = dims_tg.shape[1]
n_dimsg_tg = dims_tg.shape[1]
n_classes = np.unique(y).shape[0]
#deal with sample_weight
sample_weight = np.ones(y.shape[0])
classes = np.unique(y2)
for c in range(classes.shape[0]):
sample_weight[y2 == classes[c]] = 1. / (np.sum(y2 == classes[c]))
###############################################################################
print("PREPARE CLASSIFICATION")
#-- classifier
clf = svm.SVC(kernel='linear', probability=True, C=svm_C)
#-- normalizer
scaler = Scaler()
#-- feature selection
if fs_n < 99.00:
fs = SelectPercentile(f_classif, percentile=fs_n)
elif fs_n > 99 and fs_n < 101:
fs = SelectKBest(f_classif, k=n_features)
else:
print("cfg.fs / fs_n must be > 0 and <= 100")
#-- results initialization
if compute_predict:
predict = np.zeros([n_splits, n_samples, n_dims, n_dims_tg]) ** np.nan
predictg = np.zeros([n_splits, n_samplesg, n_dimsg, n_dimsg_tg, n_folds]) ** np.nan
else:
predict = []
predictg = []
if compute_probas:
probas = np.zeros([n_splits, n_samples, n_dims, n_dims_tg, n_classes]) ** np.nan
probasg = np.zeros([n_splits, n_samplesg, n_dimsg, n_dimsg_tg, n_classes, n_folds]) ** np.nan
else:
probas = []
probasg = []
coef = np.empty([n_splits, n_folds, n_dims, n_classes * (n_classes - 1) / 2, n_features]) ** 0
all_folds = np.zeros([n_splits, n_folds, n_samples]) ** np.nan
###############################################################################
#-- Define parallel cross validation
def my_pipeline(train, test,
Xm_shfl, y_shfl, sw_shfl, Xmg,
dims, fs, scaler, clf,
n_samples, n_dims, n_dims_tg, n_classes, wts, sph):
# component transformation
[n_trials, n_features, n_samples] = Xm_shfl.shape
Xm_shfl = Xm_shfl.transpose([1, 2, 0])
Xm_shfl = np.reshape(Xm_shfl, [n_features, n_samples * n_trials])
Xm_shfl = sph * wts * Xm_shfl
Xm_shfl = np.reshape(Xm_shfl, [n_features, n_samples, n_trials])
Xm_shfl = Xm_shfl.transpose([2, 0, 1])
Xmg = Xmg.transpose([1, 2, 0])
Xmg = np.reshape(Xmg, [n_features, n_samples * n_trials])
Xmg = sph * wts * Xmg
Xmg = np.reshape(Xmg, [n_features, n_samples, n_trials])
Xmg = Xmg.transpose([2, 0, 1])
# indicate opened fold
sys.stdout.write("<")
sys.stdout.flush()
# initialize results within a given fold
if compute_predict:
predict = np.zeros([n_samples, n_dims, n_dims_tg]) ** np.nan
predictg = np.zeros([n_samplesg, n_dimsg, n_dimsg_tg]) ** np.nan
else:
predict = []
predictg = []
if compute_probas:
probas = np.zeros([n_samples, n_dims, n_dims_tg, n_classes]) ** np.nan
probasg = np.zeros([n_samplesg, n_dimsg, n_dimsg_tg, n_classes]) ** np.nan
else:
probas = []
probasg = []
coef = np.empty([n_dims, n_classes * (n_classes - 1) / 2, n_features]) ** 0
# apply different classification along dimension 0
for d in range(0, dims.shape[0]):
Xtrain = np.copy(Xm_shfl[train, :, dims[d]])
ytrain = y_shfl[train]
sw_train = sw_shfl[train]
# (deal with NaN samples in training)
ytrain = ytrain[~np.isnan(np.nansum(Xtrain, axis=1))]
sw_train = sw_train[~np.isnan(np.nansum(Xtrain, axis=1))]
Xtrain = Xtrain[~np.isnan(np.nansum(Xtrain, axis=1)), :]
if np.unique(ytrain).shape[0] > 1:
# feature selection
fs.fit(Xtrain, ytrain)
Xtrain = fs.transform(Xtrain)
# normalization
scaler.fit(Xtrain)
Xtrain = scaler.transform(Xtrain)
# SVM fit
clf.fit(Xtrain, ytrain, sample_weight=sw_train)
# retrieve features selected during univariate selection
if fs_n > 99 and fs_n < 101:
#uni_features = sorted(range(len(fs.pvalues_)),key=lambda x:fs.pvalues_[x])
uni_features = range(0, clf.coef_.shape[1])
else:
uni_features = fs.pvalues_ <= stats.scoreatpercentile(fs.pvalues_, fs.percentile)
# retrieve hyperplan (unselected features as 0)
coef[d, :, uni_features] = scaler.inverse_transform(clf.coef_).T
# generalize across all time points
for d_tg in range(0, n_dims_tg):
# select data
Xtest = np.copy(Xm_shfl[test, :, dims_tg[d, d_tg]])
# handles NaNs
test_nan = np.isnan(np.nansum(Xtest, axis=1))
Xtest = Xtest[~test_nan, :]
# feature selection from training
Xtest = fs.transform(Xtest)
# normalize from training
Xtest = scaler.transform(Xtest)
# generalize test samples
if (Xtest.shape[0] - np.sum(test_nan)) > 0:
if compute_predict:
predict[test[~test_nan], d, d_tg] = clf.predict(Xtest)
if compute_probas:
probas[test[~test_nan], d, d_tg, :] = clf.predict_proba(Xtest)
# predict on generalization sample
# select data
Xtestg = Xmg[:, :, dims_tg[d, d_tg]]
# handles NaNs
test_nan = np.isnan(np.nansum(Xtestg, axis=1))
if (Xtestg.shape[0] - np.sum(test_nan)) > 0:
Xtestg = Xtestg[~test_nan, :]
# preproc feature selection and normalization
Xtestg = fs.transform(Xtestg)
Xtestg = scaler.transform(Xtestg)
# compute prediction
if compute_predict:
predictg[~test_nan, d, d_tg] = clf.predict(Xtestg)
if compute_probas:
probasg[~test_nan, d, d_tg, :] = clf.predict_proba(Xtestg)
# summarize fold results
out = {
'coef': coef,
'predict': predict,
'predictg': predictg,
'probas': probas,
'probasg': probasg}
# indicate end of fold
sys.stdout.write(">")
sys.stdout.flush()
return out
###############################################################################
print("CLASSIFY")
#-- Shuffle split
for split in range(n_splits):
print("split " + str(split))
#-- shuffle order in case this is not the first split
new_order = np.array(range(y.shape[0]))
if split > 0:
np.random.shuffle(new_order)
y_shfl = np.copy(y)
y_shfl = y_shfl[new_order]
y2_shfl = np.copy(y2)
y2_shfl = y2_shfl[new_order]
Xm_shfl = np.copy(Xm)
Xm_shfl = Xm_shfl[new_order, :, :]
sw_shfl = np.copy(sample_weight)
sw_shfl = sw_shfl[new_order]
else:
y_shfl = np.copy(y)
y2_shfl = np.copy(y2)
Xm_shfl = np.copy(Xm)
sw_shfl = np.copy(sample_weight)
#-- define crossvalidation
if folding == 'stratified':
cv = StratifiedKFold(y2_shfl, k=n_folds)
elif folding == 'kfolding':
cv = KFold(n=y2_shfl.shape[0], k=n_folds)
elif folding == 'leaveoneout':
n_folds = y_shfl.shape[0]
cv = LeaveOneOut(n=y_shfl.shape[0])
else:
print("unknown crossvalidation method!")
# GPU transform
print "GPU ICA"
wtss = np.ndarray(shape=(n_features, n_features, n_folds), dtype=np.float64, order='F')
sphs = np.ndarray(shape=(n_features, n_features, n_folds), dtype=np.float64, order='F')
for fold, (train, test) in enumerate(cv):
        print(fold)
        # reshape training set to 2D (channels x concatenated time samples)
        XtrainC = Xm_shfl[train, :, :].transpose([1, 2, 0]).reshape((n_features, -1), order='F')
# initialize
wts = np.ndarray(shape=(n_features, n_features), dtype=np.float64, order='F')
sph = np.ndarray(shape=(n_features, n_features), dtype=np.float64, order='F')
        # Compulsory: select the device
        ci.selectDevice(0)  # choose with nvidia-smi
# Compulsory: initialize default configuration
cfg = ci.initDefaultConfig()
# Optional: show configuration
ci.printConfig(cfg)
ci.debugData(XtrainC)
        # Compulsory: set nchannels, nsamples
ci.setIntParameter(cfg, 'nchannels', XtrainC.shape[0])
ci.setIntParameter(cfg, 'nsamples', XtrainC.shape[1])
#Optional: other parameters
ci.setRealParameter(cfg, 'lrate', 0.000286758) # from MEG: should be optimized: always goes down
ci.setRealParameter(cfg, 'nochange', 1e-6) # change this for dirtier and faster computation
ci.setIntParameter(cfg, 'maxsteps', 256)
#~ ci.printConfig(cfg)
print "Checking"
#Compulsory: check configuration before running
ci.checkDefaultConfig(cfg)
ci.printConfig(cfg)
print "Transfer"
# Compulsory
ci.transfer2DDataTo(XtrainC, cfg)
        # Preprocess (optional: check)
# JR: disable sphering and apply it directly from skl
ci.setStringParameter(cfg, 'sphering', 'off')
ci.preprocess(cfg)
# Main function: ICA
ci.process(cfg)
# Postprocessing:
        # ci.postprocess(cfg)  # sorting components by explained variance, applied on the GPU
# Retrieve data:
ci.transferSphereFrom(sph, cfg)
ci.transferWeightsFrom(wts, cfg)
# store sphering and weights
wtss[:, :, fold] = wts
sphs[:, :, fold] = sph
print "SVM Pipeline"
# Cross-validation computed in parallel
out = Parallel(n_jobs=n_cores)(delayed(my_pipeline)(
train=train,
test=test,
Xm_shfl=Xm_shfl,
y_shfl=y_shfl,
sw_shfl=sw_shfl,
Xmg=Xmg,
dims=dims,
fs=fs,
scaler=scaler,
clf=clf,
n_samples=n_samples,
n_dims=n_dims,
n_dims_tg=n_dims_tg,
n_classes=n_classes,
        wts=np.reshape(wtss[:, :, fold], [n_features, n_features]),
sph=np.reshape(sphs[:, :, fold], [n_features, n_features])
) for fold, (train, test) in enumerate(cv))
# reorder results folds and splits
for fold, (train, test) in enumerate(cv):
all_folds[split, fold, train] = 1
all_folds[split, fold, test] = 0
coef[split, fold, :, :, :] = out[fold]['coef']
if compute_predict:
predict[split, new_order[test], :, :] = out[fold]['predict'][test, :, :]
predictg[split, :, :, :, fold] = out[fold]['predictg']
if compute_probas:
probas[split, new_order[test], :, :, :] = out[fold]['probas'][test, :, :, :]
probasg[split, :, :, :, :, fold] = out[fold]['probasg']
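    # map the fold membership masks back to the original (unshuffled) trial order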
all_folds[split, :, new_order] = all_folds[split, :, :].T
###############################################################################
print("EXPORT DATA")
mat['predict'] = predict
mat['predictg'] = predictg
mat['probas'] = probas
mat['probasg'] = probasg
mat['coef'] = coef
mat['all_folds'] = all_folds
mat['y_all'] = y_all
mat['y'] = y
mat['yg'] = yg
mat['filenameX'] = filenameX
mat['filenamey'] = filenamey
print(nameX)
print(namey)
print(path)
output = str(path) + str(nameX) + '_' + str(namey) + "_results.mat"
print(output)
sio.savemat(output, mat)
|
bsd-3-clause
|
zhenv5/scikit-learn
|
sklearn/externals/joblib/__init__.py
|
72
|
4795
|
""" Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://pythonhosted.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing the same thing twice**: code is rerun over and
  over, for instance when prototyping computation-heavy jobs (as in
  scientific development), but hand-crafted solutions to alleviate this
  issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: efficiently persisting
  arbitrary objects containing large data is hard. Using
  joblib's caching mechanism avoids hand-written persistence and
  implicitly links the file on disk to the execution context of
  the original Python object. As a result, joblib's persistence is
  good for resuming an application status or computational job, e.g.
  after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
   progressively acquire better logging mechanisms to help track what
   has been run, and capture I/O easily. In addition, Joblib will
   provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
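   A minimal illustrative sketch of the dump/load round trip (the target
   file name below is arbitrary)::
      >>> import numpy as np
      >>> from sklearn.externals.joblib import dump, load
      >>> _ = dump(np.arange(10), '/tmp/joblib_example.pkl')
      >>> load('/tmp/joblib_example.pkl')
      array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])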
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.9.0b4'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
|
bsd-3-clause
|
rohit21122012/DCASE2013
|
runs/2016/dnn2016med_nomel/dnn6.py
|
2
|
32057
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# DCASE 2016::Acoustic Scene Classification / Baseline System
#import sys
#sys.path.insert(0, '../')
from src.ui import *
from src.general import *
from src.files import *
from src.features import *
from src.dataset import *
from src.evaluation import *
import numpy
import csv
import argparse
import textwrap
from sklearn.metrics import confusion_matrix
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import timeit
from sklearn.externals import joblib
from sklearn import preprocessing as pp
from sklearn import mixture
from sklearn.svm import SVC
import skflow
__version_info__ = ('1', '0', '0')
__version__ = '.'.join(__version_info__)
final_result = {}
def main(argv):
matplotlib.use('Agg')
start = timeit.default_timer()
numpy.random.seed(123456) # let's make randomization predictable
parser = argparse.ArgumentParser(
prefix_chars='-+',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
DCASE 2016
Task 1: Acoustic Scene Classification
Baseline system
---------------------------------------------
Tampere University of Technology / Audio Research Group
Author: Toni Heittola ( [email protected] )
System description
            This is a baseline implementation for the DCASE 2016 challenge acoustic scene classification task.
Features: MFCC (static+delta+acceleration)
            Classifier: DNN (TensorFlow skflow)
'''))
# Setup argument handling
parser.add_argument("-development", help="Use the system in the development mode", action='store_true',
default=False, dest='development')
parser.add_argument("-challenge", help="Use the system in the challenge mode", action='store_true',
default=False, dest='challenge')
parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
args = parser.parse_args()
# Load parameters from config file
params = load_parameters('dnn6.yaml')
params = process_parameters(params)
title("DCASE 2016::Acoustic Scene Classification / Baseline System")
# Check if mode is defined
if not (args.development or args.challenge):
args.development = True
args.challenge = False
dataset_evaluation_mode = 'folds'
if args.development and not args.challenge:
print "Running system in development mode"
dataset_evaluation_mode = 'folds'
elif not args.development and args.challenge:
print "Running system in challenge mode"
dataset_evaluation_mode = 'full'
# Get dataset container class
dataset = eval(params['general']['development_dataset'])(data_path=params['path']['data'])
# Fetch data over internet and setup the data
# ==================================================
if params['flow']['initialize']:
dataset.fetch()
# Extract features for all audio files in the dataset
# ==================================================
if params['flow']['extract_features']:
section_header('Feature extraction')
# Collect files in train sets
files = []
for fold in dataset.folds(mode=dataset_evaluation_mode):
for item_id, item in enumerate(dataset.train(fold)):
if item['file'] not in files:
files.append(item['file'])
for item_id, item in enumerate(dataset.test(fold)):
if item['file'] not in files:
files.append(item['file'])
files = sorted(files)
# Go through files and make sure all features are extracted
do_feature_extraction(files=files,
dataset=dataset,
feature_path=params['path']['features'],
params=params['features'],
overwrite=params['general']['overwrite'])
foot()
# Prepare feature normalizers
# ==================================================
if params['flow']['feature_normalizer']:
section_header('Feature normalizer')
do_feature_normalization(dataset=dataset,
feature_normalizer_path=params['path']['feature_normalizers'],
feature_path=params['path']['features'],
dataset_evaluation_mode=dataset_evaluation_mode,
overwrite=params['general']['overwrite'])
foot()
# System training
# ==================================================
if params['flow']['train_system']:
section_header('System training')
do_system_training(dataset=dataset,
model_path=params['path']['models'],
feature_normalizer_path=params['path']['feature_normalizers'],
feature_path=params['path']['features'],
classifier_params=params['classifier']['parameters'],
classifier_method=params['classifier']['method'],
dataset_evaluation_mode=dataset_evaluation_mode,
overwrite=params['general']['overwrite']
)
foot()
# System evaluation in development mode
if args.development and not args.challenge:
# System testing
# ==================================================
if params['flow']['test_system']:
section_header('System testing')
do_system_testing(dataset=dataset,
feature_path=params['path']['features'],
result_path=params['path']['results'],
model_path=params['path']['models'],
feature_params=params['features'],
dataset_evaluation_mode=dataset_evaluation_mode,
classifier_method=params['classifier']['method'],
overwrite=params['general']['overwrite']
)
foot()
plot_name = params['classifier']['method']
# System evaluation
# ==================================================
if params['flow']['evaluate_system']:
section_header('System evaluation')
#plot_name = params['classifier']['method'] + str(params['classifier']['parameters']['n_components'])
do_system_evaluation(dataset=dataset,
dataset_evaluation_mode=dataset_evaluation_mode,
result_path=params['path']['results'],
plot_name=plot_name)
foot()
# System evaluation with challenge data
elif not args.development and args.challenge:
# Fetch data over internet and setup the data
challenge_dataset = eval(params['general']['challenge_dataset'])()
if params['flow']['initialize']:
challenge_dataset.fetch()
# System testing
if params['flow']['test_system']:
section_header('System testing with challenge data')
do_system_testing(dataset=challenge_dataset,
feature_path=params['path']['features'],
result_path=params['path']['challenge_results'],
model_path=params['path']['models'],
feature_params=params['features'],
dataset_evaluation_mode=dataset_evaluation_mode,
classifier_method=params['classifier']['method'],
overwrite=True
)
foot()
print " "
print "Your results for the challenge data are stored at ["+params['path']['challenge_results']+"]"
print " "
end = timeit.default_timer()
print " "
print "Total Time : " + str(end-start)
print " "
final_result['time'] = end-start
joblib.dump(final_result, 'result' + plot_name + '.pkl')
return 0
def process_parameters(params):
"""Parameter post-processing.
Parameters
----------
params : dict
parameters in dict
Returns
-------
params : dict
processed parameters
"""
# Convert feature extraction window and hop sizes seconds to samples
params['features']['mfcc']['win_length'] = int(params['features']['win_length_seconds'] * params['features']['fs'])
params['features']['mfcc']['hop_length'] = int(params['features']['hop_length_seconds'] * params['features']['fs'])
# Copy parameters for current classifier method
params['classifier']['parameters'] = params['classifier_parameters'][params['classifier']['method']]
# Hash
params['features']['hash'] = get_parameter_hash(params['features'])
params['classifier']['hash'] = get_parameter_hash(params['classifier'])
# Paths
params['path']['features'] = os.path.join(params['path']['base'], params['path']['features'],
params['features']['hash'])
params['path']['feature_normalizers'] = os.path.join(params['path']['base'], params['path']['feature_normalizers'],
params['features']['hash'])
params['path']['models'] = os.path.join(params['path']['base'], params['path']['models'],
params['features']['hash'], params['classifier']['hash'])
params['path']['results'] = os.path.join(params['path']['base'], params['path']['results'],
params['features']['hash'], params['classifier']['hash'])
return params
def get_feature_filename(audio_file, path, extension='cpickle'):
"""Get feature filename
Parameters
----------
audio_file : str
audio file name from which the features are extracted
path : str
feature path
extension : str
file extension
(Default value='cpickle')
Returns
-------
feature_filename : str
full feature filename
"""
audio_filename = os.path.split(audio_file)[1]
return os.path.join(path, os.path.splitext(audio_filename)[0] + '.' + extension)
def get_feature_normalizer_filename(fold, path, extension='cpickle'):
"""Get normalizer filename
Parameters
----------
fold : int >= 0
evaluation fold number
path : str
normalizer path
extension : str
file extension
(Default value='cpickle')
Returns
-------
normalizer_filename : str
full normalizer filename
"""
return os.path.join(path, 'scale_fold' + str(fold) + '.' + extension)
def get_model_filename(fold, path, extension='cpickle'):
"""Get model filename
Parameters
----------
fold : int >= 0
evaluation fold number
path : str
model path
extension : str
file extension
(Default value='cpickle')
Returns
-------
model_filename : str
full model filename
"""
return os.path.join(path, 'model_fold' + str(fold) + '.' + extension)
def get_result_filename(fold, path, extension='txt'):
"""Get result filename
Parameters
----------
fold : int >= 0
evaluation fold number
path : str
result path
extension : str
file extension
(Default value='cpickle')
Returns
-------
result_filename : str
full result filename
"""
if fold == 0:
return os.path.join(path, 'results.' + extension)
else:
return os.path.join(path, 'results_fold' + str(fold) + '.' + extension)
def do_feature_extraction(files, dataset, feature_path, params, overwrite=False):
"""Feature extraction
Parameters
----------
files : list
file list
dataset : class
dataset class
feature_path : str
path where the features are saved
params : dict
parameter dict
overwrite : bool
overwrite existing feature files
(Default value=False)
Returns
-------
nothing
Raises
-------
IOError
Audio file not found.
"""
# Check that target path exists, create if not
check_path(feature_path)
for file_id, audio_filename in enumerate(files):
# Get feature filename
current_feature_file = get_feature_filename(audio_file=os.path.split(audio_filename)[1], path=feature_path)
progress(title_text='Extracting',
percentage=(float(file_id) / len(files)),
note=os.path.split(audio_filename)[1])
if not os.path.isfile(current_feature_file) or overwrite:
# Load audio data
if os.path.isfile(dataset.relative_to_absolute_path(audio_filename)):
y, fs = load_audio(filename=dataset.relative_to_absolute_path(audio_filename), mono=True, fs=params['fs'])
else:
raise IOError("Audio file not found [%s]" % audio_filename)
# Extract features
feature_data = feature_extraction(y=y,
fs=fs,
include_mfcc0=params['include_mfcc0'],
include_delta=params['include_delta'],
include_acceleration=params['include_acceleration'],
mfcc_params=params['mfcc'],
delta_params=params['mfcc_delta'],
acceleration_params=params['mfcc_acceleration'])
# Save
save_data(current_feature_file, feature_data)
def do_feature_normalization(dataset, feature_normalizer_path, feature_path, dataset_evaluation_mode='folds', overwrite=False):
"""Feature normalization
    Calculates normalization factors for each evaluation fold based on the available training material.
Parameters
----------
dataset : class
dataset class
feature_normalizer_path : str
path where the feature normalizers are saved.
feature_path : str
path where the features are saved.
dataset_evaluation_mode : str ['folds', 'full']
        evaluation mode; with 'full', all available material is considered to belong to one fold.
(Default value='folds')
overwrite : bool
overwrite existing normalizers
(Default value=False)
Returns
-------
nothing
Raises
-------
IOError
Feature file not found.
"""
# Check that target path exists, create if not
check_path(feature_normalizer_path)
for fold in dataset.folds(mode=dataset_evaluation_mode):
current_normalizer_file = get_feature_normalizer_filename(fold=fold, path=feature_normalizer_path)
if not os.path.isfile(current_normalizer_file) or overwrite:
# Initialize statistics
file_count = len(dataset.train(fold))
normalizer = FeatureNormalizer()
for item_id, item in enumerate(dataset.train(fold)):
progress(title_text='Collecting data',
fold=fold,
percentage=(float(item_id) / file_count),
note=os.path.split(item['file'])[1])
# Load features
if os.path.isfile(get_feature_filename(audio_file=item['file'], path=feature_path)):
feature_data = load_data(get_feature_filename(audio_file=item['file'], path=feature_path))['stat']
else:
raise IOError("Feature file not found [%s]" % (item['file']))
# Accumulate statistics
normalizer.accumulate(feature_data)
# Calculate normalization factors
normalizer.finalize()
# Save
save_data(current_normalizer_file, normalizer)
def do_system_training(dataset, model_path, feature_normalizer_path, feature_path, classifier_params,
dataset_evaluation_mode='folds', classifier_method='dnn6', overwrite=False):
"""System training
model container format:
{
'normalizer': normalizer class
'models' :
{
'office' : mixture.GMM class
'home' : mixture.GMM class
...
}
}
Parameters
----------
dataset : class
dataset class
model_path : str
path where the models are saved.
feature_normalizer_path : str
path where the feature normalizers are saved.
feature_path : str
path where the features are saved.
classifier_params : dict
parameter dict
dataset_evaluation_mode : str ['folds', 'full']
        evaluation mode; with 'full', all available material is considered to belong to one fold.
(Default value='folds')
classifier_method : str ['dnn6']
        classifier method, currently only 'dnn6' supported
(Default value='dnn6')
overwrite : bool
overwrite existing models
(Default value=False)
Returns
-------
nothing
Raises
-------
ValueError
classifier_method is unknown.
IOError
Feature normalizer not found.
Feature file not found.
"""
if classifier_method != 'dnn6':
raise ValueError("Unknown classifier method ["+classifier_method+"]")
# Check that target path exists, create if not
check_path(model_path)
for fold in dataset.folds(mode=dataset_evaluation_mode):
current_model_file = get_model_filename(fold=fold, path=model_path)
if not os.path.isfile(current_model_file) or overwrite:
# Load normalizer
feature_normalizer_filename = get_feature_normalizer_filename(fold=fold, path=feature_normalizer_path)
if os.path.isfile(feature_normalizer_filename):
normalizer = load_data(feature_normalizer_filename)
else:
raise IOError("Feature normalizer not found [%s]" % feature_normalizer_filename)
# Initialize model container
model_container = {'normalizer': normalizer, 'models': {}}
# Collect training examples
file_count = len(dataset.train(fold))
data = {}
for item_id, item in enumerate(dataset.train(fold)):
progress(title_text='Collecting data',
fold=fold,
percentage=(float(item_id) / file_count),
note=os.path.split(item['file'])[1])
# Load features
feature_filename = get_feature_filename(audio_file=item['file'], path=feature_path)
if os.path.isfile(feature_filename):
feature_data = load_data(feature_filename)['feat']
else:
raise IOError("Features not found [%s]" % (item['file']))
# Scale features
feature_data = model_container['normalizer'].normalize(feature_data)
# Store features per class label
if item['scene_label'] not in data:
data[item['scene_label']] = feature_data
else:
data[item['scene_label']] = numpy.vstack((data[item['scene_label']], feature_data))
le = pp.LabelEncoder()
tot_data = {}
# Train models for each class
for label in data:
progress(title_text='Train models',
fold=fold,
note=label)
if classifier_method == 'dnn6':
# model_container['models'][label] = mixture.GMM(**classifier_params).fit(data[label])
if 'x' not in tot_data:
tot_data['x'] = data[label]
tot_data['y'] = numpy.repeat(label,len(data[label]), axis=0)
else:
tot_data['x'] = numpy.vstack((tot_data['x'], data[label]))
#print tot_data['y'].shape, numpy.repeat(label,len(data[label]), axis=0).shape
tot_data['y'] = numpy.hstack((tot_data['y'], numpy.repeat(label, len(data[label]), axis=0)))
else:
raise ValueError("Unknown classifier method ["+classifier_method+"]")
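            # Train a single DNN on the pooled data from all scene classes
            # (string labels are integer-encoded below), rather than one
            # generative model per class as in the GMM baseline.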
clf = skflow.TensorFlowDNNClassifier(**classifier_params)
if classifier_method == 'dnn6':
tot_data['y'] = le.fit_transform(tot_data['y'])
clf.fit(tot_data['x'], tot_data['y'], logdir='dnn6/dnn6model1log/')
clf.save('dnn6/dnn6model1')
print model_container['models']
# Save models
save_data(current_model_file, model_container)
#clf.save(current_model_file);
def do_system_testing(dataset, result_path, feature_path, model_path, feature_params,
dataset_evaluation_mode='folds', classifier_method='dnn6', overwrite=False):
"""System testing.
If extracted features are not found from disk, they are extracted but not saved.
Parameters
----------
dataset : class
dataset class
result_path : str
path where the results are saved.
feature_path : str
path where the features are saved.
model_path : str
path where the models are saved.
feature_params : dict
parameter dict
dataset_evaluation_mode : str ['folds', 'full']
        evaluation mode; with 'full', all available material is considered to belong to one fold.
(Default value='folds')
classifier_method : str ['dnn6']
        classifier method, currently only 'dnn6' supported
(Default value='dnn6')
overwrite : bool
        overwrite existing result files
(Default value=False)
Returns
-------
nothing
Raises
-------
ValueError
classifier_method is unknown.
IOError
Model file not found.
Audio file not found.
"""
if classifier_method != 'dnn6':
raise ValueError("Unknown classifier method ["+classifier_method+"]")
# Check that target path exists, create if not
check_path(result_path)
for fold in dataset.folds(mode=dataset_evaluation_mode):
current_result_file = get_result_filename(fold=fold, path=result_path)
if not os.path.isfile(current_result_file) or overwrite:
results = []
# Load class model container
model_filename = get_model_filename(fold=fold, path=model_path)
if os.path.isfile(model_filename):
model_container = load_data(model_filename)
else:
raise IOError("Model file not found [%s]" % model_filename)
file_count = len(dataset.test(fold))
for file_id, item in enumerate(dataset.test(fold)):
progress(title_text='Testing',
fold=fold,
percentage=(float(file_id) / file_count),
note=os.path.split(item['file'])[1])
# Load features
feature_filename = get_feature_filename(audio_file=item['file'], path=feature_path)
if os.path.isfile(feature_filename):
feature_data = load_data(feature_filename)['feat']
else:
# Load audio
if os.path.isfile(dataset.relative_to_absolute_path(item['file'])):
y, fs = load_audio(filename=dataset.relative_to_absolute_path(item['file']), mono=True, fs=feature_params['fs'])
else:
raise IOError("Audio file not found [%s]" % (item['file']))
feature_data = feature_extraction(y=y,
fs=fs,
include_mfcc0=feature_params['include_mfcc0'],
include_delta=feature_params['include_delta'],
include_acceleration=feature_params['include_acceleration'],
mfcc_params=feature_params['mfcc'],
delta_params=feature_params['mfcc_delta'],
acceleration_params=feature_params['mfcc_acceleration'],
statistics=False)['feat']
# Normalize features
feature_data = model_container['normalizer'].normalize(feature_data)
# Do classification for the block
if classifier_method == 'dnn6':
current_result = dataset.scene_labels[do_classification_dnn6(feature_data, model_container)]
else:
raise ValueError("Unknown classifier method ["+classifier_method+"]")
# Store the result
results.append((dataset.absolute_to_relative(item['file']), current_result))
# Save testing results
with open(current_result_file, 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for result_item in results:
writer.writerow(result_item)
def do_classification_dnn6(feature_data, model_container):
"""GMM classification for give feature matrix
model container format:
{
'normalizer': normalizer class
'models' :
{
'office' : mixture.GMM class
'home' : mixture.GMM class
...
}
}
Parameters
----------
feature_data : numpy.ndarray [shape=(t, feature vector length)]
feature matrix
model_container : dict
model container
Returns
-------
result : str
classification result as scene label
"""
# Initialize log-likelihood matrix to -inf
logls = numpy.empty(15)
logls.fill(-numpy.inf)
model_clf = skflow.TensorFlowEstimator.restore('dnn6/dnn6model1');
#for label_id, label in enumerate(model_container['models']):
# logls[label_id] = numpy.sum(model_container['models'][label].score(feature_data))
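    # Sum frame-wise log-probabilities over time (axis 0) to get one
    # clip-level score per scene class; argmax below picks the winner.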
logls = numpy.sum(numpy.log(model_clf.predict_proba(feature_data)),0)
#print logls
classification_result_id = numpy.argmax(logls)
return classification_result_id
def plot_cm(cm, targets, title='Confusion Matrix', cmap=plt.cm.Blues, norm=True, name='Plot'):
if(norm):
cm = cm.astype(float)/cm.sum(axis=1)[:, numpy.newaxis]
fig = plt.figure()
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title + ' ' + name)
plt.colorbar()
tick_marks = numpy.arange(len(targets))
plt.xticks(tick_marks, targets,rotation=45)
plt.yticks(tick_marks, targets)
plt.ylabel('True Label')
plt.xlabel('Predicted Label')
# plt.show()
fig.savefig(name + '.png')
#plt.close()
def do_system_evaluation(dataset, result_path, plot_name, dataset_evaluation_mode='folds'):
"""System evaluation. Testing outputs are collected and evaluated. Evaluation results are printed.
Parameters
----------
dataset : class
dataset class
result_path : str
path where the results are saved.
dataset_evaluation_mode : str ['folds', 'full']
        evaluation mode; with 'full', all available material is considered to belong to one fold.
(Default value='folds')
Returns
-------
nothing
Raises
-------
IOError
Result file not found
"""
dcase2016_scene_metric = DCASE2016_SceneClassification_Metrics(class_list=dataset.scene_labels)
results_fold = []
print str(dataset.scene_label_count)
tot_cm = numpy.zeros((dataset.scene_label_count, dataset.scene_label_count))
for fold in dataset.folds(mode=dataset_evaluation_mode):
dcase2016_scene_metric_fold = DCASE2016_SceneClassification_Metrics(class_list=dataset.scene_labels)
results = []
result_filename = get_result_filename(fold=fold, path=result_path)
if os.path.isfile(result_filename):
with open(result_filename, 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
results.append(row)
else:
raise IOError("Result file not found [%s]" % result_filename)
y_true = []
y_pred = []
for result in results:
y_true.append(dataset.file_meta(result[0])[0]['scene_label'])
y_pred.append(result[1])
#print dataset.file_meta(result[0])[0]['scene_label'] + ' ' + result[1]
dcase2016_scene_metric.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
dcase2016_scene_metric_fold.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
results_fold.append(dcase2016_scene_metric_fold.results())
tot_cm += confusion_matrix(y_true, y_pred)
#print ' '
print tot_cm
#plot_cm(tot_cm, dataset.scene_labels,name=plot_name)
#joblib.dump(tot_cm, plot_name + '.pkl')
final_result['tot_cm'] = tot_cm
final_result['tot_cm_acc'] = numpy.sum(numpy.diag(tot_cm))/numpy.sum(tot_cm)
results = dcase2016_scene_metric.results()
print " File-wise evaluation, over %d folds" % dataset.fold_count
fold_labels = ''
separator = ' =====================+======+======+==========+ +'
if dataset.fold_count > 1:
for fold in dataset.folds(mode=dataset_evaluation_mode):
fold_labels += " {:8s} |".format('Fold'+str(fold))
separator += "==========+"
print " {:20s} | {:4s} : {:4s} | {:8s} | |".format('Scene label', 'Nref', 'Nsys', 'Accuracy')+fold_labels
print separator
for label_id, label in enumerate(sorted(results['class_wise_accuracy'])):
fold_values = ''
if dataset.fold_count > 1:
for fold in dataset.folds(mode=dataset_evaluation_mode):
fold_values += " {:5.1f} % |".format(results_fold[fold-1]['class_wise_accuracy'][label] * 100)
print " {:20s} | {:4d} : {:4d} | {:5.1f} % | |".format(label,
results['class_wise_data'][label]['Nref'],
results['class_wise_data'][label]['Nsys'],
results['class_wise_accuracy'][label] * 100)+fold_values
print separator
fold_values = ''
if dataset.fold_count > 1:
for fold in dataset.folds(mode=dataset_evaluation_mode):
fold_values += " {:5.1f} % |".format(results_fold[fold-1]['overall_accuracy'] * 100)
print " {:20s} | {:4d} : {:4d} | {:5.1f} % | |".format('Overall accuracy',
results['Nref'],
results['Nsys'],
results['overall_accuracy'] * 100)+fold_values
final_result['result'] = results
if __name__ == "__main__":
try:
sys.exit(main(sys.argv))
except (ValueError, IOError) as e:
sys.exit(e)
|
mit
|
panmari/tensorflow
|
tensorflow/examples/skflow/iris.py
|
1
|
1206
|
# Copyright 2015-present Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn import datasets, metrics, cross_validation
from tensorflow.contrib import skflow
# Load dataset.
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = cross_validation.train_test_split(iris.data, iris.target,
test_size=0.2, random_state=42)
# Build a 3-layer DNN with 10, 20, 10 units, respectively.
classifier = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3, steps=200)
# Fit and predict.
classifier.fit(X_train, y_train)
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Accuracy: {0:f}'.format(score))
|
apache-2.0
|
yavalvas/yav_com
|
build/matplotlib/examples/pylab_examples/barchart_demo2.py
|
6
|
4284
|
"""
Thanks to Josh Hemann for the example
This examples comes from an application in which grade school gym
teachers wanted to be able to show parents how their child did across
a handful of fitness tests, and importantly, relative to how other
children did. To extract the plotting code for demo purposes, we'll
just make up some data for little Johnny Doe...
"""
import numpy as np
import matplotlib.pyplot as plt
import pylab
from matplotlib.ticker import MaxNLocator
student = 'Johnny Doe'
grade = 2
gender = 'boy'
cohortSize = 62 # The number of other 2nd grade boys
numTests = 5
testNames = ['Pacer Test', 'Flexed Arm\n Hang', 'Mile Run', 'Agility',
'Push Ups']
testMeta = ['laps', 'sec', 'min:sec', 'sec', '']
scores = ['7', '48', '12:52', '17', '14']
rankings = np.round(np.random.uniform(0, 1, numTests)*100, 0)
fig, ax1 = plt.subplots(figsize=(9, 7))
plt.subplots_adjust(left=0.115, right=0.88)
fig.canvas.set_window_title('Eldorado K-8 Fitness Chart')
pos = np.arange(numTests)+0.5 # Center bars on the Y-axis ticks
rects = ax1.barh(pos, rankings, align='center', height=0.5, color='m')
ax1.axis([0, 100, 0, 5])
pylab.yticks(pos, testNames)
ax1.set_title('Johnny Doe')
plt.text(50, -0.5, 'Cohort Size: ' + str(cohortSize),
horizontalalignment='center', size='small')
# Set the right-hand Y-axis ticks and labels and set X-axis tick marks at the
# deciles
ax2 = ax1.twinx()
ax2.plot([100, 100], [0, 5], 'white', alpha=0.1)
ax2.xaxis.set_major_locator(MaxNLocator(11))
xticks = pylab.setp(ax2, xticklabels=['0', '10', '20', '30', '40', '50', '60',
'70', '80', '90', '100'])
ax2.xaxis.grid(True, linestyle='--', which='major', color='grey',
alpha=0.25)
#Plot a solid vertical gridline to highlight the median position
plt.plot([50, 50], [0, 5], 'grey', alpha=0.25)
# Build up the score labels for the right Y-axis by first appending a carriage
# return to each string and then tacking on the appropriate meta information
# (i.e., 'laps' vs 'seconds'). We want the labels centered on the ticks, so if
# there is no meta info (like for pushups) then don't add the carriage return to
# the string
def withnew(i, scr):
if testMeta[i] != '':
return '%s\n' % scr
else:
return scr
scoreLabels = [withnew(i, scr) for i, scr in enumerate(scores)]
scoreLabels = [i+j for i, j in zip(scoreLabels, testMeta)]
# set the tick locations
ax2.set_yticks(pos)
# set the tick labels
ax2.set_yticklabels(scoreLabels)
# make sure that the limits are set equally on both yaxis so the ticks line up
ax2.set_ylim(ax1.get_ylim())
ax2.set_ylabel('Test Scores')
#Make list of numerical suffixes corresponding to position in a list
# 0 1 2 3 4 5 6 7 8 9
suffixes = ['th', 'st', 'nd', 'rd', 'th', 'th', 'th', 'th', 'th', 'th']
ax2.set_xlabel('Percentile Ranking Across ' + str(grade) + suffixes[grade]
+ ' Grade ' + gender.title() + 's')
# Lastly, write in the ranking inside each bar to aid in interpretation
for rect in rects:
# Rectangle widths are already integer-valued but are floating
# type, so it helps to remove the trailing decimal point and 0 by
# converting width to int type
width = int(rect.get_width())
    # Figure out what the last digit (width modulo 10) is so we can add
# the appropriate numerical suffix (e.g., 1st, 2nd, 3rd, etc)
lastDigit = width % 10
# Note that 11, 12, and 13 are special cases
if (width == 11) or (width == 12) or (width == 13):
suffix = 'th'
else:
suffix = suffixes[lastDigit]
rankStr = str(width) + suffix
if (width < 5): # The bars aren't wide enough to print the ranking inside
xloc = width + 1 # Shift the text to the right side of the right edge
clr = 'black' # Black against white background
align = 'left'
else:
xloc = 0.98*width # Shift the text to the left side of the right edge
clr = 'white' # White on magenta
align = 'right'
# Center the text vertically in the bar
yloc = rect.get_y()+rect.get_height()/2.0
ax1.text(xloc, yloc, rankStr, horizontalalignment=align,
verticalalignment='center', color=clr, weight='bold')
plt.show()
|
mit
|
DonBeo/scikit-learn
|
sklearn/metrics/tests/test_score_objects.py
|
2
|
13890
|
import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'log_loss',
'adjusted_rand_score' # not really, but works
]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should a be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorer work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(return_indicator=True,
allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
# Test clustering scorers against gold standard labeling.
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
return_indicator=True,
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
estimator = dict([(name, sensible_regr)
for name in REGRESSION_SCORERS] +
[(name, sensible_clf)
for name in CLF_SCORERS] +
[(name, sensible_ml_clf)
for name in MULTILABEL_ONLY_SCORERS])
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
|
bsd-3-clause
|
SteveDiamond/cvxpy
|
cvxpy/cvxcore/tests/python/364A_scripts/speed.py
|
4
|
5871
|
# data for vehicle speed scheduling problem.
# contains quantities: n, a, b, c, d, smin, smax, tau_min, tau_max
import numpy as np
from cvxpy import *
import matplotlib.pyplot as plt
import copy
import time
n = 100
a = 1
b = 6
c = 10
ANSWERS = []
TIME = 0
d = np.transpose(np.matrix(
[1.9501, 1.2311, 1.6068, 1.4860, 1.8913, 1.7621, 1.4565, 1.0185, 1.8214, 1.4447,
1.6154, 1.7919, 1.9218, 1.7382, 1.1763, 1.4057, 1.9355, 1.9169, 1.4103, 1.8936,
1.0579, 1.3529, 1.8132, 1.0099, 1.1389, 1.2028, 1.1987, 1.6038, 1.2722, 1.1988,
1.0153, 1.7468, 1.4451, 1.9318, 1.4660, 1.4186, 1.8462, 1.5252, 1.2026, 1.6721,
1.8381, 1.0196, 1.6813, 1.3795, 1.8318, 1.5028, 1.7095, 1.4289, 1.3046, 1.1897,
1.1934, 1.6822, 1.3028, 1.5417, 1.1509, 1.6979, 1.3784, 1.8600, 1.8537, 1.5936,
1.4966, 1.8998, 1.8216, 1.6449, 1.8180, 1.6602, 1.3420, 1.2897, 1.3412, 1.5341,
1.7271, 1.3093, 1.8385, 1.5681, 1.3704, 1.7027, 1.5466, 1.4449, 1.6946, 1.6213,
1.7948, 1.9568, 1.5226, 1.8801, 1.1730, 1.9797, 1.2714, 1.2523, 1.8757, 1.7373,
1.1365, 1.0118, 1.8939, 1.1991, 1.2987, 1.6614, 1.2844, 1.4692, 1.0648, 1.9883]))
smin = np.transpose(np.matrix(
[0.7828, 0.6235, 0.7155, 0.5340, 0.6329, 0.4259, 0.7798, 0.9604, 0.7298, 0.8405, 0.4091,
0.5798, 0.9833, 0.8808, 0.6611, 0.7678, 0.9942, 0.2592, 0.8029, 0.2503, 0.6154, 0.5050,
1.0744, 0.2150, 0.9680, 1.1708, 1.1901, 0.9889, 0.6387, 0.6983, 0.4140, 0.8435, 0.5200,
1.1601, 0.9266, 0.6120, 0.9446, 0.4679, 0.6399, 1.1334, 0.8833, 0.4126, 1.0392, 0.8288,
0.3338, 0.4071, 0.8072, 0.8299, 0.5705, 0.7751, 0.6514, 0.2439, 0.2272, 0.5127, 0.2129,
0.5840, 0.8831, 0.2928, 0.2353, 0.8124, 0.8085, 0.2158, 0.2164, 0.3901, 0.7869, 0.2576,
0.5676, 0.8315, 0.9176, 0.8927, 0.2841, 0.6544, 0.6418, 0.5533, 0.3536, 0.8756, 0.8992,
0.9275, 0.6784, 0.7548, 0.3210, 0.6508, 0.9159, 1.0928, 0.4731, 0.4548, 1.0656, 0.4324,
1.0049, 1.1084, 0.4319, 0.4393, 0.2498, 0.2784, 0.8408, 0.3909, 1.0439, 0.3739, 0.3708,
1.1943]))
smax = np.transpose(np.matrix(
[1.9624, 1.6036, 1.6439, 1.5641, 1.7194, 1.9090, 1.3193, 1.3366, 1.9470, 2.8803, 2.5775,
1.4087, 1.6039, 2.9266, 1.4369, 2.3595, 3.2280, 1.8890, 2.8436, 0.5701, 1.1894, 2.4425,
2.2347, 2.2957, 2.7378, 2.8455, 2.1823, 1.6209, 1.2499, 1.3805, 1.5589, 2.8554, 1.8005,
3.0920, 2.1482, 1.8267, 2.1459, 1.5924, 2.7431, 1.4445, 1.7781, 0.8109, 2.7256, 2.4290,
2.5997, 1.8125, 1.9073, 1.5275, 2.1209, 2.5419, 1.7032, 0.5636, 1.3669, 2.3200, 2.1006,
2.7239, 2.8726, 1.3283, 1.7769, 2.5750, 1.4963, 2.3254, 1.6548, 1.9537, 1.5557, 1.6551,
2.7307, 1.8018, 2.5287, 1.9765, 1.8387, 2.3525, 1.7362, 1.6805, 1.9640, 2.8508, 1.9424,
2.0780, 2.1677, 2.1863, 2.0541, 1.9734, 2.7687, 2.3715, 1.1449, 2.1560, 3.3310, 2.3456,
2.7120, 2.3783, 0.9611, 2.0690, 1.2805, 0.8585, 2.2744, 2.3369, 2.6918, 2.6728, 2.5941,
1.6120]))
tau_min = np.transpose(np.matrix(
[1.0809, 2.7265, 3.5118, 5.3038, 5.4516, 7.1648, 9.2674, 12.1543, 14.4058, 16.6258,
17.9214, 19.8242, 22.2333, 22.4849, 25.3213, 28.0691, 29.8751, 30.6358, 33.2561,
34.7963, 36.9943, 38.2610, 41.1451, 41.3613, 43.0215, 43.8974, 46.4713, 47.4786,
49.5192, 49.6795, 50.7495, 52.2444, 53.5477, 55.2351, 57.0850, 57.4250, 60.1198,
62.3834, 64.7568, 67.2016, 69.2116, 69.8143, 70.6335, 72.5122, 74.1228, 74.3013,
74.5682, 75.3821, 76.6093, 78.0315, 80.7584, 82.5472, 83.5340, 84.9686, 86.7601,
87.2445, 89.7329, 92.6013, 94.3879, 94.4742, 96.9105, 98.7409, 100.8453, 101.1219,
102.3966, 103.5233, 104.0218, 106.5212, 109.0372, 110.3920, 113.2618, 113.7033,
116.3131, 118.6214, 119.9539, 121.8157, 124.6708, 126.5908, 127.3328, 128.3909,
128.9545, 130.4264, 131.6542, 133.0448, 134.8776, 135.0912, 136.0340, 137.8591,
138.3842, 140.2473, 140.9852, 142.7472, 144.2654, 145.6597, 147.2840, 150.1110,
151.1363, 152.3417, 153.2647, 154.4994]))
tau_max = np.transpose(np.matrix(
[4.6528, 6.5147, 7.5178, 9.7478, 9.0641, 10.3891, 13.1540, 16.0878, 17.4352, 20.9539,
22.3695, 23.3875, 25.7569, 26.9019, 29.8890, 33.0415, 33.8218, 35.4414, 37.1583, 39.4054,
41.6520, 41.5935, 44.9329, 45.4028, 47.4577, 48.0358, 50.3929, 51.3692, 52.6947, 53.5665,
54.4821, 55.8495, 58.2514, 59.7541, 61.9845, 61.5409, 63.1482, 66.5758, 69.3892, 72.1558,
72.6555, 74.2216, 74.6777, 77.3780, 78.5495, 77.7574, 78.4675, 78.7265, 81.5470, 81.7429,
83.8565, 87.0579, 88.3237, 88.5409, 90.2625, 92.1100, 92.9949, 97.4829, 98.7916, 99.1695,
100.3291, 102.6510, 104.0075, 105.8242, 106.5207, 107.1619, 107.7716, 111.2568, 112.7815,
113.5394, 116.6615, 116.8022, 120.4465, 121.8652, 123.9981, 125.0498, 129.2106, 130.3409,
131.9796, 131.4842, 133.1503, 135.3247, 135.2318, 137.8225, 138.0808, 138.2218, 139.5026,
142.7253, 141.5105, 143.7757, 145.9842, 146.1712, 148.2622, 149.2407, 151.6295, 155.0270,
155.6694, 156.6739, 156.5266, 157.6903]))
t = Variable(n)
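# Per-segment fuel use is modeled as a * s**2 + b * s + c with speed
# s_i = d_i / t_i, so the objective below is written in terms of the
# travel times t.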
obj = a * (d[0] * t[0] ** -1) ** 2 + b * t[0] ** -1 * d[0] + c
for i in range(1, n):
    obj += a * (d[i] * t[i] ** -1) ** 2 + b * d[i] * t[i] ** -1 + c
obj = Minimize(obj)
constraints = []
sumSoFar = t[0]
for i in range(1,n):
constraints.append( sumSoFar <= tau_max[i-1] )
constraints.append( tau_min[i-1] <= sumSoFar )
sumSoFar = copy.deepcopy(sumSoFar) + t[i]
tmax = np.zeros( (n,1) )
tmin = np.zeros( (n,1) )
for i in range(len(tmax)):
tmax[i] = smin[i] ** -1 * d[i]
tmin[i] = smax[i] ** -1 * d[i]
constraints.append(tmin <= t )
constraints.append( t <= tmax)
s = np.zeros((n,1))
prob = Problem(obj, constraints)
tic =time.time()
ANSWERS.append(prob.solve())
toc = time.time()
TIME += toc - tic
pass #print toc - tic
for i in range(n):
s[i] = d[i] / t.value[i]
pass #plt.plot(s)
pass #plt.show()
|
gpl-3.0
|
maxlikely/scikit-learn
|
examples/plot_classification_probability.py
|
4
|
2375
|
"""
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, as
well as L1 and L2 penalized logistic regression.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD Style.
import pylab as pl
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True)}
n_classifiers = len(classifiers)
pl.figure(figsize=(3 * 2, n_classifiers * 2))
pl.subplots_adjust(bottom=.2, top=.95)
for index, (name, classifier) in enumerate(classifiers.iteritems()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
    # View probabilities:
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
pl.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
pl.title("Class %d" % k)
if k == 0:
pl.ylabel(name)
imshow_handle = pl.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
pl.xticks(())
pl.yticks(())
idx = (y_pred == k)
if idx.any():
pl.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = pl.axes([0.15, 0.04, 0.7, 0.05])
pl.title("Probability")
pl.colorbar(imshow_handle, cax=ax, orientation='horizontal')
pl.show()
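# A quick sanity check (a sketch, not part of the original example): the class
# probabilities returned for any single sample should sum to 1 for each classifier.
for name, classifier in classifiers.items():
    print("%s: probability sum = %.3f" % (name, classifier.predict_proba(X[:1]).sum()))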
|
bsd-3-clause
|
pianomania/scikit-learn
|
examples/mixture/plot_gmm_pdf.py
|
140
|
1521
|
"""
=========================================
Density Estimation for a Gaussian mixture
=========================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GaussianMixture(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20., 30.)
y = np.linspace(-20., 40.)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
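# A possible extension (a sketch, not in the original example): draw new samples
# from the fitted mixture and inspect the recovered component means, which should
# be close to the generating centers (roughly (20, 20) and (0, 0)).
X_sampled, labels = clf.sample(500)
print('estimated means:\n%s' % clf.means_)
print('drew %d new samples from the fitted mixture' % len(X_sampled))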
|
bsd-3-clause
|
rohit12/atomspace
|
scripts/make_benchmark_graphs.py
|
56
|
3139
|
#!/usr/bin/env python
# Requires matplotlib for graphing
# reads *_benchmark.csv files as output by atomspace_bm and turns them into
# graphs.
import csv
import numpy as np
import matplotlib.colors as colors
#import matplotlib.finance as finance
import matplotlib.dates as mdates
import matplotlib.ticker as mticker
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
#import matplotlib.font_manager as font_manager
import glob
import pdb
def moving_average(x, n, type='simple'):
"""
compute an n period moving average.
type is 'simple' | 'exponential'
"""
x = np.asarray(x)
if type=='simple':
weights = np.ones(n)
else:
weights = np.exp(np.linspace(-1., 0., n))
weights /= weights.sum()
a = np.convolve(x, weights, mode='full')[:len(x)]
a[:n] = a[n]
return a
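# Example (illustrative, not part of the original script): a 3-period simple
# moving average clamps the first n values to a[n]:
#   moving_average([1, 2, 3, 4, 5], 3)   # -> array([3., 3., 3., 3., 4.])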
def graph_file(fn,delta_rss=True):
print "Graphing " + fn
records = csv.reader(open(fn,'rb'),delimiter=",")
sizes=[]; times=[]; times_seconds=[]; memories=[]
for row in records:
sizes.append(int(row[0]))
times.append(int(row[1]))
memories.append(int(row[2]))
times_seconds.append(float(row[3]))
left, width = 0.1, 0.8
rect1 = [left, 0.5, width, 0.4] #left, bottom, width, height
rect2 = [left, 0.1, width, 0.4]
fig = plt.figure(facecolor='white')
axescolor = '#f6f6f6' # the axies background color
ax1 = fig.add_axes(rect1, axisbg=axescolor)
ax2 = fig.add_axes(rect2, axisbg=axescolor, sharex=ax1)
ax1.plot(sizes,times_seconds,color='black')
if len(times_seconds) > 1000:
        ax1.plot(sizes,moving_average(times_seconds,len(times_seconds) / 100),color='blue')
if delta_rss:
oldmemories = list(memories)
for i in range(1,len(memories)): memories[i] = oldmemories[i] - oldmemories[i-1]
ax2.plot(sizes,memories,color='black')
for label in ax1.get_xticklabels():
label.set_visible(False)
class MyLocator(mticker.MaxNLocator):
def __init__(self, *args, **kwargs):
mticker.MaxNLocator.__init__(self, *args, **kwargs)
def __call__(self, *args, **kwargs):
return mticker.MaxNLocator.__call__(self, *args, **kwargs)
# at most 7 ticks, pruning the upper and lower so they don't overlap
# with other ticks
fmt = mticker.ScalarFormatter()
fmt.set_powerlimits((-3, 4))
ax1.yaxis.set_major_formatter(fmt)
ax2.yaxis.set_major_locator(MyLocator(7, prune='upper'))
fmt = mticker.ScalarFormatter()
fmt.set_powerlimits((-3, 4))
ax2.yaxis.set_major_formatter(fmt)
ax2.yaxis.offsetText.set_visible(False)
fig.show()
size = int(fmt.orderOfMagnitude) / 3
labels = ["B","KB","MB","GB"]
label = labels[size]
labels = ["","(10s)","(100s)"]
label += " " + labels[int(fmt.orderOfMagnitude) % 3]
ax2.set_xlabel("AtomSpace Size")
ax2.set_ylabel("RSS " + label)
ax1.set_ylabel("Time (seconds)")
ax1.set_title(fn)
fig.show()
fig.savefig(fn+".png",format="png")
files_to_graph = glob.glob("*_benchmark.csv")
for fn in files_to_graph:
    graph_file(fn)
|
agpl-3.0
|
pradyu1993/scikit-learn
|
examples/linear_model/plot_lasso_coordinate_descent_path.py
|
3
|
2804
|
"""
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using
coordinate descent.
The coefficients can be forced to be positive.
"""
print __doc__
# Author: Alexandre Gramfort <[email protected]>
# License: BSD Style.
import numpy as np
import pylab as pl
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
X /= X.std(0) # Standardize data (easier to set the rho parameter)
###############################################################################
# Compute paths
eps = 5e-3 # the smaller it is the longer is the path
print "Computing regularization path using the lasso..."
models = lasso_path(X, y, eps=eps)
alphas_lasso = np.array([model.alpha for model in models])
coefs_lasso = np.array([model.coef_ for model in models])
print "Computing regularization path using the positive lasso..."
models = lasso_path(X, y, eps=eps, positive=True)
alphas_positive_lasso = np.array([model.alpha for model in models])
coefs_positive_lasso = np.array([model.coef_ for model in models])
print "Computing regularization path using the elastic net..."
models = enet_path(X, y, eps=eps, rho=0.8)
alphas_enet = np.array([model.alpha for model in models])
coefs_enet = np.array([model.coef_ for model in models])
print "Computing regularization path using the positve elastic net..."
models = enet_path(X, y, eps=eps, rho=0.8, positive=True)
alphas_positive_enet = np.array([model.alpha for model in models])
coefs_positive_enet = np.array([model.coef_ for model in models])
###############################################################################
# Display results
pl.figure(1)
ax = pl.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = pl.plot(coefs_lasso)
l2 = pl.plot(coefs_enet, linestyle='--')
pl.xlabel('-Log(lambda)')
pl.ylabel('weights')
pl.title('Lasso and Elastic-Net Paths')
pl.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
pl.axis('tight')
pl.figure(2)
ax = pl.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = pl.plot(coefs_lasso)
l2 = pl.plot(coefs_positive_lasso, linestyle='--')
pl.xlabel('-Log(lambda)')
pl.ylabel('weights')
pl.title('Lasso and positive Lasso')
pl.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
pl.axis('tight')
pl.figure(3)
ax = pl.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = pl.plot(coefs_enet)
l2 = pl.plot(coefs_positive_enet, linestyle='--')
pl.xlabel('-Log(lambda)')
pl.ylabel('weights')
pl.title('Elastic-Net and positive Elastic-Net')
pl.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
loc='lower left')
pl.axis('tight')
pl.show()
|
bsd-3-clause
|
birdsarah/bokeh
|
sphinx/source/docs/tutorials/exercises/boxplot.py
|
22
|
2576
|
import numpy as np
import pandas as pd
from bokeh.plotting import figure, output_file, show
# Generate some synthetic time series for six different categories
cats = list("abcdef")
score = np.random.randn(2000)
g = np.random.choice(cats, 2000)
for i, l in enumerate(cats):
score[g == l] += i // 2
df = pd.DataFrame(dict(score=score, group=g))
# Find the quartiles, IQR, and outliers for each category
groups = df.groupby('group')
q1 = groups.quantile(q=0.25)
q2 = groups.quantile(q=0.5)
q3 = groups.quantile(q=0.75)
iqr = q3 - q1
upper = q3 + 1.5*iqr
lower = q1 - 1.5*iqr
def outliers(group):
cat = group.name
return group[(group.score > upper.loc[cat][0]) | (group.score < lower.loc[cat][0])]['score']
out = groups.apply(outliers).dropna()
# Prepare outlier data for plotting; we need an x (categorical) and y (numeric)
# coordinate for every outlier.
outx = []
outy = []
for cat in cats:
# only add outliers if they exist
if not out.loc[cat].empty:
for value in out[cat]:
outx.append(cat)
outy.append(value)
# EXERCISE: output static HTML file
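# One possible answer (a sketch, not part of the original exercise; the file
# name is an arbitrary choice):
output_file("boxplot.html")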
# create a figure with the categories as the default x-range
p = figure(title="", tools="", background_fill="#EFE8E2", x_range=cats)
# If no outliers, shrink lengths of stems to be no longer than the minimums or maximums
qmin = groups.quantile(q=0.00)
qmax = groups.quantile(q=1.00)
upper.score = [min([x,y]) for (x,y) in zip(list(qmax.iloc[:,0]),upper.score) ]
lower.score = [max([x,y]) for (x,y) in zip(list(qmin.iloc[:,0]),lower.score) ]
# Draw the upper segment extending from the box plot using `p.segment` which
# takes x0, x1 and y0, y1 as data
p.segment(cats, upper.score, cats, q3.score, line_width=2, line_color="black")
# EXERCISE: use `p.segment` to draw the lower segment
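# One possible answer (a sketch mirroring the upper-segment call above):
p.segment(cats, lower.score, cats, q1.score, line_width=2, line_color="black")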
# Draw the upper box of the box plot using `p.rect`
p.rect(cats, (q3.score+q2.score)/2, 0.7, q3.score-q2.score,
fill_color="#E08E79", line_width=2, line_color="black")
# EXERCISE: use `p.rect` to draw the bottom box with a different color
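# One possible answer (a sketch; the fill color is an arbitrary choice):
p.rect(cats, (q2.score+q1.score)/2, 0.7, q2.score-q1.score,
       fill_color="#3B8686", line_width=2, line_color="black")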
# OK here we use `p.rect` to draw the whiskers. It's slightly cheating, but it's
# easier than using segments or lines, since we can specify widths simply with
# categorical percentage units
p.rect(cats, lower.score, 0.2, 0.01, line_color="black")
p.rect(cats, upper.score, 0.2, 0.01, line_color="black")
# EXERCISE: use `p.circle` to draw the outliers
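# One possible answer (a sketch; marker size and color are arbitrary choices):
p.circle(outx, outy, size=6, color="#F38630", fill_alpha=0.6)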
# EXERCISE: use `p.grid`, `p.axis`, etc. to style the plot. Some suggestions:
# - remove the X grid lines, change the Y grid line color
# - make the tick labels bigger
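# One possible styling pass (a sketch; attribute names follow the bokeh API
# already used elsewhere in this exercise):
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = "white"
p.grid.grid_line_width = 2
p.xaxis.major_label_text_font_size = "12pt"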
show(p)
|
bsd-3-clause
|
einarhuseby/arctic
|
tests/unit/tickstore/test_toplevel.py
|
3
|
9526
|
from mock import Mock, patch, MagicMock, create_autospec, sentinel, call
import pytest
from datetime import datetime as dt
import pandas as pd
from pandas.util.testing import assert_frame_equal
import numpy as np
from mockextras import when
from arctic.date import DateRange, mktz
from arctic.exceptions import OverlappingDataException
from arctic.tickstore.toplevel import TopLevelTickStore, TickStoreLibrary
from dateutil.rrule import rrule, DAILY
from arctic.tickstore.tickstore import TickStore
def test_raise_exception_if_daterange_is_not_provided():
store = TopLevelTickStore(Mock())
with pytest.raises(Exception) as e:
store._get_library_metadata(None)
assert "A date range must be provided" in str(e)
def test_raise_exception_if_date_range_does_not_contain_start_date():
store = TopLevelTickStore(Mock())
dr = DateRange(start=None, end=dt(2011, 1, 1))
with pytest.raises(Exception) as e:
store._get_library_metadata(dr)
assert "The date range {0} must contain a start and end date".format(dr) in str(e)
def test_raise_exception_if_date_range_does_not_contain_end_date():
store = TopLevelTickStore(Mock())
dr = DateRange(start=dt(2011, 1, 1), end=None)
with pytest.raises(Exception) as e:
store._get_library_metadata(dr)
assert "The date range {0} must contain a start and end date".format(dr) in str(e)
def test_raise_exception_if_date_range_does_not_contain_start_and_end_date():
store = TopLevelTickStore(Mock())
dr = DateRange(start=None, end=None)
with pytest.raises(Exception) as e:
store._get_library_metadata(dr)
assert "The date range {0} must contain a start and end date".format(dr) in str(e)
def test_raise_exception_and_log_an_error_if_an_invalid_library_name_is_added():
arctic_lib = MagicMock()
arctic_lib.arctic.__getitem__.side_effect = Exception()
store = TopLevelTickStore(arctic_lib)
with patch("arctic.tickstore.toplevel.logger") as mock_logger:
with pytest.raises(Exception):
store.add(None, "blah")
mock_logger.error.assert_called_once_with("Could not load library")
def test_raise_exception_if_date_range_overlaps():
self = create_autospec(TopLevelTickStore, _arctic_lib=MagicMock())
self._get_library_metadata.return_value = [TickStoreLibrary('lib1', None), ]
with pytest.raises(OverlappingDataException) as e:
TopLevelTickStore.add(self, DateRange(start=dt(2010, 1, 1), end=dt(2011, 1, 1, 23, 59, 59, 999000)), "blah")
assert "There are libraries that overlap with the date range:" in str(e)
@pytest.mark.parametrize(('start', 'end', 'expected_start', 'expected_end'),
[(dt(2010, 1, 1, tzinfo=mktz('UTC')), dt(2010, 12, 31, 23, 59, 59, 999000, tzinfo=mktz('UTC')),
dt(2010, 1, 1, tzinfo=mktz('UTC')), dt(2010, 12, 31, 23, 59, 59, 999000, tzinfo=mktz('UTC'))),
(dt(2010, 1, 1), dt(2010, 12, 31, 23, 59, 59, 999000), dt(2010, 1, 1, tzinfo=mktz('UTC')),
dt(2010, 12, 31, 23, 59, 59, 999000, tzinfo=mktz('UTC'))),
(dt(2009, 12, 31, 19, tzinfo=mktz('America/New_York')), dt(2010, 12, 31, 18, 59, 59, 999000, tzinfo=mktz('America/New_York')),
dt(2010, 1, 1, tzinfo=mktz('UTC')), dt(2010, 12, 31, 23, 59, 59, 999000, tzinfo=mktz('UTC')))
])
def test_add_library_to_collection_if_date_range_is_on_UTC_or_naive_day_boundaries(start, end, expected_start, expected_end):
self = create_autospec(TopLevelTickStore, _arctic_lib=MagicMock(), _collection=MagicMock())
self._get_library_metadata.return_value = []
TopLevelTickStore.add(self, DateRange(start=start, end=end), "blah")
self._collection.update_one.assert_called_once_with({'library_name': "blah"},
{'$set':
{'start': expected_start,
'end': expected_end}}, upsert=True)
@pytest.mark.parametrize(('start', 'end'),
[(dt(2010, 1, 1, 2, tzinfo=mktz('UTC')), dt(2011, 1, 1, tzinfo=mktz('UTC'))),
(dt(2010, 1, 1, tzinfo=mktz('UTC')), dt(2011, 1, 1, 2, tzinfo=mktz('UTC'))),
(dt(2010, 1, 1, 2, tzinfo=mktz('UTC')), dt(2011, 1, 1, 2, tzinfo=mktz('UTC'))),
(dt(2010, 1, 1, 2), dt(2011, 1, 1)),
(dt(2010, 1, 1), dt(2011, 1, 1, 2)),
(dt(2010, 1, 1, 2), dt(2011, 1, 1, 2)),
(dt(2009, 12, 31, 21, 10, tzinfo=mktz('America/New_York')), dt(2010, 12, 31, tzinfo=mktz('America/New_York'))),
(dt(2009, 12, 31, tzinfo=mktz('America/New_York')), dt(2010, 12, 31, tzinfo=mktz('America/New_York'))),
(dt(2009, 12, 31, 21, 10, tzinfo=mktz('America/New_York')), dt(2010, 12, 31, 9, 21, tzinfo=mktz('America/New_York')))
])
def test_raise_error_add_library_is_called_with_a_date_range_not_on_day_boundaries(start, end):
with pytest.raises(AssertionError) as e:
self = create_autospec(TopLevelTickStore, _arctic_lib=MagicMock(), _collection=MagicMock())
self._get_library_metadata.return_value = []
TopLevelTickStore.add(self, DateRange(start=start, end=end), "blah")
assert "Date range should fall on UTC day boundaries" in str(e)
@pytest.mark.parametrize(('start', 'end', 'expected_start_index', 'expected_end_index'),
[(dt(2010, 1, 1), dt(2010, 1, 5), 0, 3),
(dt(2010, 1, 1), dt(2010, 1, 6), 0, 3),
(dt(2010, 1, 1, 1), dt(2010, 1, 6), 1, 3),
(dt(2010, 1, 1, 1), dt(2010, 1, 4, 2), 1, 2),
(dt(2009, 1, 1), dt(2010, 1, 5), 0, 3),
])
def test_slice_pandas_dataframe(start, end, expected_start_index, expected_end_index):
top_level_tick_store = TopLevelTickStore(Mock())
dates = pd.date_range('20100101', periods=5, freq='2D')
data = pd.DataFrame(np.random.randn(5, 4), index=dates, columns=list('ABCD'))
expected = data.ix[expected_start_index:expected_end_index]
result = top_level_tick_store._slice(data, start, end)
assert_frame_equal(expected, result), '{}\n{}'.format(expected, result)
@pytest.mark.parametrize(('start', 'end', 'expected_start_index', 'expected_end_index'),
[(dt(2010, 1, 1), dt(2010, 1, 5), 0, 3),
(dt(2010, 1, 1), dt(2010, 1, 6), 0, 3),
(dt(2010, 1, 1, 1), dt(2010, 1, 6), 1, 3),
(dt(2010, 1, 1, 1), dt(2010, 1, 4, 2), 1, 2),
(dt(2009, 1, 1), dt(2010, 1, 5), 0, 3),
])
def test_slice_list_of_dicts(start, end, expected_start_index, expected_end_index):
top_level_tick_store = TopLevelTickStore(Mock())
dates = list(rrule(DAILY, count=5, dtstart=dt(2010, 1, 1), interval=2))
data = [{'index': date, 'A': val} for date, val in zip(dates, range(5))]
expected = data[expected_start_index:expected_end_index]
result = top_level_tick_store._slice(data, start, end)
assert expected == result
def test_write_pandas_data_to_right_libraries():
self = create_autospec(TopLevelTickStore, _arctic_lib=MagicMock(), _collection=MagicMock())
self._collection.find.return_value = [{'library_name': sentinel.libname1, 'start': sentinel.st1, 'end': sentinel.end1},
{'library_name': sentinel.libname2, 'start': sentinel.st2, 'end': sentinel.end2}]
slice1 = range(2)
slice2 = range(4)
when(self._slice).called_with(sentinel.data, sentinel.st1, sentinel.end1).then(slice1)
when(self._slice).called_with(sentinel.data, sentinel.st2, sentinel.end2).then(slice2)
mock_lib1 = Mock()
mock_lib2 = Mock()
when(self._arctic_lib.arctic.__getitem__).called_with(sentinel.libname1).then(mock_lib1)
when(self._arctic_lib.arctic.__getitem__).called_with(sentinel.libname2).then(mock_lib2)
TopLevelTickStore.write(self, 'blah', sentinel.data)
mock_lib1.write.assert_called_once_with('blah', slice1)
mock_lib2.write.assert_called_once_with('blah', slice2)
def test_read():
self = create_autospec(TopLevelTickStore)
tsl = TickStoreLibrary(create_autospec(TickStore), create_autospec(DateRange))
self._get_libraries.return_value = [tsl, tsl]
dr = create_autospec(DateRange)
with patch('pandas.concat') as concat:
res = TopLevelTickStore.read(self, sentinel.symbol, dr,
columns=sentinel.include_columns,
include_images=sentinel.include_images)
assert concat.call_args_list == [call([tsl.library.read.return_value,
tsl.library.read.return_value])]
assert res == concat.return_value
assert tsl.library.read.call_args_list == [call(sentinel.symbol, tsl.date_range.intersection.return_value,
sentinel.include_columns, include_images=sentinel.include_images),
call(sentinel.symbol, tsl.date_range.intersection.return_value,
sentinel.include_columns, include_images=sentinel.include_images)]
|
lgpl-2.1
|
Clyde-fare/scikit-learn
|
sklearn/grid_search.py
|
32
|
36586
|
"""
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>
# Andreas Mueller <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin, ChangedBehaviorWarning
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
        a sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
This estimator will be cloned and then fitted.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
    # A raw namedtuple is very memory efficient as it packs the attributes
    # in a struct to get rid of the __dict__ of attributes; in particular it
    # does not copy the string for the keys on each instance.
    # Deriving a namedtuple subclass just to introduce the __repr__ method
    # would also reintroduce the __dict__ on the instance, so we tell the
    # Python interpreter that this subclass uses static __slots__ instead of
    # dynamic attributes. Furthermore we don't need any additional slot in the
    # subclass, so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate='estimator')
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate='estimator')
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
# Out is a list of triplet: score, estimator, n_test_samples
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
used to predict is optimized by cross-validation.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each grid point.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default 1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : integer or cross-validation generator, default=3
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if estimator is a classifier
and the target y is binary or multiclass, or the number
of folds in KFold otherwise.
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape=None, degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
        generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
used to predict is optimized by cross-validation.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each parameter setting.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : integer or cross-validation generator, optional
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if estimator is a classifier
and the target y is binary or multiclass, or the number
of folds in KFold otherwise.
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
        A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
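# Example usage of RandomizedSearchCV (a sketch, not part of the original module;
# `X` and `y` stand for any training data):
#
#     from scipy.stats import expon
#     from sklearn.svm import SVC
#     search = RandomizedSearchCV(SVC(), {'C': expon(scale=100),
#                                         'gamma': expon(scale=.1)},
#                                 n_iter=20, random_state=0)
#     search.fit(X, y)
#     print(search.best_params_)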
|
bsd-3-clause
|
vybstat/scikit-learn
|
doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py
|
256
|
2406
|
"""Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
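    # One possible pipeline (a sketch, not part of the original skeleton; the
    # min_df/max_df and C values are illustrative):
    pipeline = Pipeline([
        ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
        ('clf', LinearSVC(C=1000)),
    ])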
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
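    # One possible grid search over unigrams vs. unigrams+bigrams (a sketch;
    # n_jobs=-1 uses all available cores):
    parameters = {'vect__ngram_range': [(1, 1), (1, 2)]}
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
    grid_search.fit(docs_train, y_train)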
    # TASK: print the cross-validated scores for each parameter set
    # explored by the grid search
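    # With the old grid_search API, grid_scores_ holds one (parameters, mean
    # score, per-fold scores) tuple per candidate (a sketch):
    for params, mean_score, scores in grid_search.grid_scores_:
        print("%0.3f (+/-%0.03f) for %r" % (mean_score, scores.std() * 2, params))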
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
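    # The report below assumes a prediction along these lines (a sketch):
    y_predicted = grid_search.predict(docs_test)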
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
|
bsd-3-clause
|
CCBatIIT/AlGDock
|
AlGDock/ForceFields/Grid/test_Interpolation.py
|
1
|
2560
|
import AlGDock
from MMTK import *
import Interpolation
from MMTK.ForceFields.ForceFieldTest import gradientTest
universe = InfiniteUniverse()
universe.atom1 = Atom('C', position=Vector(1.1, 0.5, 1.5))
universe.atom1.test_charge = 1.
universe.atom2 = Atom('C', position=Vector(1.553, 1.724, 1.464))
universe.atom2.test_charge = -0.2
param_sets = [\
{'interpolation_type':'Trilinear', 'inv_power':None, 'energy_thresh':-1.0},
{'interpolation_type':'Trilinear', 'inv_power':2, 'energy_thresh':-1.0},
{'interpolation_type':'Trilinear', 'inv_power':4, 'energy_thresh':-1.0},
{'interpolation_type':'BSpline', 'inv_power':None, 'energy_thresh':-1.0},
{'interpolation_type':'BSpline', 'inv_power':-3, 'energy_thresh':-1.0}]
# {'interpolation_type':'Trilinear', 'inv_power':None, 'energy_thresh':10.0},
# {'interpolation_type':'CatmullRom', 'inv_power':None, 'energy_thresh':-1.0},
# {'interpolation_type':'CatmullRom', 'inv_power':-3, 'energy_thresh':-1.0},
# {'interpolation_type':'Tricubic', 'inv_power':None, 'energy_thresh':-1.0},
# {'interpolation_type':'Tricubic', 'inv_power':-3, 'energy_thresh':-1.0}]
steps = 50000
import numpy as np
from collections import OrderedDict
Es = OrderedDict()
x=np.linspace(1.35,1.6,steps)
for params in param_sets:
print
print params
print
ForceField = Interpolation.InterpolationForceField(\
'../../../Example/grids/LJa.nc',
interpolation_type=params['interpolation_type'],
inv_power=params['inv_power'],
energy_thresh=params['energy_thresh'],
scaling_property='test_charge')
universe.setForceField(ForceField)
universe.atom1.setPosition(Vector(x[0],0.5,1.5))
print 'Energy Terms:'
print universe.energyTerms()
e, g = universe.energyAndGradients()
print 'Gradient on Atom 1'
print g[universe.atom1]
print 'Gradient on Atom 2'
print g[universe.atom2]
print 'Gradient Test'
gradientTest(universe)
import time
start_time = time.time()
key = params['interpolation_type']
if params['inv_power'] is not None:
key += ', x**%d'%params['inv_power']
if params['energy_thresh']>0:
key += ', e<%f'%params['energy_thresh']
Es[key] = np.zeros((steps,1))
for n in range(steps):
universe.atom1.setPosition(Vector(x[n],0.5,1.5))
e, g = universe.energyAndGradients()
Es[key][n] = e
print 'Time to do %d energy and gradient evaluations: %f s'%(\
steps, time.time()-start_time)
import matplotlib.pyplot as plt
for key in Es.keys():
plt.plot(x,Es[key])
plt.legend(Es.keys())
plt.savefig('test_Interpolation_results.jpg')
|
mit
|
samueldotj/TeeRISC-Simulator
|
util/stats/output.py
|
90
|
7981
|
# Copyright (c) 2005-2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from chart import ChartOptions
class StatOutput(ChartOptions):
def __init__(self, jobfile, info, stat=None):
super(StatOutput, self).__init__()
self.jobfile = jobfile
self.stat = stat
self.invert = False
self.info = info
def display(self, name, printmode = 'G'):
import info
if printmode == 'G':
valformat = '%g'
elif printmode != 'F' and value > 1e6:
valformat = '%0.5e'
else:
valformat = '%f'
for job in self.jobfile.jobs():
value = self.info.get(job, self.stat)
if value is None:
return
if not isinstance(value, list):
value = [ value ]
if self.invert:
for i,val in enumerate(value):
if val != 0.0:
value[i] = 1 / val
valstring = ', '.join([ valformat % val for val in value ])
print '%-50s %s' % (job.name + ':', valstring)
def graph(self, name, graphdir, proxy=None):
from os.path import expanduser, isdir, join as joinpath
from barchart import BarChart
from matplotlib.numerix import Float, array, zeros
import os, re, urllib
from jobfile import crossproduct
confgroups = self.jobfile.groups()
ngroups = len(confgroups)
skiplist = [ False ] * ngroups
groupopts = []
baropts = []
groups = []
for i,group in enumerate(confgroups):
if group.flags.graph_group:
groupopts.append(group.subopts())
skiplist[i] = True
elif group.flags.graph_bars:
baropts.append(group.subopts())
skiplist[i] = True
else:
groups.append(group)
has_group = bool(groupopts)
if has_group:
groupopts = [ group for group in crossproduct(groupopts) ]
else:
groupopts = [ None ]
if baropts:
baropts = [ bar for bar in crossproduct(baropts) ]
else:
raise AttributeError, 'No group selected for graph bars'
directory = expanduser(graphdir)
if not isdir(directory):
os.mkdir(directory)
html = file(joinpath(directory, '%s.html' % name), 'w')
print >>html, '<html>'
print >>html, '<title>Graphs for %s</title>' % name
print >>html, '<body>'
html.flush()
for options in self.jobfile.options(groups):
chart = BarChart(self)
data = [ [ None ] * len(baropts) for i in xrange(len(groupopts)) ]
enabled = False
stacked = 0
for g,gopt in enumerate(groupopts):
for b,bopt in enumerate(baropts):
if gopt is None:
gopt = []
job = self.jobfile.job(options + gopt + bopt)
if not job:
continue
if proxy:
import db
proxy.dict['system'] = self.info[job.system]
val = self.info.get(job, self.stat)
if val is None:
print 'stat "%s" for job "%s" not found' % \
(self.stat, job)
if isinstance(val, (list, tuple)):
if len(val) == 1:
val = val[0]
else:
stacked = len(val)
data[g][b] = val
if stacked == 0:
for i in xrange(len(groupopts)):
for j in xrange(len(baropts)):
if data[i][j] is None:
data[i][j] = 0.0
else:
for i in xrange(len(groupopts)):
for j in xrange(len(baropts)):
val = data[i][j]
if val is None:
data[i][j] = [ 0.0 ] * stacked
elif len(val) != stacked:
raise ValueError, "some stats stacked, some not"
data = array(data)
if data.sum() == 0:
continue
dim = len(data.shape)
x = data.shape[0]
xkeep = [ i for i in xrange(x) if data[i].sum() != 0 ]
y = data.shape[1]
ykeep = [ i for i in xrange(y) if data[:,i].sum() != 0 ]
data = data.take(xkeep, axis=0)
data = data.take(ykeep, axis=1)
if not has_group:
data = data.take([ 0 ], axis=0)
chart.data = data
bopts = [ baropts[i] for i in ykeep ]
bdescs = [ ' '.join([o.desc for o in opt]) for opt in bopts]
if has_group:
gopts = [ groupopts[i] for i in xkeep ]
gdescs = [ ' '.join([o.desc for o in opt]) for opt in gopts]
if chart.legend is None:
if stacked:
try:
chart.legend = self.info.rcategories
except:
chart.legend = [ str(i) for i in xrange(stacked) ]
else:
chart.legend = bdescs
if chart.xticks is None:
if has_group:
chart.xticks = gdescs
else:
chart.xticks = []
chart.graph()
names = [ opt.name for opt in options ]
descs = [ opt.desc for opt in options ]
if names[0] == 'run':
names = names[1:]
descs = descs[1:]
basename = '%s-%s' % (name, ':'.join(names))
desc = ' '.join(descs)
pngname = '%s.png' % basename
            psname = '%s.ps' % re.sub(':', '-', basename)
            epsname = '%s.eps' % re.sub(':', '-', basename)
chart.savefig(joinpath(directory, pngname))
chart.savefig(joinpath(directory, epsname))
chart.savefig(joinpath(directory, psname))
html_name = urllib.quote(pngname)
print >>html, '''%s<br><img src="%s"><br>''' % (desc, html_name)
html.flush()
print >>html, '</body>'
print >>html, '</html>'
html.close()
|
bsd-3-clause
|
ARudiuk/mne-python
|
mne/viz/tests/test_topomap.py
|
1
|
11019
|
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
#
# License: Simplified BSD
import os.path as op
import warnings
import numpy as np
from numpy.testing import assert_raises, assert_array_equal
from nose.tools import assert_true, assert_equal
from mne import io, read_evokeds, read_proj
from mne.io.constants import FIFF
from mne.channels import read_layout, make_eeg_layout
from mne.datasets import testing
from mne.time_frequency.tfr import AverageTFR
from mne.utils import slow_test, run_tests_if_main
from mne.viz import plot_evoked_topomap, plot_projs_topomap
from mne.viz.topomap import (_check_outlines, _onselect, plot_topomap)
from mne.viz.utils import _find_peaks
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_dir = testing.data_path(download=False)
subjects_dir = op.join(data_dir, 'subjects')
ecg_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_ecg-proj.fif')
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
layout = read_layout('Vectorview-all')
def _get_raw():
return io.read_raw_fif(raw_fname, preload=False)
@slow_test
@testing.requires_testing_data
def test_plot_topomap():
"""Test topomap plotting
"""
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
# evoked
warnings.simplefilter('always')
res = 16
evoked = read_evokeds(evoked_fname, 'Left Auditory',
baseline=(None, 0))
# Test animation
_, anim = evoked.animate_topomap(ch_type='grad', times=[0, 0.1],
butterfly=False)
anim._func(1) # _animate has to be tested separately on 'Agg' backend.
plt.close('all')
ev_bad = evoked.copy().pick_types(meg=False, eeg=True)
ev_bad.pick_channels(ev_bad.ch_names[:2])
ev_bad.plot_topomap(times=ev_bad.times[:2] - 1e-6) # auto, plots EEG
assert_raises(ValueError, ev_bad.plot_topomap, ch_type='mag')
assert_raises(TypeError, ev_bad.plot_topomap, head_pos='foo')
assert_raises(KeyError, ev_bad.plot_topomap, head_pos=dict(foo='bar'))
assert_raises(ValueError, ev_bad.plot_topomap, head_pos=dict(center=0))
assert_raises(ValueError, ev_bad.plot_topomap, times=[-100]) # bad time
assert_raises(ValueError, ev_bad.plot_topomap, times=[[0]]) # bad time
assert_raises(ValueError, ev_bad.plot_topomap, times=[[0]]) # bad time
evoked.plot_topomap(0.1, layout=layout, scale=dict(mag=0.1))
plt.close('all')
axes = [plt.subplot(221), plt.subplot(222)]
evoked.plot_topomap(axes=axes, colorbar=False)
plt.close('all')
evoked.plot_topomap(times=[-0.1, 0.2])
plt.close('all')
mask = np.zeros_like(evoked.data, dtype=bool)
mask[[1, 5], :] = True
evoked.plot_topomap(ch_type='mag', outlines=None)
times = [0.1]
evoked.plot_topomap(times, ch_type='eeg', res=res, scale=1)
evoked.plot_topomap(times, ch_type='grad', mask=mask, res=res)
evoked.plot_topomap(times, ch_type='planar1', res=res)
evoked.plot_topomap(times, ch_type='planar2', res=res)
evoked.plot_topomap(times, ch_type='grad', mask=mask, res=res,
show_names=True, mask_params={'marker': 'x'})
plt.close('all')
assert_raises(ValueError, evoked.plot_topomap, times, ch_type='eeg',
res=res, average=-1000)
assert_raises(ValueError, evoked.plot_topomap, times, ch_type='eeg',
res=res, average='hahahahah')
p = evoked.plot_topomap(times, ch_type='grad', res=res,
show_names=lambda x: x.replace('MEG', ''),
image_interp='bilinear')
subplot = [x for x in p.get_children() if
isinstance(x, matplotlib.axes.Subplot)][0]
assert_true(all('MEG' not in x.get_text()
for x in subplot.get_children()
if isinstance(x, matplotlib.text.Text)))
# Plot array
for ch_type in ('mag', 'grad'):
evoked_ = evoked.copy().pick_types(eeg=False, meg=ch_type)
plot_topomap(evoked_.data[:, 0], evoked_.info)
# fail with multiple channel types
assert_raises(ValueError, plot_topomap, evoked.data[0, :], evoked.info)
# Test title
def get_texts(p):
return [x.get_text() for x in p.get_children() if
isinstance(x, matplotlib.text.Text)]
p = evoked.plot_topomap(times, ch_type='eeg', res=res, average=0.01)
assert_equal(len(get_texts(p)), 0)
p = evoked.plot_topomap(times, ch_type='eeg', title='Custom', res=res)
texts = get_texts(p)
assert_equal(len(texts), 1)
assert_equal(texts[0], 'Custom')
plt.close('all')
# delaunay triangulation warning
with warnings.catch_warnings(record=True): # can't show
warnings.simplefilter('always')
evoked.plot_topomap(times, ch_type='mag', layout=None, res=res)
assert_raises(RuntimeError, plot_evoked_topomap, evoked, 0.1, 'mag',
proj='interactive') # projs have already been applied
# change to no-proj mode
evoked = read_evokeds(evoked_fname, 'Left Auditory',
baseline=(None, 0), proj=False)
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
evoked.plot_topomap(0.1, 'mag', proj='interactive', res=res)
assert_raises(RuntimeError, plot_evoked_topomap, evoked,
np.repeat(.1, 50))
assert_raises(ValueError, plot_evoked_topomap, evoked, [-3e12, 15e6])
with warnings.catch_warnings(record=True): # file conventions
warnings.simplefilter('always')
projs = read_proj(ecg_fname)
projs = [pp for pp in projs if pp['desc'].lower().find('eeg') < 0]
plot_projs_topomap(projs, res=res)
plt.close('all')
ax = plt.subplot(111)
plot_projs_topomap([projs[0]], res=res, axes=ax) # test axes param
plt.close('all')
for ch in evoked.info['chs']:
if ch['coil_type'] == FIFF.FIFFV_COIL_EEG:
ch['loc'].fill(0)
# Remove extra digitization point, so EEG digitization points
# correspond with the EEG electrodes
del evoked.info['dig'][85]
pos = make_eeg_layout(evoked.info).pos[:, :2]
pos, outlines = _check_outlines(pos, 'head')
assert_true('head' in outlines.keys())
assert_true('nose' in outlines.keys())
assert_true('ear_left' in outlines.keys())
assert_true('ear_right' in outlines.keys())
assert_true('autoshrink' in outlines.keys())
assert_true(outlines['autoshrink'])
assert_true('clip_radius' in outlines.keys())
assert_array_equal(outlines['clip_radius'], 0.5)
pos, outlines = _check_outlines(pos, 'skirt')
assert_true('head' in outlines.keys())
assert_true('nose' in outlines.keys())
assert_true('ear_left' in outlines.keys())
assert_true('ear_right' in outlines.keys())
assert_true('autoshrink' in outlines.keys())
assert_true(not outlines['autoshrink'])
assert_true('clip_radius' in outlines.keys())
assert_array_equal(outlines['clip_radius'], 0.625)
pos, outlines = _check_outlines(pos, 'skirt',
head_pos={'scale': [1.2, 1.2]})
assert_array_equal(outlines['clip_radius'], 0.75)
# Plot skirt
evoked.plot_topomap(times, ch_type='eeg', outlines='skirt')
# Pass custom outlines without patch
evoked.plot_topomap(times, ch_type='eeg', outlines=outlines)
plt.close('all')
# Pass custom outlines with patch callable
def patch():
return Circle((0.5, 0.4687), radius=.46,
clip_on=True, transform=plt.gca().transAxes)
outlines['patch'] = patch
plot_evoked_topomap(evoked, times, ch_type='eeg', outlines=outlines)
# Remove digitization points. Now topomap should fail
evoked.info['dig'] = None
assert_raises(RuntimeError, plot_evoked_topomap, evoked,
times, ch_type='eeg')
plt.close('all')
# Error for missing names
n_channels = len(pos)
data = np.ones(n_channels)
assert_raises(ValueError, plot_topomap, data, pos, show_names=True)
# Test error messages for invalid pos parameter
pos_1d = np.zeros(n_channels)
pos_3d = np.zeros((n_channels, 2, 2))
assert_raises(ValueError, plot_topomap, data, pos_1d)
assert_raises(ValueError, plot_topomap, data, pos_3d)
assert_raises(ValueError, plot_topomap, data, pos[:3, :])
pos_x = pos[:, :1]
pos_xyz = np.c_[pos, np.zeros(n_channels)[:, np.newaxis]]
assert_raises(ValueError, plot_topomap, data, pos_x)
assert_raises(ValueError, plot_topomap, data, pos_xyz)
# An #channels x 4 matrix should work though. In this case (x, y, width,
# height) is assumed.
pos_xywh = np.c_[pos, np.zeros((n_channels, 2))]
plot_topomap(data, pos_xywh)
plt.close('all')
# Test peak finder
axes = [plt.subplot(131), plt.subplot(132)]
with warnings.catch_warnings(record=True): # rightmost column
evoked.plot_topomap(times='peaks', axes=axes)
plt.close('all')
evoked.data = np.zeros(evoked.data.shape)
evoked.data[50][1] = 1
assert_array_equal(_find_peaks(evoked, 10), evoked.times[1])
evoked.data[80][100] = 1
assert_array_equal(_find_peaks(evoked, 10), evoked.times[[1, 100]])
evoked.data[2][95] = 2
assert_array_equal(_find_peaks(evoked, 10), evoked.times[[1, 95]])
assert_array_equal(_find_peaks(evoked, 1), evoked.times[95])
def test_plot_tfr_topomap():
"""Test plotting of TFR data
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
raw = _get_raw()
times = np.linspace(-0.1, 0.1, 200)
n_freqs = 3
nave = 1
rng = np.random.RandomState(42)
data = rng.randn(len(raw.ch_names), n_freqs, len(times))
tfr = AverageTFR(raw.info, data, times, np.arange(n_freqs), nave)
tfr.plot_topomap(ch_type='mag', tmin=0.05, tmax=0.150, fmin=0, fmax=10,
res=16)
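    # Build synthetic matplotlib mouse events that mimic dragging a
    # selection rectangle from about (0.1, 0.1) to (0.3, 0.2) in data
    # coordinates, so the _onselect callback can be exercised without a
    # real GUI interaction.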
eclick = mpl.backend_bases.MouseEvent('button_press_event',
plt.gcf().canvas, 0, 0, 1)
eclick.xdata = 0.1
eclick.ydata = 0.1
eclick.inaxes = plt.gca()
erelease = mpl.backend_bases.MouseEvent('button_release_event',
plt.gcf().canvas, 0.9, 0.9, 1)
erelease.xdata = 0.3
erelease.ydata = 0.2
pos = [[0.11, 0.11], [0.25, 0.5], [0.0, 0.2], [0.2, 0.39]]
_onselect(eclick, erelease, tfr, pos, 'mag', 1, 3, 1, 3, 'RdBu_r', list())
tfr._onselect(eclick, erelease, None, 'mean', None)
plt.close('all')
run_tests_if_main()
|
bsd-3-clause
|
fabianp/scikit-learn
|
sklearn/metrics/tests/test_classification.py
|
28
|
53546
|
from __future__ import division, print_function
import numpy as np
from scipy import linalg
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer
from sklearn.preprocessing import label_binarize
from sklearn.utils.fixes import np_version
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics.classification import _check_targets
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
    If binary is True, restrict to a binary classification problem instead
    of a multiclass classification problem.
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, np.logical_not(y2)), 0)
assert_equal(accuracy_score(y1, np.logical_not(y1)), 0)
assert_equal(accuracy_score(y1, np.zeros(y1.shape)), 0)
assert_equal(accuracy_score(y2, np.zeros(y1.shape)), 0)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuple of label
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, [(), ()]), 0)
assert_equal(accuracy_score(y1, y2, normalize=False), 1)
assert_equal(accuracy_score(y1, y1, normalize=False), 2)
assert_equal(accuracy_score(y2, y2, normalize=False), 2)
assert_equal(accuracy_score(y2, [(), ()], normalize=False), 0)
def test_precision_recall_f1_score_binary():
# Test Precision Recall and F1 Score for binary classification task
y_true, y_pred, _ = make_prediction(binary=True)
# detailed measures for each class
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.73, 0.85], 2)
assert_array_almost_equal(r, [0.88, 0.68], 2)
assert_array_almost_equal(f, [0.80, 0.76], 2)
assert_array_equal(s, [25, 25])
# individual scoring function that can be used for grid search: in the
# binary class case the score is the value of the measure for the positive
# class (e.g. label == 1). This is deprecated for average != 'binary'.
assert_dep_warning = partial(assert_warns, DeprecationWarning)
for kwargs, my_assert in [({}, assert_no_warnings),
({'average': 'binary'}, assert_no_warnings),
({'average': 'micro'}, assert_dep_warning)]:
ps = my_assert(precision_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(ps, 0.85, 2)
rs = my_assert(recall_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(rs, 0.68, 2)
fs = my_assert(f1_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(fs, 0.76, 2)
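        # The expected value below is simply the F-beta definition with
        # beta = 2: F_beta = (1 + beta**2) * P * R / (beta**2 * P + R),
        # evaluated at the precision and recall checked above.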
assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
**kwargs),
(1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
@ignore_warnings
def test_precision_recall_f_binary_single_class():
# Test precision, recall and F1 score behave with a single positive or
# negative class
# Such a case may occur with non-stratified cross-validation
assert_equal(1., precision_score([1, 1], [1, 1]))
assert_equal(1., recall_score([1, 1], [1, 1]))
assert_equal(1., f1_score([1, 1], [1, 1]))
assert_equal(0., precision_score([-1, -1], [-1, -1]))
assert_equal(0., recall_score([-1, -1], [-1, -1]))
assert_equal(0., f1_score([-1, -1], [-1, -1]))
@ignore_warnings
def test_precision_recall_f_extra_labels():
"""Test handling of explicit additional (not in input) labels to PRF
"""
y_true = [1, 3, 3, 2]
y_pred = [1, 1, 3, 2]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
# No average: zeros in array
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average=None)
assert_array_almost_equal([0., 1., 1., .5, 0.], actual)
# Macro average is changed
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average='macro')
assert_array_almost_equal(np.mean([0., 1., 1., .5, 0.]), actual)
        # No effect otherwise
for average in ['micro', 'weighted', 'samples']:
if average == 'samples' and i == 0:
continue
assert_almost_equal(recall_score(y_true, y_pred,
labels=[0, 1, 2, 3, 4],
average=average),
recall_score(y_true, y_pred, labels=None,
average=average))
# Error when introducing invalid label in multilabel case
# (although it would only affect performance if average='macro'/None)
for average in [None, 'macro', 'micro', 'samples']:
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(6), average=average)
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(-1, 4), average=average)
@ignore_warnings
def test_precision_recall_f_ignored_labels():
"""Test a subset of labels may be requested for PRF"""
y_true = [1, 1, 2, 3]
y_pred = [1, 3, 3, 3]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3])
recall_all = partial(recall_score, y_true, y_pred, labels=None)
assert_array_almost_equal([.5, 1.], recall_13(average=None))
assert_almost_equal((.5 + 1.) / 2, recall_13(average='macro'))
assert_almost_equal((.5 * 2 + 1. * 1) / 3,
recall_13(average='weighted'))
assert_almost_equal(2. / 3, recall_13(average='micro'))
# ensure the above were meaningful tests:
for average in ['macro', 'weighted', 'micro']:
assert_not_equal(recall_13(average=average),
recall_all(average=average))
def test_average_precision_score_score_non_binary_class():
# Test that average_precision_score function returns an error when trying
# to compute average_precision_score for multiclass task.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
average_precision_score, y_true, y_pred)
def test_average_precision_score_duplicate_values():
# Duplicate values with precision-recall require a different
# processing than when computing the AUC of a ROC, because the
# precision-recall curve is a decreasing curve
    # The following situation corresponds to a perfect
# test statistic, the average_precision_score should be 1
y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
assert_equal(average_precision_score(y_true, y_score), 1)
def test_average_precision_score_tied_values():
    # Here if we go from left to right in y_true, the 0 values are
    # separated from the 1 values, so it appears that we've
    # correctly sorted our classifications. But in fact the first two
    # values have the same score (0.5) and so the first two values
    # could be swapped around, creating an imperfect sorting. This
    # imperfection should come through in the end score, making it less
    # than one.
y_true = [0, 1, 1]
y_score = [.5, .5, .6]
assert_not_equal(average_precision_score(y_true, y_score), 1.)
@ignore_warnings
def test_precision_recall_fscore_support_errors():
y_true, y_pred, _ = make_prediction(binary=True)
# Bad beta
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, beta=0.0)
# Bad pos_label
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, pos_label=2, average='macro')
# Bad average option
assert_raises(ValueError, precision_recall_fscore_support,
[0, 1, 2], [1, 2, 0], average='mega')
def test_confusion_matrix_binary():
# Test confusion matrix - binary classification case
y_true, y_pred, _ = make_prediction(binary=True)
def test(y_true, y_pred):
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
tp, fp, fn, tn = cm.flatten()
num = (tp * tn - fp * fn)
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
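        # With cm = [[22, 3], [8, 17]]: num = 22 * 17 - 3 * 8 = 350 and
        # den = sqrt(25 * 30 * 20 * 25) ~= 612.37, so mcc ~= 0.57.  The
        # unpacking above labels cm[0, 0] as tp even though it counts the
        # first class, but the MCC formula is symmetric under swapping the
        # two classes, so the value is unaffected.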
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
test(y_true, y_pred)
test([str(y) for y in y_true],
[str(y) for y in y_pred])
def test_cohen_kappa():
# These label vectors reproduce the contingency matrix from Artstein and
# Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]).
y1 = np.array([0] * 40 + [1] * 60)
y2 = np.array([0] * 20 + [1] * 20 + [0] * 10 + [1] * 50)
kappa = cohen_kappa_score(y1, y2)
assert_almost_equal(kappa, .348, decimal=3)
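    # Worked out from the contingency table: observed agreement
    # po = (20 + 50) / 100 = 0.70, chance agreement
    # pe = 0.4 * 0.3 + 0.6 * 0.7 = 0.54, so
    # kappa = (0.70 - 0.54) / (1 - 0.54) ~= 0.348.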
assert_equal(kappa, cohen_kappa_score(y2, y1))
# Add spurious labels and ignore them.
y1 = np.append(y1, [2] * 4)
y2 = np.append(y2, [2] * 4)
assert_equal(cohen_kappa_score(y1, y2, labels=[0, 1]), kappa)
assert_almost_equal(cohen_kappa_score(y1, y1), 1.)
# Multiclass example: Artstein and Poesio, Table 4.
y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
y2 = np.array([0] * 52 + [1] * 32 + [2] * 16)
assert_almost_equal(cohen_kappa_score(y1, y2), .8013, decimal=4)
@ignore_warnings
def test_matthews_corrcoef_nan():
assert_equal(matthews_corrcoef([0], [1]), 0.0)
assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0)
def test_precision_recall_f1_score_multiclass():
# Test Precision Recall and F1 Score for multiclass classification task
y_true, y_pred, _ = make_prediction(binary=False)
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
assert_array_equal(s, [24, 31, 20])
# averaging tests
ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='micro')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='micro')
assert_array_almost_equal(fs, 0.53, 2)
ps = precision_score(y_true, y_pred, average='macro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='macro')
assert_array_almost_equal(rs, 0.60, 2)
fs = f1_score(y_true, y_pred, average='macro')
assert_array_almost_equal(fs, 0.51, 2)
ps = precision_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(ps, 0.51, 2)
rs = recall_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(fs, 0.47, 2)
assert_raises(ValueError, precision_score, y_true, y_pred,
average="samples")
assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
beta=0.5)
    # same prediction but with an explicit label ordering
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[0, 2, 1], average=None)
assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
assert_array_equal(s, [24, 20, 31])
def test_precision_recall_f1_score_multilabel_unordered_labels():
# test that labels need not be sorted in the multilabel case
y_true = np.array([[1, 1, 0, 0]])
y_pred = np.array([[0, 0, 1, 1]])
for average in ['samples', 'micro', 'macro', 'weighted', None]:
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[], average=average)
assert_array_equal(p, 0)
assert_array_equal(r, 0)
assert_array_equal(f, 0)
if average is None:
assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_multiclass_pos_label_none():
# Test Precision Recall and F1 Score for multiclass classification task
# GH Issue #1296
# initialize data
y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
pos_label=None,
average='weighted')
def test_zero_precision_recall():
# Check that pathological cases do not bring NaNs
old_error_settings = np.seterr(all='raise')
try:
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([2, 0, 1, 1, 2, 0])
assert_almost_equal(precision_score(y_true, y_pred,
average='weighted'), 0.0, 2)
assert_almost_equal(recall_score(y_true, y_pred, average='weighted'),
0.0, 2)
assert_almost_equal(f1_score(y_true, y_pred, average='weighted'),
0.0, 2)
finally:
np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
# Test confusion matrix - multi-class case
y_true, y_pred, _ = make_prediction(binary=False)
def test(y_true, y_pred, string_type=False):
# compute confusion matrix with default labels introspection
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[19, 4, 1],
[4, 3, 24],
[0, 2, 18]])
# compute confusion matrix with explicit label ordering
labels = ['0', '2', '1'] if string_type else [0, 2, 1]
cm = confusion_matrix(y_true,
y_pred,
labels=labels)
assert_array_equal(cm, [[19, 1, 4],
[0, 18, 2],
[4, 24, 3]])
test(y_true, y_pred)
test(list(str(y) for y in y_true),
list(str(y) for y in y_pred),
string_type=True)
def test_confusion_matrix_multiclass_subset_labels():
# Test confusion matrix - multi-class case with subset of labels
y_true, y_pred, _ = make_prediction(binary=False)
# compute confusion matrix with only first two labels considered
cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
assert_array_equal(cm, [[19, 4],
[4, 3]])
# compute confusion matrix with explicit label ordering for only subset
# of labels
cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
assert_array_equal(cm, [[18, 2],
[24, 3]])
def test_classification_report_multiclass():
# Test performance report
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
# Test performance report with added digits in floating point values
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.82609 0.79167 0.80851 24
versicolor 0.33333 0.09677 0.15000 31
virginica 0.41860 0.90000 0.57143 20
avg / total 0.51375 0.53333 0.47310 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names, digits=5)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
y_true = np.array(["blue", "green", "red"])[y_true]
y_pred = np.array(["blue", "green", "red"])[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
green 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
expected_report = """\
precision recall f1-score support
a 0.83 0.79 0.81 24
b 0.33 0.10 0.15 31
c 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred,
target_names=["a", "b", "c"])
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = u"""\
precision recall f1-score support
blue\xa2 0.83 0.79 0.81 24
green\xa2 0.33 0.10 0.15 31
red\xa2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
if np_version[:3] < (1, 7, 0):
expected_message = ("NumPy < 1.7.0 does not implement"
" searchsorted on unicode data correctly.")
assert_raise_message(RuntimeError, expected_message,
classification_report, y_true, y_pred)
else:
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
@ignore_warnings # sequence of sequences is deprecated
def test_multilabel_classification_report():
n_classes = 4
n_samples = 50
make_ml = make_multilabel_classification
_, y_true_ll = make_ml(n_features=1, n_classes=n_classes, random_state=0,
n_samples=n_samples)
_, y_pred_ll = make_ml(n_features=1, n_classes=n_classes, random_state=1,
n_samples=n_samples)
expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 24
1 0.51 0.74 0.61 27
2 0.29 0.08 0.12 26
3 0.52 0.56 0.54 27
avg / total 0.45 0.51 0.46 104
"""
lb = MultiLabelBinarizer()
lb.fit([range(4)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, np.logical_not(y2)), 1)
assert_equal(zero_one_loss(y1, np.logical_not(y1)), 1)
assert_equal(zero_one_loss(y1, np.zeros(y1.shape)), 1)
assert_equal(zero_one_loss(y2, np.zeros(y1.shape)), 1)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuple of label
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, [(), ()]), 1)
assert_equal(zero_one_loss(y2, [tuple(), (10, )]), 1)
def test_multilabel_hamming_loss():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
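    # y1 and y2 disagree on exactly one of the 2 * 3 = 6 label assignments,
    # hence a loss of 1/6; against an all-zero matrix the loss is simply
    # the fraction of positive labels in the truth (4/6 and 3/6 below).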
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, np.logical_not(y2)), 1)
assert_equal(hamming_loss(y1, np.logical_not(y1)), 1)
assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6)
assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuple of label
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, [(), ()]), 0.75)
assert_equal(hamming_loss(y1, [tuple(), (10, )]), 0.625)
assert_almost_equal(hamming_loss(y2, [tuple(), (10, )],
classes=np.arange(11)), 0.1818, 2)
def test_multilabel_jaccard_similarity_score():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
# size(y1 \inter y2) = [1, 2]
# size(y1 \union y2) = [2, 2]
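    # per-sample Jaccard scores are therefore 1/2 and 2/2, whose mean is 0.75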
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, np.logical_not(y2)), 0)
assert_equal(jaccard_similarity_score(y1, np.logical_not(y1)), 0)
assert_equal(jaccard_similarity_score(y1, np.zeros(y1.shape)), 0)
assert_equal(jaccard_similarity_score(y2, np.zeros(y1.shape)), 0)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuple of label
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, [(), ()]), 0)
        # |y3 inter y4 | = [0, 0, 0]
        # |y3 union y4 | = [2, 2, 3]
y3 = [(0,), (1,), (3,)]
y4 = [(4,), (4,), (5, 6)]
assert_almost_equal(jaccard_similarity_score(y3, y4), 0)
# |y5 inter y6 | = [0, 1, 1]
# |y5 union y6 | = [2, 1, 3]
y5 = [(0,), (1,), (2, 3)]
y6 = [(1,), (1,), (2, 0)]
assert_almost_equal(jaccard_similarity_score(y5, y6), (1 + 1 / 3) / 3)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
# Test precision_recall_f1_score on a crafted multilabel example
# First crafted example
y_true_ll = [(0,), (1,), (2, 3)]
y_pred_ll = [(1,), (1,), (2, 0)]
lb = LabelBinarizer()
lb.fit([range(4)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
#tp = [0, 1, 1, 0]
#fn = [1, 0, 0, 1]
#fp = [1, 1, 0, 0]
# Check per class
assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 1, 1, 1], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)
# Check macro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
# Check micro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
        # Check weighted
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
        # Check samples
# |h(x_i) inter y_i | = [0, 1, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
# Test precision_recall_f1_score on a crafted multilabel example 2
# Second crafted example
y_true_ll = [(1,), (2,), (2, 3)]
y_pred_ll = [(4,), (4,), (2, 1)]
lb = LabelBinarizer()
lb.fit([range(1, 5)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
# tp = [ 0. 1. 0. 0.]
# fp = [ 1. 0. 0. 2.]
# fn = [ 1. 1. 1. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.25)
assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.125)
assert_almost_equal(f, 2 / 12)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 2 / 4)
assert_almost_equal(r, 1 / 4)
assert_almost_equal(f, 2 / 3 * 2 / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
        # Check samples
# |h(x_i) inter y_i | = [0, 0, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
assert_almost_equal(p, 1 / 6)
assert_almost_equal(r, 1 / 6)
assert_almost_equal(f, 2 / 4 * 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.1666, 2)
@ignore_warnings
def test_precision_recall_f1_score_with_an_empty_prediction():
y_true_ll = [(1,), (0,), (2, 1,)]
y_pred_ll = [tuple(), (3,), (2, 1)]
lb = LabelBinarizer()
lb.fit([range(4)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
# true_pos = [ 0. 1. 1. 0.]
# false_pos = [ 0. 0. 0. 1.]
# false_neg = [ 1. 1. 0. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 1.5 / 4)
assert_almost_equal(f, 2.5 / (4 * 1.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 2 / 3)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 3 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, (2 / 1.5 + 1) / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# |h(x_i) inter y_i | = [0, 0, 2]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [0, 1, 2]
assert_almost_equal(p, 1 / 3)
assert_almost_equal(r, 1 / 3)
assert_almost_equal(f, 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.333, 2)
def test_precision_recall_f1_no_labels():
y_true = np.zeros((20, 3))
y_pred = np.zeros_like(y_true)
# tp = [0, 0, 0]
# fn = [0, 0, 0]
# fp = [0, 0, 0]
# support = [0, 0, 0]
# |y_hat_i inter y_i | = [0, 0, 0]
# |y_i| = [0, 0, 0]
# |y_hat_i| = [0, 0, 0]
for beta in [1]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=None, beta=beta)
assert_array_almost_equal(p, [0, 0, 0], 2)
assert_array_almost_equal(r, [0, 0, 0], 2)
assert_array_almost_equal(f, [0, 0, 0], 2)
assert_array_almost_equal(s, [0, 0, 0], 2)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred, beta=beta, average=None)
assert_array_almost_equal(fbeta, [0, 0, 0], 2)
for average in ["macro", "micro", "weighted", "samples"]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=average,
beta=beta)
assert_almost_equal(p, 0)
assert_almost_equal(r, 0)
assert_almost_equal(f, 0)
assert_equal(s, None)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred,
beta=beta, average=average)
assert_almost_equal(fbeta, 0)
def test_prf_warnings():
# average of per-label scores
f, w = precision_recall_fscore_support, UndefinedMetricWarning
my_assert = assert_warns_message
for average in [None, 'weighted', 'macro']:
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in labels with no predicted samples.')
my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in labels with no true samples.')
my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)
# average of per-sample scores
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in samples with no predicted labels.')
my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
np.array([[1, 0], [0, 0]]), average='samples')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in samples with no true labels.')
my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
np.array([[1, 0], [1, 0]]),
average='samples')
# single score: micro-average
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]), average='micro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]), average='micro')
    # single positive label
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, [1, 1], [-1, -1], average='macro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, [-1, -1], [1, 1], average='macro')
def test_recall_warnings():
assert_no_warnings(recall_score,
np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
recall_score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'Recall is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_precision_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
precision_score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'Precision is ill-defined and '
'being set to 0.0 due to no predicted samples.')
assert_no_warnings(precision_score,
np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
def test_fscore_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
for score in [f1_score, partial(fbeta_score, beta=2)]:
score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no predicted samples.')
score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_prf_average_compat():
# Ensure warning if f1_score et al.'s average is implicit for multiclass
y_true = [1, 2, 3, 3]
y_pred = [1, 2, 3, 1]
y_true_bin = [0, 1, 1]
y_pred_bin = [0, 1, 0]
for metric in [precision_score, recall_score, f1_score,
partial(fbeta_score, beta=2)]:
score = assert_warns(DeprecationWarning, metric, y_true, y_pred)
score_weighted = assert_no_warnings(metric, y_true, y_pred,
average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default')
# check binary passes without warning
assert_no_warnings(metric, y_true_bin, y_pred_bin)
# but binary with pos_label=None should behave like multiclass
score = assert_warns(DeprecationWarning, metric,
y_true_bin, y_pred_bin, pos_label=None)
score_weighted = assert_no_warnings(metric, y_true_bin, y_pred_bin,
pos_label=None, average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default with '
'binary data and pos_label=None')
@ignore_warnings # sequence of sequences is deprecated
def test__check_targets():
# Check that _check_targets correctly merges target types, squeezes
# output and fails if input lengths differ.
IND = 'multilabel-indicator'
SEQ = 'multilabel-sequences'
MC = 'multiclass'
BIN = 'binary'
CNT = 'continuous'
MMC = 'multiclass-multioutput'
MCN = 'continuous-multioutput'
# all of length 3
EXAMPLES = [
(IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
# must not be considered binary
(IND, np.array([[0, 1], [1, 0], [1, 1]])),
(SEQ, [[2, 3], [1], [3]]),
(MC, [2, 3, 1]),
(BIN, [0, 1, 1]),
(CNT, [0., 1.5, 1.]),
(MC, np.array([[2], [3], [1]])),
(BIN, np.array([[0], [1], [1]])),
(CNT, np.array([[0.], [1.5], [1.]])),
(MMC, np.array([[0, 2], [1, 3], [2, 3]])),
(MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
]
# expected type given input types, or None for error
# (types will be tried in either order)
EXPECTED = {
(IND, IND): IND,
(SEQ, SEQ): IND,
(MC, MC): MC,
(BIN, BIN): BIN,
(IND, SEQ): None,
(MC, SEQ): None,
(BIN, SEQ): None,
(MC, IND): None,
(BIN, IND): None,
(BIN, MC): MC,
# Disallowed types
(CNT, CNT): None,
(MMC, MMC): None,
(MCN, MCN): None,
(IND, CNT): None,
(SEQ, CNT): None,
(MC, CNT): None,
(BIN, CNT): None,
(MMC, CNT): None,
(MCN, CNT): None,
(IND, MMC): None,
(SEQ, MMC): None,
(MC, MMC): None,
(BIN, MMC): None,
(MCN, MMC): None,
(IND, MCN): None,
(SEQ, MCN): None,
(MC, MCN): None,
(BIN, MCN): None,
}
for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
try:
expected = EXPECTED[type1, type2]
except KeyError:
expected = EXPECTED[type2, type1]
if expected is None:
assert_raises(ValueError, _check_targets, y1, y2)
if type1 != type2:
assert_raise_message(
ValueError,
"Can't handle mix of {0} and {1}".format(type1, type2),
_check_targets, y1, y2)
else:
if type1 not in (BIN, MC, SEQ, IND):
assert_raise_message(ValueError,
"{0} is not supported".format(type1),
_check_targets, y1, y2)
else:
merged_type, y1out, y2out = _check_targets(y1, y2)
assert_equal(merged_type, expected)
if merged_type.startswith('multilabel'):
assert_equal(y1out.format, 'csr')
assert_equal(y2out.format, 'csr')
else:
assert_array_equal(y1out, np.squeeze(y1))
assert_array_equal(y2out, np.squeeze(y2))
assert_raises(ValueError, _check_targets, y1[:-1], y2)
def test_hinge_loss_binary():
y_true = np.array([-1, 1, 1, -1])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
y_true = np.array([0, 2, 2, 0])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
def test_hinge_loss_multiclass():
pred_decision = np.array([
[0.36, -0.17, -0.58, -0.99],
[-0.54, -0.37, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.54, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, 0.24],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 3, 2])
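    # The reference below computes the multiclass hinge loss by hand: for
    # each sample it is max(0, 1 - s[true] + max(s[wrong])), and the
    # hard-coded column indices are just the highest-scoring wrong class
    # of each row of pred_decision.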
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
y_true = np.array([0, 1, 2, 2])
pred_decision = np.array([
[1.27, 0.034, -0.68, -1.40],
[-1.45, -0.58, -0.38, -0.17],
[-2.36, -0.79, -0.27, 0.24],
[-2.36, -0.79, -0.27, 0.24]
])
error_message = ("Please include all labels in y_true "
"or pass labels as third argument")
assert_raise_message(ValueError,
error_message,
hinge_loss, y_true, pred_decision)
def test_hinge_loss_multiclass_with_missing_labels():
pred_decision = np.array([
[0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 2])
labels = np.array([0, 1, 2, 3])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][2] + pred_decision[4][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision, labels=labels),
dummy_hinge_loss)
def test_hinge_loss_multiclass_invariance_lists():
# Currently, invariance of string and integer labels cannot be tested
# in common invariance tests because invariance tests for multiclass
    # decision functions are not implemented yet.
y_true = ['blue', 'green', 'red',
'green', 'white', 'red']
pred_decision = [
[0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, 0.24],
[-1.45, -0.58, -0.38, -0.17]]
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_log_loss():
# binary case with symbolic labels ("no" < "yes")
y_true = ["no", "no", "no", "yes", "yes", "yes"]
y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
[0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.8817971)
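    # The probabilities assigned to the true classes are
    # [0.5, 0.1, 0.01, 0.1, 0.25, 0.999], and
    # -mean(log(p_true)) ~= 1.8818, matching the expected value.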
# multiclass case; adapted from http://bit.ly/RJJHWA
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
loss = log_loss(y_true, y_pred, normalize=True)
assert_almost_equal(loss, 0.6904911)
# check that we got all the shapes and axes right
# by doubling the length of y_true and y_pred
y_true *= 2
y_pred *= 2
loss = log_loss(y_true, y_pred, normalize=False)
assert_almost_equal(loss, 0.6904911 * 6, decimal=6)
# check eps and handling of absolute zero and one probabilities
y_pred = np.asarray(y_pred) > .5
loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))
# raise error if number of classes are not equal.
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
assert_raises(ValueError, log_loss, y_true, y_pred)
# case when y_true is a string array object
y_true = ["ham", "spam", "spam", "ham"]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
def test_brier_score_loss():
# Check brier_score_loss function
y_true = np.array([0, 1, 1, 0, 1, 1])
y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true)
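    # The Brier score is the mean squared difference between the predicted
    # probability and the actual outcome, i.e. the squared L2 norm above
    # divided by the number of samples.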
assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
assert_almost_equal(brier_score_loss(y_true, y_pred), true_score)
assert_almost_equal(brier_score_loss(1. + y_true, y_pred),
true_score)
assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred),
true_score)
assert_raises(ValueError, brier_score_loss, y_true, y_pred[1:])
assert_raises(ValueError, brier_score_loss, y_true, y_pred + 1.)
assert_raises(ValueError, brier_score_loss, y_true, y_pred - 1.)
|
bsd-3-clause
|
brookehus/msmbuilder
|
msmbuilder/tests/test_dataset.py
|
3
|
6077
|
from __future__ import print_function, absolute_import, division
import os
import shutil
import tempfile
import numpy as np
from nose.tools import assert_raises, assert_raises_regexp
from sklearn.externals.joblib import Parallel, delayed
from msmbuilder.dataset import dataset
from .test_commands import tempdir
# Nose wraps unittest with pep8 function names, but throws deprecation
# warnings about it!
import warnings
warnings.filterwarnings('ignore', message=r".*assertRaisesRegex.*",
category=DeprecationWarning)
def test_1():
path = tempfile.mkdtemp()
shutil.rmtree(path)
try:
X = np.random.randn(10, 2)
ds = dataset(path, 'w', 'dir-npy')
ds[0] = X
assert set(os.listdir(path)) == set(('PROVENANCE.txt', '00000000.npy'))
np.testing.assert_array_equal(ds[0], X)
assert_raises(IndexError, lambda: ds[1])
assert len(ds) == 1
Y = np.zeros((10, 1))
Z = np.ones((2, 2))
ds[1] = Y
ds[2] = Z
np.testing.assert_array_equal(ds[1], Y)
np.testing.assert_array_equal(ds[2], Z)
assert len(ds) == 3
for i, item in enumerate(ds):
np.testing.assert_array_equal(item, [X, Y, Z][i])
except:
raise
finally:
shutil.rmtree(path)
def test_2():
path1 = tempfile.mkdtemp()
path2 = tempfile.mkdtemp()
shutil.rmtree(path1)
shutil.rmtree(path2)
try:
X = np.random.randn(10, 2)
Y = np.random.randn(10, 2)
ds1 = dataset(path1, 'w', 'dir-npy')
ds1[0] = X
ds2 = ds1.create_derived(path2)
ds2[0] = Y
np.testing.assert_array_equal(ds1[0], X)
np.testing.assert_array_equal(ds2[0], Y)
assert len(ds1) == 1
assert len(ds2) == 1
prov2 = ds2.provenance
print(prov2)
assert 2 == sum([s.startswith(' Command') for s in prov2.splitlines()])
except:
raise
finally:
shutil.rmtree(path1)
shutil.rmtree(path2)
def test_3():
path = tempfile.mkdtemp()
shutil.rmtree(path)
try:
ds = dataset(path, 'w', 'dir-npy')
ds[0] = np.random.randn(10, 2)
ds[1] = np.random.randn(10, 2)
ds[2] = np.random.randn(10, 2)
np.testing.assert_array_equal(ds[:][0], ds[0])
np.testing.assert_array_equal(ds[:][1], ds[1])
np.testing.assert_array_equal(ds[:][2], ds[2])
finally:
shutil.rmtree(path)
def test_4():
path = tempfile.mkdtemp()
shutil.rmtree(path)
try:
ds = dataset(path, 'w', 'dir-npy')
ds[0] = np.random.randn(10, 2)
v = ds.get(0, mmap=True)
assert isinstance(v, np.memmap)
np.testing.assert_array_equal(ds[0], v)
del v # close the underlying file
finally:
shutil.rmtree(path)
def test_hdf5_1():
with tempdir():
ds = dataset('ds.h5', 'w', 'hdf5')
print(ds.provenance)
ds[0] = np.zeros(10)
np.testing.assert_array_equal(ds.get(0), np.zeros(10))
assert list(ds.keys()) == [0]
assert len(ds) == 1
ds[0] = np.random.randn(10, 1)
ds[1] = np.random.randn(10, 2)
ds[2] = np.random.randn(10, 3)
np.testing.assert_array_equal(ds[:][0], ds[0])
np.testing.assert_array_equal(ds[:][1], ds[1])
np.testing.assert_array_equal(ds[:][2], ds[2])
ds.close()
with dataset('ds.h5') as ds:
assert ds[0].shape == (10, 1)
def test_hdf5_2():
with tempdir():
with dataset('ds.h5', 'w', 'hdf5') as ds:
ds2 = ds.create_derived('ds2.h5')
print(ds2.provenance)
ds2.close()
def _sum_helper(ds):
value = sum(np.sum(x) for x in ds)
ds.close()
return value
def test_hdf5_3():
with tempdir():
with dataset('ds.h5', 'w', 'hdf5') as ds:
ds[0] = np.random.randn(10)
ds[1] = np.random.randn(10)
ref_sum = _sum_helper(ds)
iter_args = (dataset('ds.h5') for _ in range(5))
sums = Parallel(n_jobs=2)(
delayed(_sum_helper)(a) for a in iter_args)
assert all(s == ref_sum for s in sums)
def test_union_no_longer_exists():
with assert_raises_regexp(ValueError,
r".*[Uu]se msmbuilder\.featurizer\.FeatureUnion.*"):
mds = dataset(['ds1.h5', 'ds2.h5'], fmt='hdf5-union')
def test_order_1():
with tempdir():
with dataset('ds1.h5', 'w', 'hdf5') as ds1:
for i in range(20):
ds1[i] = np.random.randn(10)
assert list(ds1.keys()) == list(range(20))
with dataset('ds1/', 'w', 'dir-npy') as ds1:
for i in range(20):
ds1[i] = np.random.randn(10)
assert list(ds1.keys()) == list(range(20))
def test_append_dirnpy():
path = tempfile.mkdtemp()
shutil.rmtree(path)
try:
with dataset(path, 'w', 'dir-npy') as ds:
ds[0] = np.random.randn(10, 2)
with dataset(path, 'a', 'dir-npy') as ds:
ds[1] = np.random.randn(10, 2)
with dataset(path, 'a', 'dir-npy') as ds:
ds[2] = np.random.randn(10, 2)
with dataset(path, 'a', 'dir-npy') as ds:
# Overwrite
ds[2] = np.random.randn(10, 2)
np.testing.assert_array_equal(ds[:][0], ds[0])
np.testing.assert_array_equal(ds[:][1], ds[1])
np.testing.assert_array_equal(ds[:][2], ds[2])
finally:
shutil.rmtree(path)
def test_items():
with tempdir():
ds = dataset('ds.h5', 'w', 'hdf5')
ds[0] = np.random.randn(10, 1)
ds[1] = np.random.randn(10, 2)
ds[5] = np.random.randn(10, 3)
keys = [0, 1, 5]
for i, (k, v) in enumerate(ds.items()):
assert k == keys[i]
np.testing.assert_array_equal(ds[k], v)
np.testing.assert_array_equal(ds[:][0], ds[0])
np.testing.assert_array_equal(ds[:][1], ds[1])
np.testing.assert_array_equal(ds[:][2], ds[5])
ds.close()
|
lgpl-2.1
|
ViennaRNA/forgi
|
forgi/projection/projection2d.py
|
1
|
46522
|
from __future__ import absolute_import, division, print_function, unicode_literals
# int is not imported from builtins here for performance reasons.
# See: https://github.com/PythonCharmers/python-future/issues/136
from builtins import (ascii, bytes, chr, dict, filter, hex, input,
map, next, oct, open, pow, range, round,
str, super, zip, object)
import forgi.threedee.utilities.vector as ftuv
import forgi.threedee.utilities.graph_pdb as ftug
import forgi.utilities.stuff as fus
import collections as col
import numpy as np
import itertools as it
# import networkx as nx Takes to long. Import only when needed
import warnings
import math
import copy
import sys
import logging
log = logging.getLogger(__name__)
"""
This module uses code by David Eppstein
(http://code.activestate.com/recipes/117225-convex-hull-and-diameter-of-2d-point-sets/)
under the PSF license and code by Syrtis Major
(c)2014-2015 (http://stackoverflow.com/a/24567352/5069869) under the BSD 3-Clause license
and potentially copyrighted code from matplotlib under the PSF license
(http://matplotlib.org/examples/api/line_with_text.html).
"""
try:
profile # The @profile decorator from line_profiler (kernprof)
except NameError:
def profile(x):
return x
def to_rgb(im):
"""
Convert an np array image from grayscale to RGB
"""
# SEE http://www.socouldanyone.com/2013/03/converting-grayscale-to-rgb-with-numpy.html
try:
w, h = im.shape
except ValueError: # Probably RGB already
w, h, c = im.shape
return im
else:
newImg = np.empty((w, h, 3), dtype=np.uint8)
newImg[:, :, 2] = newImg[:, :, 1] = newImg[:, :, 0] = (
im * 255).astype(np.uint8)
return newImg
def to_grayscale(im):
"""
Convert an np array image from RGB to grayscale
"""
try:
w, h, _ = im.shape
except ValueError:
return im
else:
newImg = np.empty((w, h))
newImg[:, :] = (im[:, :, 0] / 255 + im[:, :, 1] /
255 + im[:, :, 2] / 255) / 3
return newImg
def rasterized_2d_coordinates(points, angstrom_per_cell=10, origin=np.array([0, 0]), rotate=0):
angle = math.radians(rotate)
c = np.cos(angle)
s = np.sin(angle)
rotMat = np.array([[c, -s], [s, c]])
#print("FPP: rasterization: rotationmatrix ",rotMat)
a = ((np.dot(points, rotMat) - origin) // angstrom_per_cell).astype(int)
b = np.dot(points, rotMat)
#print("FPP: rasterization: roatated: ", b)
b = b - origin
#print("FPP: rasterization: roatated and offset: ", b)
assert (a == (b // angstrom_per_cell).astype(int)).all()
return a
def crop_coordinates_to_bounds(a, num_cells):
"""
:param a: an array of x,y coordinates
:returns: the clipped array (the input array is not modified in place)
"""
return a.clip(min=0, max=num_cells - 1)
# The following functions are from
# http://code.activestate.com/recipes/117225-convex-hull-and-diameter-of-2d-point-sets/
# Used under the PSF License
############################################################################################
# convex hull (Graham scan by x-coordinate) and diameter of a set of points
# David Eppstein, UC Irvine, 7 Mar 2002
def orientation(p, q, r):
'''Return positive if p-q-r are clockwise, neg if ccw, zero if colinear.'''
return (q[1] - p[1]) * (r[0] - p[0]) - (q[0] - p[0]) * (r[1] - p[1])
#@profile
def hulls(Points):
'''Graham scan to find upper and lower convex hulls of a set of 2d points.'''
U = []
L = []
Points.sort(key=lambda x: (x[0], x[1]))
for p in Points:
while len(U) > 1 and orientation(U[-2], U[-1], p) <= 0:
U.pop()
while len(L) > 1 and orientation(L[-2], L[-1], p) >= 0:
L.pop()
U.append(p)
L.append(p)
return U, L
def rotatingCalipers(Points):
'''Given a list of 2d points, finds all ways of sandwiching the points
between two parallel lines that touch one point each, and yields the sequence
of pairs of points touched by each pair of lines.'''
U, L = hulls(Points)
i = 0
j = len(L) - 1
while i < len(U) - 1 or j > 0:
yield U[i], L[j]
# if all the way through one side of hull, advance the other side
if i == len(U) - 1:
j -= 1
elif j == 0:
i += 1
# still points left on both lists, compare slopes of next hull edges
# being careful to avoid divide-by-zero in slope calculation
elif (U[i + 1][1] - U[i][1]) * (L[j][0] - L[j - 1][0]) > \
(L[j][1] - L[j - 1][1]) * (U[i + 1][0] - U[i][0]):
i += 1
else:
j -= 1
def diameter(Points):
'''Given a list of 2d points, returns the pair that's farthest apart.'''
try:
diam, pair = max([((p[0] - q[0])**2 + (p[1] - q[1])**2, (p, q))
for p, q in rotatingCalipers(Points)], key=lambda x: x[0])
except:
print(repr([((p[0] - q[0])**2 + (p[1] - q[1])**2, (p, q))
for p, q in rotatingCalipers(Points)]))
raise
return pair
# END David Eppstein
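# Illustrative example of the helpers above (not part of the original module):
# diameter([(0, 0), (1, 0), (0, 1), (3, 4)]) returns the two points that are
# farthest apart, here (0, 0) and (3, 4) (the order of the pair is not guaranteed).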
#@profile
def rotate2D(vector, cosPhi, sinPhi):
x = vector[0] * cosPhi - vector[1] * sinPhi
y = vector[0] * sinPhi + vector[1] * cosPhi
return np.array([x, y])
def bresenham(start, end):
"""
Rasterize a line from start to end onto a grid with grid-width 1.
:param start: A sequence of length 2, containing the x,y coordinates of the start of the line
:param end: A sequence of length 2, containing the x,y coordinates of the end of the line
:returns: A list of tuples (x,y), where x and y are integers.
"""
# See e.g. http://stackoverflow.com/a/32252934/5069869
# or https://de.wikipedia.org/wiki/Bresenham-Algorithmus#C-Implementierung
if start == end:
return [start]
points = []
dx = end[0] - start[0]
dy = end[1] - start[1]
x, y = start
if dx == 0:
sx = 0
else:
sx = dx // abs(dx)
if dy == 0:
sy = 0
else:
sy = dy // abs(dy)
dx = abs(dx)
dy = abs(dy)
if dx > dy:
err = dx / 2.
while x != end[0]:
# print(x,y)
points.append((x, y))
err -= dy
if err < 0:
y += sy
err += dx
x += sx
else:
err = dy / 2.
while y != end[1]:
# print(x,y)
points.append((x, y))
err -= dx
if err < 0:
x += sx
err += dy
y += sy
points.append((x, y))
# if abs(dx)>1 or abs(dy)>1:
#print(start, end, points)
return points
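# Illustrative example (not part of the original module): tracing a line from
# (0, 0) to (4, 2) with the function above should yield
# bresenham((0, 0), (4, 2)) == [(0, 0), (1, 0), (2, 1), (3, 1), (4, 2)]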
class Projection2D(object):
"""
A 2D projection of a CoarseGrainRNA onto a 2D plane
"""
@profile
def __init__(self, cg, proj_direction=None, rotation=0, project_virtual_atoms=False,
project_virtual_residues=[]):
"""
:param cg: a CoarseGrainRNA object with 3D coordinates for every element
.. note::
The projection is generated from this cg, but it is not associated
with it after construction.
Thus future changes of the cg are not reflected in the projection.
:param proj_direction: a cartesian vector (in 3D space) in the direction of projection.
The length of this vector is not used.
If proj_direction is None, cg.project_from is used.
If both proj_direction and cg.project_from are None, an error is raised.
:param rotation: Degrees. Rotate the projection by this amount.
"""
#: The projected coordinates of all stems
self._coords = dict()
self._cross_points = None
self._proj_graph = None
# Calculate orthonormal basis of projection plane.
# Compare to None, because `if np.array:` raises a ValueError.
if proj_direction is not None:
proj_direction = np.array(proj_direction, dtype=float)
elif cg.project_from is not None:
# We make a copy here. In case cg.project_from is modified,
# we still want to be able to look up from what direction the projection was generated.
proj_direction = np.array(cg.project_from, dtype=float)
else:
raise ValueError(
"No projection direction given and none present in the cg Object.")
_, unit_vec1, unit_vec2 = ftuv.create_orthonormal_basis(proj_direction)
self._unit_vec1 = unit_vec1
self._unit_vec2 = unit_vec2
self._proj_direction = proj_direction
self._virtual_residues = []
self.virtual_residue_numbers = project_virtual_residues
self._project(cg, project_virtual_atoms, project_virtual_residues)
# Rotate and translate projection into a standard orientation
points = list(self.points)
v1, v2 = diameter(points)
#: The longest distance between any two points of the projection.
self.longest_axis = ftuv.vec_distance(v1, v2)
v1 = np.array(v1)
v2 = np.array(v2)
shift = (v1 + v2) / 2
for key, edge in self._coords.items():
self._coords[key] = (edge[0] - shift, edge[1] - shift)
if project_virtual_atoms:
self._virtual_atoms = self._virtual_atoms - shift
if project_virtual_residues:
self._virtual_residues = self._virtual_residues - shift
rot = math.atan2(*(v2 - v1))
rot = math.degrees(rot)
self.rotate(rot)
xmean = np.mean([x[0] for p in self._coords.values() for x in p])
ymean = np.mean([x[1] for p in self._coords.values() for x in p])
mean = np.array([xmean, ymean])
for key, edge in self._coords.items():
self._coords[key] = (edge[0] - mean, edge[1] - mean)
# Thanks to numpy broadcasting, this works without a loop.
if project_virtual_atoms:
self._virtual_atoms = self._virtual_atoms - mean
if project_virtual_residues:
self._virtual_residues = self._virtual_residues - mean
# From this, further rotate if requested by the user.
if rotation != 0:
self.rotate(rotation)
### Functions modifying the projection in place ###
@profile
def rotate(self, angle):
"""
Rotate the projection in place around the origin (0,0)
:param angle: Rotation angle in degrees.
"""
angle = math.radians(angle)
c = np.cos(angle)
s = np.sin(angle)
self._proj_graph = None
for key, edge in self._coords.items():
self._coords[key] = (rotate2D(edge[0], c, s),
rotate2D(edge[1], c, s))
transRotMat = np.array([[c, s], [-s, c]])
if len(self._virtual_atoms):
self._virtual_atoms = np.dot(self._virtual_atoms, transRotMat)
if self.virtual_residue_numbers:
self._virtual_residues = np.dot(
self._virtual_residues, transRotMat)
def condense_points(self, cutoff=1):
"""
Condenses several projection points that are closer to each other than cutoff into
one point. This function modifies this Projection2D object.
.. note::
The result depends on the ordering of the dictionary holding
the nodes and might thus be pseudorandom.
:param cutoff: Two points with a distance smaller than cutoff are contracted.
A value below 20 is reasonable.
"""
if cutoff <= 0:
return
while self._condense_one(cutoff):
pass
self.proj_graph.remove_edges_from(self.proj_graph.selfloop_edges())
def condense(self, cutoff):
"""
Condenses points that are within the cutoff of another point or edge (=line segment)
into a new point. This function modifies this Projection2D object.
The contraction of two points works like documented in
`self.condense_points(self, cutoff)`.
If a node is close to a line segment, a new point is generated between
this node and the line segment. Then the original line and the original node
are deleted and all connections are attached to the new point.
.. note::
The result depends on the ordering of the dictionary holding the nodes and
might thus be pseudorandom.
:param cutoff: Two points with a distance smaller than cutoff are contracted.
A value below 20 is reasonable.
"""
if cutoff <= 0:
return
self.condense_points(cutoff)
while self._condense_pointWithLine_step(cutoff):
self.condense_points(cutoff)
### Get virtual residues ###
@property
def vres_iterator(self):
for i, nr in enumerate(self.virtual_residue_numbers):
yield nr, self._virtual_residues[i]
def get_vres_by_position(self, pos):
"""
:param pos: The nucleotide position in the sequence
"""
for i, nr in enumerate(self.virtual_residue_numbers):
if nr == pos:
return self._virtual_residues[i]
else:
raise LookupError("This virtual residue was not projected")
### Properties ###
@property
def proj_direction(self):
"""A vector describing the direction of the projection"""
return self._proj_direction
@property
def crossingPoints(self):
"""
All points where two segments intersect.
A list of triples `(key1, key2, coordinate)`, where coordinate is a 2D vector.
"""
if self._cross_points is None:
self._cross_points = col.defaultdict(list)
for key1, key2 in it.combinations(self._coords, 2):
for cr in ftuv.seg_intersect(self._coords[key1], self._coords[key2]):
self._cross_points[key1].append((cr, key2))
self._cross_points[key2].append((cr, key1))
return self._cross_points
# Note: This is SLOW the first time it is called. Should be avoided as much as possible.
@property
def proj_graph(self):
"""A graph describing the projected RNA.
This graph is stored as a `networkx` graph object.
"""
if self._proj_graph is None:
self._build_proj_graph()
return self._proj_graph
@property
def points(self):
"""
All points that are at the ends of coarse-grain elements.
This does not include points where coarse grain elements intersect in the projection.
Some points might be duplicates
:returns: A generator yielding all points.
"""
# Exclude m and i Elements, as they are always flanked by stems.
return (p for k, x in self._coords.items() for p in x if k[0] != "i" and k[0] != "m")
### Functions returning descriptors of the projection, mostly independent on the resolution ###
### Function returning descriptors of the projection, dependent on the resolution ###
def get_bounding_box(self, margin=0.):
"""
Returns the coordinates for a box that contains all points of the 2D projection.
:param margin: increase the bounding box in every direction by this margin.
:returns: left, right, bottom, top
"""
points = [p for x in self._coords.values() for p in x]
#print("P", points)
left = min(x[0] for x in points) - margin
right = max(x[0] for x in points) + margin
bottom = min(x[1] for x in points) - margin
top = max(x[1] for x in points) + margin
# print "BB", left, right, bottom, top
return left, right, bottom, top
def get_bounding_square(self, margin=0.):
"""
Returns the coordinates for a square that contains all points of the 2D projection.
:param margin: increase the bounding box in every direction by this margin.
:returns: left, right, bottom, top
"""
bb = self.get_bounding_box()
length = max([bb[1] - bb[0], bb[3] - bb[2]]) / 2 + margin
x = (bb[0] + bb[1]) / 2
y = (bb[2] + bb[3]) / 2
return x - length, x + length, y - length, y + length
def get_branchpoint_count(self, degree=None):
"""
Returns the number of branchpoint.
.. note::
This measure is sensitive to the resolution of a projection.
In an AFM image, one might not see all branching points.
.. warning::
Whether this code will stay in the library or not depends
on future evaluation of the usefulness of this and similar descriptors.
:param degree: If degree is None, count all points with degree>=3
Else: only count (branch)points of the given degree
"""
import networkx as nx
assert int(nx.__version__[0]) >= 2, "This function only works with networkx version 2, not {}!".format(nx.__version__)
if degree is None:
return len([x[1] for x in nx.degree(self.proj_graph) if x[1] >= 3])
else:
return len([x[1] for x in nx.degree(self.proj_graph) if x[1] == degree])
def get_cyclebasis_len(self):
"""
Returns the number of cycles of length>1 in the cycle basis.
.. warning::
Whether this code will stay in the library or not depends
on future evaluation of the usefulness of this and similar descriptors.
"""
import networkx as nx
return len([x for x in nx.cycle_basis(self.proj_graph) if len(x) > 1])
def get_total_length(self):
"""
Returns the sum of the lengths of all edges in the projection graph.
.. note::
This measure is sensitive to the resolution of a projection.
In an AFM image, one might not see all cycles.
.. warning::
Whether this code will stay in the library or not depends
on future evaluation of the usefulness of this and similar descriptors.
"""
l = 0
for edge in self.proj_graph.edges():
l += ftuv.vec_distance(edge[0], edge[1])
return l
def get_longest_arm_length(self):
"""
Get the length of the longest arm.
An arm is a simple path from a node of `degree!=2` to a node of `degree 1`,
if all the other nodes on the path have `degree 2`.
.. note:: This measure is sensitive to the resolution of a projection
the same way the length of a coastline is sensitive to the resolution.
.. warning::
Whether this code will stay in the library or not depends
on future evaluation of the usefulness of this and similar descriptors.
:returns: The length and a tuple of points `(leaf_node, corresponding_branch_point)`
"""
import networkx as nx
lengths = {}
target = {}
for leaf, degree in nx.degree(self.proj_graph):
if degree != 1:
continue
lengths[leaf] = 0
previous = None
current = leaf
while True:
next = [x for x in self.proj_graph[current].keys() if x !=
previous]
assert len(next) == 1
next = next[0]
lengths[leaf] += ftuv.vec_distance(current, next)
if self.proj_graph.degree(next) != 2:
break
previous = current
current = next
target[leaf] = next
best_leaf = max(lengths, key=lambda x: lengths[x])
return lengths[best_leaf], (best_leaf, target[best_leaf])
def get_leaf_leaf_distances(self):
"""
Get a list of distances between any pair of leaf nodes.
The distances are measured in direct line, not along the path
.. warning::
Whether this code will stay in the library or not depends
on future evaluation of the usefulness of this and similar descriptors.
:returns: a list of floats (lengths in Angstrom)
"""
lengths = []
leaves = [leaf for leaf in self.proj_graph.nodes(
) if self.proj_graph.degree(leaf) == 1]
for leaf1, leaf2 in it.combinations(leaves, 2):
lengths.append(ftuv.vec_distance(leaf1, leaf2))
lengths.sort(reverse=True)
return lengths
def get_some_leaf_leaf_distances(self):
"""
Get a list of distances between some pairs of leaf nodes.
The distances are measured in direct line, not along the path
.. warning::
Whether this code will stay in the library or not depends
on future evaluation of the usefulness of this and similar descriptors.
:returns: a list of floats (lengths in Angstrom)
"""
lengths = []
leaves = [leaf for leaf in self.proj_graph.nodes(
) if self.proj_graph.degree(leaf) == 1]
for leaf1, leaf2 in it.combinations(leaves, 2):
lengths.append((ftuv.vec_distance(leaf1, leaf2), leaf1, leaf2))
lengths.sort(reverse=True, key=lambda x: x[0])
newlengths = []
visited = set()
for l, leaf1, leaf2 in lengths:
if leaf1 in visited or leaf2 in visited:
continue
newlengths.append(l)
visited.add(leaf1)
visited.add(leaf2)
return newlengths
def get_maximal_path_length(self):
"""
Get the maximal path length over all simple paths that traverse the projection graph from
one leaf node to another.
.. note::
This measure is sensitive to the resolution of a projection
the same way the length of a coastline is sensitive to the resolution.
.. warning::
Whether this code will stay in the library or not depends
on future evaluation of the usefulness of this and similar descriptors.
"""
import networkx as nx
maxl = 0
for i, node1 in enumerate(self.proj_graph.nodes()):
for j, node2 in enumerate(self.proj_graph.nodes()):
if j <= i:
continue
all_paths = nx.all_simple_paths(self.proj_graph, node1, node2)
for path in all_paths:
l = self._get_path_length(path)
if l > maxl:
maxl = l
return maxl
### Functions for graphical representations of the projection ###
@profile
def rasterize(self, resolution=50, bounding_square=None, warn=True,
virtual_atoms=True, rotate=0, virtual_residues=True):
"""
Rasterize the projection to a square image of the given resolution.
Uses the Bresenham algorithm for line rasterization.
:param resolution:
The number of pixels in each direction.
:param bounding_square:
Rasterize onto the given square.
If `None`, automatically get a bounding_square that
shows the whole projection
:param warn: If True, raise a warning if parts of the projection are not inside
the given bounding square.
:param virtual_atoms:
If True, virtual atoms are also rasterized.
:param rotate: The in-plane rotation in degrees, applied before rasterization.
:param virtual_residues:
If True, virtual residues are also rasterized (as colored pixels).
:returns: A tuple `(np.array, float)`. The first value is a resolution x resolution
numpy 2D array.
The values are floats from 0.0 (black) to 1.0 (white).
This array can be directly plotted using matplotlib:
`pyplot.imshow(array, cmap='gray', interpolation='none')`
The second value is the length of one pixel in angstrom.
"""
if bounding_square is None:
bounding_square = self.get_bounding_square()
box = bounding_square
steplength = (box[1] - box[0]) / resolution
image = np.zeros([resolution, resolution], dtype=np.float32)
img_length = len(image)
angle = math.radians(rotate)
c = np.cos(angle)
s = np.sin(angle)
starts = []
ends = []
for label in self._coords:
if virtual_atoms and len(self._virtual_atoms):
if label[0] == "s":
continue
starts.append(self._coords[label][0])
ends.append(self._coords[label][1])
starts = np.array(starts)
ends = np.array(ends)
starts = rasterized_2d_coordinates(
starts, steplength, np.array([box[0], box[2]]), rotate)
ends = rasterized_2d_coordinates(
ends, steplength, np.array([box[0], box[2]]), rotate)
for i in range(len(starts)):
points = bresenham(tuple(starts[i]), tuple(ends[i]))
for p in points:
if 0 <= p[0] < img_length and 0 <= p[1] < img_length:
image[p[0], p[1]] = 1
else:
if warn:
warnings.warn("WARNING during rasterization of the 2D Projection: "
"Parts of the projection are cropped off.")
if virtual_atoms and len(self._virtual_atoms):
rot_virtual_atoms = rasterized_2d_coordinates(
self._virtual_atoms, steplength, np.array([box[0], box[2]]), rotate)
rot_virtual_atoms_clip = crop_coordinates_to_bounds(
rot_virtual_atoms, img_length)
if warn and (rot_virtual_atoms_clip != rot_virtual_atoms).any():
warnings.warn("WARNING during rasterization of virtual atoms: "
"Parts of the projection are cropped off.")
image[rot_virtual_atoms_clip[:, 0],
rot_virtual_atoms_clip[:, 1]] = 1
if virtual_residues and self.virtual_residue_numbers:
image = to_rgb(image)
rot_virtual_res = rasterized_2d_coordinates(
self._virtual_residues, steplength, np.array([box[0], box[2]]), rotate)
for i, point in enumerate(rot_virtual_res):
color = (
0, 150, 255 * self.virtual_residue_numbers[i] // max(self.virtual_residue_numbers))
if 0 <= point[0] < img_length and 0 <= point[1] < img_length:
image[point[0], point[1]] = color
else:
if warn:
warnings.warn("WARNING during rasterization of virtual residues: "
"Parts of the projection are cropped off.")
return image, steplength
def plot(self, ax=None, show=False, margin=5,
linewidth=None, add_labels=False,
line2dproperties={}, xshift=0, yshift=0,
show_distances=[], print_distances=False,
virtual_atoms=True):
"""
Plots the 2D projection.
This uses modified copy-paste code by Syrtis Major (c)2014-2015
under the BSD 3-Clause license and code from matplotlib under the PSF license.
:param ax: The axes to draw to.
You can get it by calling `fig, ax=matplotlib.pyplot.subplots()`
:param show: If true, the matplotlib.pyplot.show() will be called at the
end of this function.
:param margin: A numeric value.
The margin around the plotted projection inside the (sub-)plot.
:param linewidth: The width of the lines projection.
:param add_labels: Display the name of the corresponding coarse grain element in
the middle of each segment in the projection.
Either a bool or a set of labels to display.
:param line2dproperties:
A dictionary. Will be passed as `**kwargs` to the constructor of
`matplotlib.lines.Line2D`.
See http://matplotlib.org/api/lines_api.html#matplotlib.lines.Line2D
:param xshift, yshift:
Shift the projection by the given amount inside the canvas.
:param show_distances:
A list of tuples of strings, e.g. `[("h1","h8"),("h2","m15")]`.
Show the distances between these elements in the plot
:param print_distances:
Bool. Print all distances from show_distances at the side of the plot
instead of directly next to the distance
"""
# In case of ssh without -X option, a TypeError might be raised
# during the import of pyplot
# This probably depends on the version of some library.
# This is also the reason why we import matplotlib only inside the plot function.
text = []
try:
if ax is None or show:
import matplotlib.pyplot as plt
import matplotlib.lines as lines
import matplotlib.transforms as mtransforms
import matplotlib.text as mtext
import matplotlib.font_manager as font_manager
except TypeError as e:
warnings.warn("Cannot plot projection. Maybe you could not load Gtk "
"(no X11 server available)? During the import of matplotlib"
"the following Error occured:\n {}: {}".format(type(e).__name__, e))
return
except ImportError as e:
warnings.warn("Cannot import matplotlib. Do you have matplotlib installed? "
"The following error occured:\n {}: {}".format(type(e).__name__, e))
return
# try:
# import shapely.geometry as sg
# import shapely.ops as so
# except ImportError as e:
# warnings.warn("Cannot import shapely. "
# "The following error occured:\n {}: {}".format(type(e).__name__, e))
# area=False
# #return
# else:
# area=True
area = False
polygons = []
def circles(x, y, s, c='b', ax=None, vmin=None, vmax=None, **kwargs):
"""
Make a scatter of circles plot of x vs y, where x and y are sequence
like objects of the same lengths. The size of circles are in data scale.
Parameters
----------
x,y : scalar or array_like, shape (n, )
Input data
s : scalar or array_like, shape (n, )
Radius of circle in data scale (ie. in data unit)
c : color or sequence of color, optional, default : 'b'
`c` can be a single color format string, or a sequence of color
specifications of length `N`, or a sequence of `N` numbers to be
mapped to colors using the `cmap` and `norm` specified via kwargs.
Note that `c` should not be a single numeric RGB or
RGBA sequence because that is indistinguishable from an array of
values to be colormapped. `c` can be a 2-D array in which the
rows are RGB or RGBA, however.
ax : Axes object, optional, default: None
Parent axes of the plot. It uses gca() if not specified.
vmin, vmax : scalar, optional, default: None
`vmin` and `vmax` are used in conjunction with `norm` to normalize
luminance data. If either are `None`, the min and max of the
color array is used. (Note if you pass a `norm` instance, your
settings for `vmin` and `vmax` will be ignored.)
Returns
-------
paths : `~matplotlib.collections.PathCollection`
Other parameters
----------------
kwargs : `~matplotlib.collections.Collection` properties
eg. alpha, edgecolors, facecolors, linewidths, linestyles, norm, cmap
Examples
--------
a = np.arange(11)
circles(a, a, a*0.2, c=a, alpha=0.5, edgecolor='none')
License
--------
This function is copied (and potentially modified) from
http://stackoverflow.com/a/24567352/5069869
Copyright Syrtis Major, 2014-2015
This function is under [The BSD 3-Clause License]
(http://opensource.org/licenses/BSD-3-Clause)
"""
from matplotlib.patches import Circle
from matplotlib.collections import PatchCollection
#import matplotlib.colors as colors
if ax is None:
raise TypeError()
if fus.is_string_type(c):
color = c # ie. use colors.colorConverter.to_rgba_array(c)
else:
color = None # use cmap, norm after collection is created
kwargs.update(color=color)
if np.isscalar(x):
patches = [Circle((x, y), s), ]
elif np.isscalar(s):
patches = [Circle((x_, y_), s) for x_, y_ in zip(x, y)]
else:
patches = [Circle((x_, y_), s_) for x_, y_, s_ in zip(x, y, s)]
collection = PatchCollection(patches, **kwargs)
if color is None:
collection.set_array(np.asarray(c))
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
ax.add_collection(collection)
ax.autoscale_view()
return collection
class MyLine(lines.Line2D):
"""
Copied and modified from http://matplotlib.org/examples/api/line_with_text.html,
which is part of matplotlib 1.5.0 (Copyright (c) 2012-2013 Matplotlib Development
Team; All Rights Reserved).
Used under the matplotlib license: http://matplotlib.org/users/license.html
"""
def __init__(self, *args, **kwargs):
# we'll update the position when the line data is set
fm = font_manager.FontProperties(size="large", weight="demi")
self.text = mtext.Text(0, 0, '', fontproperties=fm)
lines.Line2D.__init__(self, *args, **kwargs)
# we can't access the label attr until *after* the line is
# inited
self.text.set_text(self.get_label())
def set_figure(self, figure):
self.text.set_figure(figure)
lines.Line2D.set_figure(self, figure)
def set_axes(self, axes):
self.text.set_axes(axes)
lines.Line2D.set_axes(self, axes)
def set_transform(self, transform):
# 2 pixel offset
texttrans = transform + mtransforms.Affine2D().translate(2, 2)
self.text.set_transform(texttrans)
lines.Line2D.set_transform(self, transform)
def set_data(self, x, y):
if len(x):
self.text.set_position(
((x[0] + x[-1]) / 2, (y[0] + y[-1]) / 2))
lines.Line2D.set_data(self, x, y)
def draw(self, renderer):
# draw my label at the end of the line with 2 pixel offset
lines.Line2D.draw(self, renderer)
self.text.draw(renderer)
if "linewidth" in line2dproperties and linewidth is not None:
warnings.warn(
"Got multiple values for 'linewidth' (also present in line2dproperties)")
if linewidth is not None:
line2dproperties["linewidth"] = linewidth
if "solid_capstyle" not in line2dproperties:
line2dproperties["solid_capstyle"] = "round"
if ax is None:
try:
fig, ax = plt.subplots(1, 1)
except Exception as e:
warnings.warn("Cannot create Axes or Figure. You probably have no graphical "
"display available. The Error was:\n {}: {}".format(type(e).__name__, e))
return
lprop = copy.copy(line2dproperties)
if virtual_atoms and len(self._virtual_atoms) > 0:
circles(
self._virtual_atoms[:, 0], self._virtual_atoms[:, 1], c="gray", s=0.7, ax=ax)
for label, (s, e) in self._coords.items():
if "color" not in line2dproperties:
if label.startswith("s"):
lprop["color"] = "green"
elif label.startswith("i"):
lprop["color"] = "gold"
elif label.startswith("h"):
lprop["color"] = "blue"
elif label.startswith("m"):
lprop["color"] = "red"
elif label.startswith("f") or label.startswith("t"):
lprop["color"] = "blue"
else:
lprop["color"] = "black"
if add_labels != False and (add_labels == True or label in add_labels):
lprop["label"] = label
else:
lprop["label"] = ""
#line=lines.Line2D([s[0], e[0]],[s[1],e[1]], **lprop)
line = MyLine([s[0] + xshift, e[0] + xshift],
[s[1] + yshift, e[1] + yshift], **lprop)
ax.add_line(line)
s = s + np.array([xshift, yshift])
e = e + np.array([xshift, yshift])
vec = np.array(e) - np.array(s)
nvec = np.array([vec[1], -vec[0]])
try:
div = math.sqrt(nvec[0]**2 + nvec[1]**2)
except ZeroDivisionError:
div = 100000
a = e + nvec * 5 / div
b = e - nvec * 5 / div
c = s + nvec * 5 / div
d = s - nvec * 5 / div
# For now disabling area representation
area = False
if area:
polygon = sg.Polygon([a, b, d, c])
polygons.append(polygon)
for s, e in show_distances:
st = (self._coords[s][0] + self._coords[s][1]) / 2
en = (self._coords[e][0] + self._coords[e][1]) / 2
d = ftuv.vec_distance(st, en)
if print_distances:
line = MyLine([st[0] + xshift, en[0] + xshift], [st[1] + yshift, en[1] + yshift],
color="orange", linestyle="--")
text.append("{:3} - {:3}: {:5.2f}".format(s, e, d))
else:
line = MyLine([st[0] + xshift, en[0] + xshift], [st[1] + yshift, en[1] + yshift],
label=str(round(d, 1)), color="orange", linestyle="--")
ax.add_line(line)
ax.axis(self.get_bounding_square(margin))
fm = font_manager.FontProperties(["monospace"], size="x-small")
if print_distances:
ax.text(0.01, 0.05, "\n".join(["Distances:"] + text),
transform=ax.transAxes, fontproperties=fm)
if area:
rnaArea = so.cascaded_union(polygons)
rnaXs, rnaYs = rnaArea.exterior.xy
ax.fill(rnaXs, rnaYs, alpha=0.5)
out = ax.plot()
if show:
plt.show()
return
return out
### Private functions ###
# Note: This is SLOW. Should be improved or removed in the future
def _build_proj_graph(self):
"""
Generate a graph from the 2D projection.
This is implemented as a networkx.Graph with the coordinates as nodes.
"""
import networkx as nx
proj_graph = nx.Graph()
for key, element in self._coords.items():
crs = self.crossingPoints
sortedCrs = ftuv.sortAlongLine(element[0], element[1], [
x[0] for x in crs[key]])
oldpoint = None
for point in sortedCrs:
point = (point[0], point[1]) # Tuple, to be hashable
if oldpoint is not None:
proj_graph.add_edge(
oldpoint, point, attr_dict={"label": key})
oldpoint = point
self._proj_graph = proj_graph
self.condense_points(0.00000000001) # To avoid floating point problems
@profile
def _project(self, cg, project_virtual_atoms, project_virtual_residues):
"""
Calculates the 2D coordinates of all coarse grained elements by vector rejection.
Stores them inside self._coords
"""
self._coords = dict()
self._virtual_atoms = []
basis = np.array([self._unit_vec1, self._unit_vec2]).T
# Project all coordinates to this plane
for key in cg.sorted_element_iterator():
val = cg.coords[key]
start = np.dot(val[0], basis)
end = np.dot(val[1], basis)
self._coords[key] = (start, end)
va = []
if project_virtual_atoms:
for residuePos in range(1, cg.seq_length + 1):
residue = cg.virtual_atoms(residuePos)
if project_virtual_atoms == "selected":
try:
va.append(residue['P'])
except KeyError:
pass
try:
va.append(residue["C1'"])
except KeyError:
assert False # Should never happen
try:
va.append(residue['C1'])
except KeyError:
pass
try:
va.append(residue["O3'"])
except KeyError:
pass
else:
for pos in residue.values():
va.append(pos)
if va:
self._virtual_atoms = np.dot(np.array(va), basis)
else:
warnings.warn("No virtual atoms present in {} of length {}!".format(
cg.name, len(cg.seq)))
vr = []
for res in project_virtual_residues:
vr.append(np.dot(cg.get_virtual_residue(res, True), basis))
assert len(vr) == len(project_virtual_residues)
if vr:
self._virtual_residues = np.array(vr)
def _condense_one(self, cutoff):
"""
Condenses two adjacent projection points into one.
:returns: True if a condensation was done, False if no condensation is possible.
"""
for i, node1 in enumerate(self.proj_graph.nodes()):
for j, node2 in enumerate(self.proj_graph.nodes()):
if j <= i:
continue
if ftuv.vec_distance(node1, node2) < cutoff:
newnode = ftuv.middlepoint(node1, node2)
# self.proj_graph.add_node(newnode)
for neighbor in list(self.proj_graph.adj[node1].keys()):
self.proj_graph.add_edge(newnode, neighbor,
attr_dict=self.proj_graph.adj[node1][neighbor])
for neighbor in list(self.proj_graph.adj[node2].keys()):
self.proj_graph.add_edge(newnode, neighbor,
attr_dict=self.proj_graph.adj[node2][neighbor])
if newnode != node1: # Equality can happen because of floating point inaccuracy
self.proj_graph.remove_node(node1)
if newnode != node2:
self.proj_graph.remove_node(node2)
return True
return False
def _condense_pointWithLine_step(self, cutoff):
"""
Used by `self.condense(cutoff)` as a single condensation step of a point
with a line segment.
"""
for i, source in enumerate(self.proj_graph.nodes()):
for j, target in enumerate(self.proj_graph.nodes()):
if j > i and self.proj_graph.has_edge(source, target):
for k, node in enumerate(self.proj_graph.nodes()):
if k == i or k == j:
continue
nearest = ftuv.closest_point_on_seg(
source, target, node)
nearest = tuple(nearest)
if nearest == source or nearest == target:
continue
if (ftuv.vec_distance(nearest, node) < cutoff):
newnode = ftuv.middlepoint(node, tuple(nearest))
attr_dict = self.proj_graph.adj[source][target]
self.proj_graph.remove_edge(source, target)
if source != newnode:
self.proj_graph.add_edge(
source, newnode, attr_dict=attr_dict)
if target != newnode:
self.proj_graph.add_edge(target, newnode,
attr_dict=attr_dict)
if newnode != node: # Equality possible because of floating point inaccuracy
for neighbor in self.proj_graph.adj[node].keys():
attr_dict = self.proj_graph.adj[node][neighbor]
self.proj_graph.add_edge(newnode, neighbor,
attr_dict=attr_dict)
self.proj_graph.remove_node(node)
return True
return False
def _get_path_length(self, path):
"""
:param path: a list of nodes
"""
l = 0
for i in range(len(path) - 1):
l += ftuv.vec_distance(path[i], path[i + 1])
return l
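# Rough usage sketch (assumes a CoarseGrainRNA object ``cg`` with 3D coordinates;
# how to load it depends on the forgi version and is not shown here):
# proj = Projection2D(cg, proj_direction=[1., 1., 1.], project_virtual_atoms=True)
# proj.condense_points(cutoff=1)
# image, pixel_length = proj.rasterize(resolution=70)
# arm_length, (leaf, branchpoint) = proj.get_longest_arm_length()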
|
gpl-3.0
|
fzr72725/ThinkStats2
|
code/brfss.py
|
69
|
4708
|
"""This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import math
import sys
import pandas
import numpy as np
import thinkstats2
import thinkplot
def Summarize(df, column, title):
"""Print summary statistics male, female and all."""
items = [
('all', df[column]),
('male', df[df.sex == 1][column]),
('female', df[df.sex == 2][column]),
]
print(title)
print('key\tn\tmean\tvar\tstd\tcv')
for key, series in items:
mean, var = series.mean(), series.var()
std = math.sqrt(var)
cv = std / mean
t = key, len(series), mean, var, std, cv
print('%s\t%d\t%4.2f\t%4.2f\t%4.2f\t%4.4f' % t)
def CleanBrfssFrame(df):
"""Recodes BRFSS variables.
df: DataFrame
"""
# clean age
df.age.replace([7, 9], float('NaN'), inplace=True)
# clean height
df.htm3.replace([999], float('NaN'), inplace=True)
# clean weight
df.wtkg2.replace([99999], float('NaN'), inplace=True)
df.wtkg2 /= 100.0
# clean weight a year ago
df.wtyrago.replace([7777, 9999], float('NaN'), inplace=True)
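# Values below 9000 are weights in pounds (converted to kg by dividing by 2.2);
# values of 9000 and above encode kilograms with an offset of 9000.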
df['wtyrago'] = df.wtyrago.apply(lambda x: x/2.2 if x < 9000 else x-9000)
def ReadBrfss(filename='CDBRFS08.ASC.gz', compression='gzip', nrows=None):
"""Reads the BRFSS data.
filename: string
compression: string
nrows: int number of rows to read, or None for all
returns: DataFrame
"""
var_info = [
('age', 101, 102, int),
('sex', 143, 143, int),
('wtyrago', 127, 130, int),
('finalwt', 799, 808, int),
('wtkg2', 1254, 1258, int),
('htm3', 1251, 1253, int),
]
columns = ['name', 'start', 'end', 'type']
variables = pandas.DataFrame(var_info, columns=columns)
variables.end += 1
dct = thinkstats2.FixedWidthVariables(variables, index_base=1)
df = dct.ReadFixedWidth(filename, compression=compression, nrows=nrows)
CleanBrfssFrame(df)
return df
def MakeNormalModel(weights):
"""Plots a CDF with a Normal model.
weights: sequence
"""
cdf = thinkstats2.Cdf(weights, label='weights')
mean, var = thinkstats2.TrimmedMeanVar(weights)
std = math.sqrt(var)
print('n, mean, std', len(weights), mean, std)
xmin = mean - 4 * std
xmax = mean + 4 * std
xs, ps = thinkstats2.RenderNormalCdf(mean, std, xmin, xmax)
thinkplot.Plot(xs, ps, label='model', linewidth=4, color='0.8')
thinkplot.Cdf(cdf)
def MakeNormalPlot(weights):
"""Generates a normal probability plot of birth weights.
weights: sequence
"""
mean, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
std = math.sqrt(var)
xs = [-5, 5]
xs, ys = thinkstats2.FitLine(xs, mean, std)
thinkplot.Plot(xs, ys, color='0.8', label='model')
xs, ys = thinkstats2.NormalProbability(weights)
thinkplot.Plot(xs, ys, label='weights')
def MakeFigures(df):
"""Generates CDFs and normal prob plots for weights and log weights."""
weights = df.wtkg2.dropna()
log_weights = np.log10(weights)
# plot weights on linear and log scales
thinkplot.PrePlot(cols=2)
MakeNormalModel(weights)
thinkplot.Config(xlabel='adult weight (kg)', ylabel='CDF')
thinkplot.SubPlot(2)
MakeNormalModel(log_weights)
thinkplot.Config(xlabel='adult weight (log10 kg)')
thinkplot.Save(root='brfss_weight')
# make normal probability plots on linear and log scales
thinkplot.PrePlot(cols=2)
MakeNormalPlot(weights)
thinkplot.Config(xlabel='z', ylabel='weights (kg)')
thinkplot.SubPlot(2)
MakeNormalPlot(log_weights)
thinkplot.Config(xlabel='z', ylabel='weights (log10 kg)')
thinkplot.Save(root='brfss_weight_normal')
def main(script, nrows=1000):
"""Tests the functions in this module.
script: string script name
"""
thinkstats2.RandomSeed(17)
nrows = int(nrows)
df = ReadBrfss(nrows=nrows)
MakeFigures(df)
Summarize(df, 'htm3', 'Height (cm):')
Summarize(df, 'wtkg2', 'Weight (kg):')
Summarize(df, 'wtyrago', 'Weight year ago (kg):')
if nrows == 1000:
assert(df.age.value_counts()[40] == 28)
assert(df.sex.value_counts()[2] == 668)
assert(df.wtkg2.value_counts()[90.91] == 49)
assert(df.wtyrago.value_counts()[160/2.2] == 49)
assert(df.htm3.value_counts()[163] == 103)
assert(df.finalwt.value_counts()[185.870345] == 13)
print('%s: All tests passed.' % script)
if __name__ == '__main__':
main(*sys.argv)
|
gpl-3.0
|
altermarkive/Resurrecting-JimFleming-Numerai
|
src/ml-zygmuntz--numer.ai/2017/check_consistency.py
|
1
|
1562
|
#!/usr/bin/env python3
"check validation consistency of predictions"
import json
import sys
import pandas as pd
from math import log
from sklearn.metrics import log_loss
import os
submission_file = os.getenv('PREDICTING')
test_file = os.getenv('TESTING')
try:
print("loading {}...".format(submission_file))
s = pd.read_csv(submission_file, header=0)
except:
print("\nUsage: check_consistency.py <predictions file> <test file>")
print(" i.e. check_consistency.py p.csv numerai_tournament_data.csv\n")
raise SystemExit
print("loading {}...\n".format(test_file))
test = pd.read_csv(test_file, header=0)
v = test[ test.data_type == 'validation' ].copy()
v = v.merge( s, on = 'id', how = 'left' )
eras = v.era.unique()
good_eras = 0
results = {'eras': []}
for era in eras:
tmp = v[ v.era == era ]
ll = log_loss( tmp.target, tmp.probability )
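# An era counts as "good" when its log loss beats -ln(0.5) ~= 0.693, the loss
# of always predicting a probability of 0.5.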
is_good = ll < -log( 0.5 )
if is_good:
good_eras += 1
print("{} {} {:.2%} {}".format(era, len(tmp), ll, is_good))
is_good = 'true' if is_good else 'false'
result = {'era': era, 'count': len(tmp), 'log_loss': ll, 'ok': is_good}
results['eras'].append(result)
consistency = good_eras / float( len( eras ))
print("\nconsistency: {:.1%} ({}/{})".format(consistency, good_eras, len(eras)))
results['consistency'] = consistency
ll = log_loss( v.target, v.probability )
print("log loss: {:.2%}\n".format(ll))
results['log_loss'] = ll
with open(os.getenv('CHECKING'), 'wb') as handle:
pretty = json.dumps(results, indent=2, separators=(',', ': '))
handle.write(pretty.encode('utf-8'))
|
mit
|
dhomeier/astropy
|
astropy/convolution/utils.py
|
3
|
10920
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import ctypes
import numpy as np
from astropy.modeling.core import FittableModel, custom_model
__all__ = ['discretize_model']
class DiscretizationError(Exception):
"""
Raised when discretization of models goes wrong.
"""
class KernelSizeError(Exception):
"""
Raised when the kernel size is even along any axis.
"""
def has_even_axis(array):
if isinstance(array, (list, tuple)):
return not len(array) % 2
else:
return any(not axes_size % 2 for axes_size in array.shape)
def raise_even_kernel_exception():
raise KernelSizeError("Kernel size must be odd in all axes.")
def add_kernel_arrays_1D(array_1, array_2):
"""
Add two 1D kernel arrays of different size.
The arrays are added with the centers lying upon each other.
"""
if array_1.size > array_2.size:
new_array = array_1.copy()
center = array_1.size // 2
slice_ = slice(center - array_2.size // 2,
center + array_2.size // 2 + 1)
new_array[slice_] += array_2
return new_array
elif array_2.size > array_1.size:
new_array = array_2.copy()
center = array_2.size // 2
slice_ = slice(center - array_1.size // 2,
center + array_1.size // 2 + 1)
new_array[slice_] += array_1
return new_array
return array_2 + array_1
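# For example (an illustrative sketch, not taken from the astropy test suite):
# add_kernel_arrays_1D(np.array([0., 1., 0.]), np.array([1., 1., 1., 1., 1.]))
# returns array([1., 1., 2., 1., 1.]): the 3-element kernel is added centred
# on the 5-element one.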
def add_kernel_arrays_2D(array_1, array_2):
"""
Add two 2D kernel arrays of different size.
The arrays are added with the centers lying upon each other.
"""
if array_1.size > array_2.size:
new_array = array_1.copy()
center = [axes_size // 2 for axes_size in array_1.shape]
slice_x = slice(center[1] - array_2.shape[1] // 2,
center[1] + array_2.shape[1] // 2 + 1)
slice_y = slice(center[0] - array_2.shape[0] // 2,
center[0] + array_2.shape[0] // 2 + 1)
new_array[slice_y, slice_x] += array_2
return new_array
elif array_2.size > array_1.size:
new_array = array_2.copy()
center = [axes_size // 2 for axes_size in array_2.shape]
slice_x = slice(center[1] - array_1.shape[1] // 2,
center[1] + array_1.shape[1] // 2 + 1)
slice_y = slice(center[0] - array_1.shape[0] // 2,
center[0] + array_1.shape[0] // 2 + 1)
new_array[slice_y, slice_x] += array_1
return new_array
return array_2 + array_1
def discretize_model(model, x_range, y_range=None, mode='center', factor=10):
"""
Function to evaluate analytical model functions on a grid.
So far the function can only deal with pixel coordinates.
Parameters
----------
model : `~astropy.modeling.FittableModel` or callable.
Analytic model function to be discretized. Callables which are not
instances of `~astropy.modeling.FittableModel` are passed to
`~astropy.modeling.custom_model` and then evaluated.
x_range : tuple
x range in which the model is evaluated. The difference between the
upper and lower limit must be a whole number, so that the output array
size is well defined.
y_range : tuple, optional
y range in which the model is evaluated. The difference between the
upper and lower limit must be a whole number, so that the output array
size is well defined. Necessary only for 2D models.
mode : str, optional
One of the following modes:
* ``'center'`` (default)
Discretize model by taking the value
at the center of the bin.
* ``'linear_interp'``
Discretize model by linearly interpolating
between the values at the corners of the bin.
For 2D models interpolation is bilinear.
* ``'oversample'``
Discretize model by taking the average
on an oversampled grid.
* ``'integrate'``
Discretize model by integrating the model
over the bin using `scipy.integrate.quad`.
Very slow.
factor : float or int
Factor of oversampling. Default = 10.
Returns
-------
array : `numpy.array`
Model value array
Notes
-----
The ``oversample`` mode makes it possible to conserve the integral on a subpixel
scale. Here is the example of a normalized Gaussian1D:
.. plot::
:include-source:
import matplotlib.pyplot as plt
import numpy as np
from astropy.modeling.models import Gaussian1D
from astropy.convolution.utils import discretize_model
gauss_1D = Gaussian1D(1 / (0.5 * np.sqrt(2 * np.pi)), 0, 0.5)
y_center = discretize_model(gauss_1D, (-2, 3), mode='center')
y_corner = discretize_model(gauss_1D, (-2, 3), mode='linear_interp')
y_oversample = discretize_model(gauss_1D, (-2, 3), mode='oversample')
plt.plot(y_center, label='center sum = {0:3f}'.format(y_center.sum()))
plt.plot(y_corner, label='linear_interp sum = {0:3f}'.format(y_corner.sum()))
plt.plot(y_oversample, label='oversample sum = {0:3f}'.format(y_oversample.sum()))
plt.xlabel('pixels')
plt.ylabel('value')
plt.legend()
plt.show()
"""
if not callable(model):
raise TypeError('Model must be callable.')
if not isinstance(model, FittableModel):
model = custom_model(model)()
ndim = model.n_inputs
if ndim > 2:
raise ValueError('discretize_model only supports 1-d and 2-d models.')
if not float(np.diff(x_range)).is_integer():
raise ValueError("The difference between the upper and lower limit of"
" 'x_range' must be a whole number.")
if y_range:
if not float(np.diff(y_range)).is_integer():
raise ValueError("The difference between the upper and lower limit of"
" 'y_range' must be a whole number.")
if ndim == 2 and y_range is None:
raise ValueError("y range not specified, but model is 2-d")
if ndim == 1 and y_range is not None:
raise ValueError("y range specified, but model is only 1-d.")
if mode == "center":
if ndim == 1:
return discretize_center_1D(model, x_range)
elif ndim == 2:
return discretize_center_2D(model, x_range, y_range)
elif mode == "linear_interp":
if ndim == 1:
return discretize_linear_1D(model, x_range)
if ndim == 2:
return discretize_bilinear_2D(model, x_range, y_range)
elif mode == "oversample":
if ndim == 1:
return discretize_oversample_1D(model, x_range, factor)
if ndim == 2:
return discretize_oversample_2D(model, x_range, y_range, factor)
elif mode == "integrate":
if ndim == 1:
return discretize_integrate_1D(model, x_range)
if ndim == 2:
return discretize_integrate_2D(model, x_range, y_range)
else:
raise DiscretizationError('Invalid mode.')
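# Usage sketch for a 2D model (illustrative only, mirroring the 1D example in the
# docstring above):
# from astropy.modeling.models import Gaussian2D
# gauss_2D = Gaussian2D(amplitude=1, x_mean=0, y_mean=0, x_stddev=2, y_stddev=2)
# array = discretize_model(gauss_2D, x_range=(-5, 6), y_range=(-5, 6),
#                          mode='oversample')
# array.shape is (11, 11): one value per integer pixel in the given ranges.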
def discretize_center_1D(model, x_range):
"""
Discretize model by taking the value at the center of the bin.
"""
x = np.arange(*x_range)
return model(x)
def discretize_center_2D(model, x_range, y_range):
"""
Discretize model by taking the value at the center of the pixel.
"""
x = np.arange(*x_range)
y = np.arange(*y_range)
x, y = np.meshgrid(x, y)
return model(x, y)
def discretize_linear_1D(model, x_range):
"""
Discretize model by performing a linear interpolation.
"""
# Evaluate model 0.5 pixel outside the boundaries
x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5)
values_intermediate_grid = model(x)
return 0.5 * (values_intermediate_grid[1:] + values_intermediate_grid[:-1])
def discretize_bilinear_2D(model, x_range, y_range):
"""
Discretize model by performing a bilinear interpolation.
"""
# Evaluate model 0.5 pixel outside the boundaries
x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5)
y = np.arange(y_range[0] - 0.5, y_range[1] + 0.5)
x, y = np.meshgrid(x, y)
values_intermediate_grid = model(x, y)
# Mean in y direction
values = 0.5 * (values_intermediate_grid[1:, :]
+ values_intermediate_grid[:-1, :])
# Mean in x direction
values = 0.5 * (values[:, 1:]
+ values[:, :-1])
return values
def discretize_oversample_1D(model, x_range, factor=10):
"""
Discretize model by taking the average on an oversampled grid.
"""
# Evaluate model on oversampled grid
x = np.linspace(x_range[0] - 0.5 * (1 - 1 / factor),
x_range[1] - 0.5 * (1 + 1 / factor),
num=int((x_range[1] - x_range[0]) * factor))
values = model(x)
# Reshape and compute mean
values = np.reshape(values, (x.size // factor, factor))
return values.mean(axis=1)
def discretize_oversample_2D(model, x_range, y_range, factor=10):
"""
Discretize model by taking the average on an oversampled grid.
"""
# Evaluate model on oversampled grid
x = np.linspace(x_range[0] - 0.5 * (1 - 1 / factor),
x_range[1] - 0.5 * (1 + 1 / factor),
num=int((x_range[1] - x_range[0]) * factor))
y = np.linspace(y_range[0] - 0.5 * (1 - 1 / factor),
y_range[1] - 0.5 * (1 + 1 / factor),
num=int((y_range[1] - y_range[0]) * factor))
x_grid, y_grid = np.meshgrid(x, y)
values = model(x_grid, y_grid)
# Reshape and compute mean
shape = (y.size // factor, factor, x.size // factor, factor)
values = np.reshape(values, shape)
return values.mean(axis=3).mean(axis=1)
def discretize_integrate_1D(model, x_range):
"""
Discretize model by integrating numerically the model over the bin.
"""
from scipy.integrate import quad
# Set up grid
x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5)
values = np.array([])
# Integrate over all bins
for i in range(x.size - 1):
values = np.append(values, quad(model, x[i], x[i + 1])[0])
return values
def discretize_integrate_2D(model, x_range, y_range):
"""
Discretize model by integrating the model over the pixel.
"""
from scipy.integrate import dblquad
# Set up grid
x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5)
y = np.arange(y_range[0] - 0.5, y_range[1] + 0.5)
values = np.empty((y.size - 1, x.size - 1))
# Integrate over all pixels
for i in range(x.size - 1):
for j in range(y.size - 1):
values[j, i] = dblquad(lambda y, x: model(x, y), x[i], x[i + 1],
lambda x: y[j], lambda x: y[j + 1])[0]
return values
|
bsd-3-clause
|
Silmathoron/nest-simulator
|
pynest/examples/spatial/test_3d.py
|
5
|
2178
|
# -*- coding: utf-8 -*-
#
# test_3d.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
A spatial network in 3D
-------------------------
Hans Ekkehard Plesser, UMB
"""
import nest
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
nest.ResetKernel()
pos = nest.spatial.free(nest.random.uniform(-0.5, 0.5), extent=[1.5, 1.5, 1.5])
l1 = nest.Create('iaf_psc_alpha', 1000, positions=pos)
# visualize
# extract position information, transpose to list of x, y and z positions
xpos, ypos, zpos = zip(*nest.GetPosition(l1))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xpos, ypos, zpos, s=15, facecolor='b', edgecolor='none')
# full connections in volume [-0.2,0.2]**3
nest.Connect(l1, l1,
{'rule': 'pairwise_bernoulli',
'p': 1.,
'allow_autapses': False,
'mask': {'volume': {'lower_left': [-0.2, -0.2, -0.2],
'upper_right': [0.2, 0.2, 0.2]}}})
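# With p=1 and this mask, every neuron connects to all other neurons whose
# position lies within a 0.4 x 0.4 x 0.4 box centred on it (no self-connections).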
# show connections from center element
# sender shown in red, targets in green
ctr = nest.FindCenterElement(l1)
xtgt, ytgt, ztgt = zip(*nest.GetTargetPositions(ctr, l1)[0])
xctr, yctr, zctr = nest.GetPosition(ctr)
ax.scatter([xctr], [yctr], [zctr], s=40, facecolor='r', edgecolor='none')
ax.scatter(xtgt, ytgt, ztgt, s=40, facecolor='g', edgecolor='g')
tgts = nest.GetTargetNodes(ctr, l1)[0]
distances = nest.Distance(ctr, l1)
tgt_distances = [d for i, d in enumerate(distances) if i + 1 in tgts]
plt.figure()
plt.hist(tgt_distances, 25)
plt.show()
|
gpl-2.0
|