repo_name | path | copies | size | content | license
---|---|---|---|---|---|
jiajiechen/mxnet | example/gluon/kaggle_k_fold_cross_validation.py | 25 | 6871 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This example provides an end-to-end pipeline for a common Kaggle competition.
# The entire pipeline includes common utilities such as k-fold cross validation
# and data pre-processing.
#
# Specifically, it uses the `House Prices: Advanced Regression
# Techniques` challenge as a case study.
#
# The link to the problem on Kaggle:
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques
import numpy as np
import pandas as pd
from mxnet import autograd
from mxnet import gluon
from mxnet import ndarray as nd
# After logging in to www.kaggle.com, the training and testing data sets can be downloaded at:
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques/download/train.csv
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques/download/test.csv
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
all_X = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'],
test.loc[:, 'MSSubClass':'SaleCondition']))
# Get all the numerical features and apply standardization.
numeric_feas = all_X.dtypes[all_X.dtypes != "object"].index
all_X[numeric_feas] = all_X[numeric_feas].apply(lambda x:
(x - x.mean()) / (x.std()))
# Convert categorical feature values to numerical (including N/A).
all_X = pd.get_dummies(all_X, dummy_na=True)
# Approximate N/A feature value by the mean value of the current feature.
all_X = all_X.fillna(all_X.mean())
num_train = train.shape[0]
# Convert data formats to NDArrays to feed into gluon.
X_train = all_X[:num_train].as_matrix()
X_test = all_X[num_train:].as_matrix()
y_train = train.SalePrice.as_matrix()
X_train = nd.array(X_train)
y_train = nd.array(y_train)
y_train = y_train.reshape((num_train, 1))
X_test = nd.array(X_test)
square_loss = gluon.loss.L2Loss()
def get_rmse_log(net, X_train, y_train):
"""Gets root mse between the logarithms of the prediction and the truth."""
num_train = X_train.shape[0]
clipped_preds = nd.clip(net(X_train), 1, float('inf'))
return np.sqrt(2 * nd.sum(square_loss(
nd.log(clipped_preds), nd.log(y_train))).asscalar() / num_train)
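# Note: gluon.loss.L2Loss computes 1/2 * (pred - label)^2 per example, so the
# factor of 2 above restores the plain squared error before the square root,
# giving the RMSE of the log-prices (the metric used for this competition).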
def get_net():
"""Gets a neural network. Better results are obtained with modifications."""
net = gluon.nn.Sequential()
with net.name_scope():
net.add(gluon.nn.Dense(50, activation="relu"))
net.add(gluon.nn.Dense(1))
net.initialize()
return net
def train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
weight_decay, batch_size):
"""Trains the model."""
dataset_train = gluon.data.ArrayDataset(X_train, y_train)
data_iter_train = gluon.data.DataLoader(dataset_train, batch_size,
shuffle=True)
trainer = gluon.Trainer(net.collect_params(), 'adam',
{'learning_rate': learning_rate,
'wd': weight_decay})
net.collect_params().initialize(force_reinit=True)
for epoch in range(epochs):
for data, label in data_iter_train:
with autograd.record():
output = net(data)
loss = square_loss(output, label)
loss.backward()
trainer.step(batch_size)
avg_loss = get_rmse_log(net, X_train, y_train)
if epoch > verbose_epoch:
print("Epoch %d, train loss: %f" % (epoch, avg_loss))
return avg_loss
def k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,
learning_rate, weight_decay, batch_size):
"""Conducts k-fold cross validation for the model."""
assert k > 1
fold_size = X_train.shape[0] // k
train_loss_sum = 0.0
test_loss_sum = 0.0
for test_idx in range(k):
X_val_test = X_train[test_idx * fold_size: (test_idx + 1) *
fold_size, :]
y_val_test = y_train[test_idx * fold_size: (test_idx + 1) * fold_size]
val_train_defined = False
for i in range(k):
if i != test_idx:
X_cur_fold = X_train[i * fold_size: (i + 1) * fold_size, :]
y_cur_fold = y_train[i * fold_size: (i + 1) * fold_size]
if not val_train_defined:
X_val_train = X_cur_fold
y_val_train = y_cur_fold
val_train_defined = True
else:
X_val_train = nd.concat(X_val_train, X_cur_fold, dim=0)
y_val_train = nd.concat(y_val_train, y_cur_fold, dim=0)
net = get_net()
train_loss = train(net, X_val_train, y_val_train, epochs, verbose_epoch,
learning_rate, weight_decay, batch_size)
train_loss_sum += train_loss
test_loss = get_rmse_log(net, X_val_test, y_val_test)
print("Test loss: %f" % test_loss)
test_loss_sum += test_loss
return train_loss_sum / k, test_loss_sum / k
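# Note: only the first k * fold_size rows take part in the cross validation;
# any remainder rows (X_train.shape[0] % k of them) fall outside every fold.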
# The sets of parameters. Better results are obtained with modifications.
# These parameters can be fine-tuned with k-fold cross-validation.
k = 5
epochs = 100
verbose_epoch = 95
learning_rate = 0.3
weight_decay = 100
batch_size = 100
train_loss, test_loss = \
k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,
learning_rate, weight_decay, batch_size)
print("%d-fold validation: Avg train loss: %f, Avg test loss: %f" %
(k, train_loss, test_loss))
def learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
weight_decay, batch_size):
"""Trains the model and predicts on the test data set."""
net = get_net()
_ = train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
weight_decay, batch_size)
preds = net(X_test).asnumpy()
test['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
submission = pd.concat([test['Id'], test['SalePrice']], axis=1)
submission.to_csv('submission.csv', index=False)
learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
weight_decay, batch_size)
| apache-2.0 |
PythonCharmers/bokeh | bokeh/charts/builder/tests/test_boxplot_builder.py | 31 | 4900 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
import pandas as pd
import blaze
from bokeh.charts import BoxPlot
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestBoxPlot(unittest.TestCase):
def test_supported_input(self):
xyvalues = OrderedDict([
('bronze', np.array([7.0, 10.0, 8.0, 7.0, 4.0, 4.0, 1.0, 5.0, 2.0, 1.0,
4.0, 2.0, 1.0, 2.0, 4.0, 1.0, 0.0, 1.0, 1.0, 2.0,
0.0, 1.0, 0.0, 0.0, 1.0, 1.0])),
('silver', np.array([8., 4., 6., 4., 8., 3., 3., 2., 5., 6.,
1., 4., 2., 3., 2., 0., 0., 1., 2., 1.,
3., 0., 0., 1., 0., 0.])),
('gold', np.array([6., 6., 6., 8., 4., 8., 6., 3., 2., 2., 2., 1.,
3., 1., 0., 5., 4., 2., 0., 0., 0., 1., 1., 0., 0.,
0.]))
])
groups = ['bronze', 'silver', 'gold']
xyvaluesdf = pd.DataFrame(xyvalues)
xyvaluesbl = blaze.Data(xyvaluesdf)
        expected_datarect = {
'colors': ['#f22c40', '#5ab738', '#407ee7'],
'groups': ['bronze', 'silver', 'gold'],
'iqr_centers': [2.5, 2.5, 2.5],
'iqr_lengths': [3.0, 3.0, 4.5],
'lower_center_boxes': [1.25, 1.5, 1.125],
'lower_height_boxes': [0.5, 1.0, 1.75],
'upper_center_boxes': [2.75, 3.0, 3.375],
'upper_height_boxes': [2.5, 2.0, 2.75],
'width': [0.8, 0.8, 0.8]
}
expected_scatter = {
'colors': ['#f22c40'],
'out_x': ['bronze'],
'out_y': [10.0]
}
expected_seg = {
'lower': [-3.5, -3.5, -6.5],
'q0': [1.0, 1.0, 0.25],
'q2': [4.0, 4.0, 4.75],
'upper': [8.5, 8.5, 11.5]
}
for i, _xy in enumerate([xyvalues, xyvaluesdf, xyvaluesbl]):
bp = create_chart(BoxPlot, _xy, marker='circle', outliers=True)
builder = bp._builders[0]
self.assertEqual(sorted(builder._groups), sorted(groups))
            for key, expected_v in expected_datarect.items():
self.assertEqual(builder._data_rect[key], expected_v)
for key, expected_v in expected_scatter.items():
self.assertEqual(builder._data_scatter[key], expected_v)
for key, expected_v in expected_seg.items():
self.assertEqual(builder._data_segment[key], expected_v)
lvalues = [
np.array([7.0, 10.0, 8.0, 7.0, 4.0, 4.0, 1.0, 5.0, 2.0, 1.0,
4.0, 2.0, 1.0, 2.0, 4.0, 1.0, 0.0, 1.0, 1.0, 2.0,
0.0, 1.0, 0.0, 0.0, 1.0, 1.0]),
np.array([8., 4., 6., 4., 8., 3., 3., 2., 5., 6.,
1., 4., 2., 3., 2., 0., 0., 1., 2., 1.,
3., 0., 0., 1., 0., 0.]),
np.array([6., 6., 6., 8., 4., 8., 6., 3., 2., 2., 2., 1.,
3., 1., 0., 5., 4., 2., 0., 0., 0., 1., 1., 0., 0.,
0.])
]
        groups = expected_datarect['groups'] = ['0', '1', '2']
expected_scatter['out_x'] = ['0']
for i, _xy in enumerate([lvalues, np.array(lvalues)]):
bp = create_chart(BoxPlot, _xy, marker='circle', outliers=True)
builder = bp._builders[0]
self.assertEqual(sorted(builder._groups), sorted(groups))
            for key, expected_v in expected_datarect.items():
self.assertEqual(builder._data_rect[key], expected_v)
for key, expected_v in expected_scatter.items():
self.assertEqual(builder._data_scatter[key], expected_v)
for key, expected_v in expected_seg.items():
self.assertEqual(builder._data_segment[key], expected_v)
def test_no_outliers(self):
xyvalues = [7.0, 7.0, 8.0, 8.0, 9.0, 9.0]
bp = create_chart(BoxPlot, xyvalues, outliers=True)
builder = bp._builders[0]
outliers = builder._data_scatter['out_y']
self.assertEqual(len(outliers), 0)
| bsd-3-clause |
puruckertom/poptox | poptox/yulefurry/yulefurry_exe.py | 2 | 4165 | import numpy as np
import os.path
import pandas as pd
import sys
#find parent directory and import base (travis)
parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(parentddir)
from base.uber_model import UberModel, ModelSharedInputs
#print(sys.path)
#print(os.path)
class YuleFurryInputs(ModelSharedInputs):
"""
Input class for YuleFurry.
"""
def __init__(self):
"""Class representing the inputs for YuleFurry"""
super(YuleFurryInputs, self).__init__()
self.init_pop_size = pd.Series([], dtype="float")
self.birth_probability = pd.Series([], dtype="float")
self.time_steps = pd.Series([], dtype="float")
self.n_iterations = pd.Series([], dtype="float")
class YuleFurryOutputs(object):
"""
Output class for YuleFurry.
"""
def __init__(self):
"""Class representing the outputs for YuleFurry"""
super(YuleFurryOutputs, self).__init__()
# self.out_x = pd.Series(name="out_x")
# self.out_x_mu = pd.Series(name="out_x_mu")
#dictionary of time, outputs
self.out_pop_time_series = [] #x
self.out_x_mu = []
class YuleFurry(UberModel, YuleFurryInputs, YuleFurryOutputs):
"""
YuleFurry model for population growth.
"""
def __init__(self, pd_obj, pd_obj_exp):
"""Class representing the YuleFurry model and containing all its methods"""
super(YuleFurry, self).__init__()
self.pd_obj = pd_obj
self.pd_obj_exp = pd_obj_exp
self.pd_obj_out = None
def execute_model(self):
"""
Callable to execute the running of the model:
1) Populate input parameters
2) Create output DataFrame to hold the model outputs
3) Run the model's methods to generate outputs
4) Fill the output DataFrame with the generated model outputs
"""
self.populate_inputs(self.pd_obj, self)
self.pd_obj_out = self.populate_outputs(self)
self.run_methods()
self.fill_output_dataframe(self)
# Begin model methods
def run_methods(self):
""" Execute all algorithm methods for model logic """
try:
# dictionaries of population time series
self.batch_yulefurry()
except Exception as e:
print(str(e))
def exponential_growth(self):
index_set = range(self.time_steps + 1)
x = np.zeros(len(index_set))
x[0] = self.init_pop_size
for n in index_set[1:]:
x[n] = self.init_pop_size * np.exp(self.growth_rate / 100 * n)
self.out_pop_time_series = x.tolist()
return self.out_pop_time_series
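    # The Yule-Furry model is a pure-birth process: at each time step every
    # individual independently produces one offspring with probability
    # `birth_probability`, so the expected population obeys
    # x_mu[n + 1] = (1 + birth_probability) * x_mu[n], which the method below
    # compares against Monte Carlo draws.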
    def yule_furry_growth(self, idx):
#N_o, T, rho, Ite
#init_pop_size, time_steps, birth_probability, n_iterations
        index_set = range(int(self.time_steps[idx]) + 1)
        x = np.zeros((int(self.n_iterations[idx]), len(index_set)))
x_mu = np.zeros(len(index_set))
x_mu[0] = self.init_pop_size[idx]
self.birth_probability[idx] /= 100
        for i in range(0, int(self.n_iterations[idx])):
#rho=1-np.exp(-rho)
x[i][0] = self.init_pop_size[idx]
n = 0
while n < self.time_steps[idx]:
x_mu[n+1] = (1 + self.birth_probability[idx]) * x_mu[n]
if x[i][n] < 10000:
m = np.random.random(x[i][n])
n_birth = np.sum(m < self.birth_probability[idx])
x[i][n+1] = x[i][n] + n_birth
else:
x[i][n+1] = (1 + self.birth_probability[idx]) * x[i][n]
n += 1
# self.out_x = x.tolist()
# self.out_x_mu = x_mu.tolist()
# return
t = range(0, self.time_steps[idx])
xmu = range(0, self.birth_probability[idx])
d_t = dict(zip(t, x))
d_xmu = dict(zip(xmu,x))
self.out_pop_time_series[idx].append(d_t) #x
        self.out_x_mu[idx].append(d_xmu)
return
def batch_yulefurry(self):
        for idx, _ in enumerate(self.init_pop_size):
self.yule_furry_growth(idx)
return | unlicense |
phiros/nepi | examples/ccn_emu_live/dce_4_nodes_linear.py | 1 | 8477 | #!/usr/bin/env python
###############################################################################
#
# NEPI, a framework to manage network experiments
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Alina Quereilhac <[email protected]>
#
###############################################################################
from nepi.execution.ec import ExperimentController
from nepi.execution.runner import ExperimentRunner
from nepi.util.netgraph import TopologyType
import nepi.data.processing.ccn.parser as ccn_parser
import networkx
import socket
import os
import numpy
from scipy import stats
from matplotlib import pyplot
import math
import random
def avg_interest_rtt(ec, run):
logs_dir = ec.run_dir
# Parse downloaded CCND logs
(graph,
content_names,
interest_expiry_count,
interest_dupnonce_count,
interest_count,
content_count) = ccn_parser.process_content_history_logs(
logs_dir, ec.netgraph.topology)
# statistics on RTT
rtts = [content_names[content_name]["rtt"] \
for content_name in content_names.keys()]
# sample mean and standard deviation
sample = numpy.array(rtts)
n, min_max, mean, var, skew, kurt = stats.describe(sample)
std = math.sqrt(var)
ci = stats.t.interval(0.95, n-1, loc = mean,
scale = std/math.sqrt(n))
global metrics
metrics.append((mean, ci[0], ci[1]))
return mean
def normal_law(ec, run, sample):
x = numpy.array(sample)
n = len(sample)
std = x.std()
se = std / math.sqrt(n)
m = x.mean()
se95 = se * 2
return m * 0.05 >= se95
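# A run is considered converged once twice the standard error of the mean
# (roughly the half-width of a 95% confidence interval) is at most 5% of the
# sample mean.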
def post_process(ec, runs):
global metrics
# plot convergence graph
y = numpy.array([float(m[0]) for m in metrics])
low = numpy.array([float(m[1]) for m in metrics])
high = numpy.array([float(m[2]) for m in metrics])
error = [y - low, high - y]
x = range(1,runs + 1)
# plot average RTT and confidence interval for each iteration
pyplot.errorbar(x, y, yerr = error, fmt='o')
pyplot.plot(x, y, 'r-')
pyplot.xlim([0.5, runs + 0.5])
pyplot.xticks(numpy.arange(1, len(y)+1, 1))
pyplot.xlabel('Iteration')
pyplot.ylabel('Average RTT')
pyplot.grid()
pyplot.savefig("plot.png")
pyplot.show()
content_name = "ccnx:/test/bunny.ts"
STOP_TIME = "5000s"
repofile = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"repoFile1.0.8.2")
def get_simulator(ec):
simulator = ec.filter_resources("linux::ns3::Simulation")
if not simulator:
node = ec.register_resource("linux::Node")
ec.set(node, "hostname", "localhost")
simu = ec.register_resource("linux::ns3::Simulation")
ec.register_connection(simu, node)
return simu
return simulator[0]
def add_collector(ec, trace_name, subdir, newname = None):
collector = ec.register_resource("Collector")
ec.set(collector, "traceName", trace_name)
ec.set(collector, "subDir", subdir)
if newname:
ec.set(collector, "rename", newname)
return collector
def add_dce_host(ec, nid):
simu = get_simulator(ec)
host = ec.register_resource("ns3::Node")
ec.set(host, "enableStack", True)
ec.register_connection(host, simu)
# Annotate the graph
ec.netgraph.annotate_node(nid, "host", host)
def add_dce_ccnd(ec, nid):
# Retrieve annotation from netgraph
host = ec.netgraph.node_annotation(nid, "host")
# Add dce ccnd to the dce node
ccnd = ec.register_resource("linux::ns3::dce::CCND")
ec.set (ccnd, "stackSize", 1<<20)
ec.set (ccnd, "debug", 7)
ec.set (ccnd, "capacity", 50000)
ec.set (ccnd, "StartTime", "1s")
ec.set (ccnd, "StopTime", STOP_TIME)
ec.register_connection(ccnd, host)
# Collector to retrieve ccnd log
collector = add_collector(ec, "stderr", str(nid), "log")
ec.register_connection(collector, ccnd)
# Annotate the graph
ec.netgraph.annotate_node(nid, "ccnd", ccnd)
def add_dce_ccnr(ec, nid):
# Retrieve annotation from netgraph
host = ec.netgraph.node_annotation(nid, "host")
# Add a CCN content repository to the dce node
ccnr = ec.register_resource("linux::ns3::dce::CCNR")
ec.set (ccnr, "repoFile1", repofile)
ec.set (ccnr, "stackSize", 1<<20)
ec.set (ccnr, "StartTime", "2s")
ec.set (ccnr, "StopTime", STOP_TIME)
ec.register_connection(ccnr, host)
def add_dce_ccncat(ec, nid):
# Retrieve annotation from netgraph
host = ec.netgraph.node_annotation(nid, "host")
# Add a ccncat application to the dce host
ccncat = ec.register_resource("linux::ns3::dce::CCNCat")
ec.set (ccncat, "contentName", content_name)
ec.set (ccncat, "stackSize", 1<<20)
ec.set (ccncat, "StartTime", "8s")
ec.set (ccncat, "StopTime", STOP_TIME)
ec.register_connection(ccncat, host)
def add_dce_fib_entry(ec, nid1, nid2):
# Retrieve annotations from netgraph
host1 = ec.netgraph.node_annotation(nid1, "host")
net = ec.netgraph.edge_net_annotation(nid1, nid2)
ip2 = net[nid2]
# Add FIB entry between peer hosts
ccndc = ec.register_resource("linux::ns3::dce::FIBEntry")
ec.set (ccndc, "protocol", "udp")
ec.set (ccndc, "uri", "ccnx:/")
ec.set (ccndc, "host", ip2)
ec.set (ccndc, "stackSize", 1<<20)
ec.set (ccndc, "StartTime", "2s")
ec.set (ccndc, "StopTime", STOP_TIME)
ec.register_connection(ccndc, host1)
def add_dce_net_iface(ec, nid1, nid2):
# Retrieve annotations from netgraph
host = ec.netgraph.node_annotation(nid1, "host")
net = ec.netgraph.edge_net_annotation(nid1, nid2)
ip1 = net[nid1]
prefix = net["prefix"]
dev = ec.register_resource("ns3::PointToPointNetDevice")
ec.set(dev,"DataRate", "5Mbps")
ec.set(dev, "ip", ip1)
ec.set(dev, "prefix", prefix)
ec.register_connection(host, dev)
queue = ec.register_resource("ns3::DropTailQueue")
ec.register_connection(dev, queue)
return dev
def add_edge(ec, nid1, nid2):
### Add network interfaces to hosts
p2p1 = add_dce_net_iface(ec, nid1, nid2)
p2p2 = add_dce_net_iface(ec, nid2, nid1)
# Create point to point link between interfaces
chan = ec.register_resource("ns3::PointToPointChannel")
ec.set(chan, "Delay", "0ms")
ec.register_connection(chan, p2p1)
ec.register_connection(chan, p2p2)
#### Add routing between CCN nodes
add_dce_fib_entry(ec, nid1, nid2)
add_dce_fib_entry(ec, nid2, nid1)
def add_node(ec, nid):
### Add CCN nodes (ec.netgraph holds the topology graph)
add_dce_host(ec, nid)
add_dce_ccnd(ec, nid)
if nid == ec.netgraph.targets()[0]:
add_dce_ccnr(ec, nid)
if nid == ec.netgraph.sources()[0]:
add_dce_ccncat(ec, nid)
def wait_guids(ec):
return ec.filter_resources("linux::ns3::dce::CCNCat")
if __name__ == '__main__':
metrics = []
# topology translation to NEPI model
ec = ExperimentController("dce_4n_linear",
topo_type = TopologyType.LINEAR,
node_count = 4,
assign_st = True,
assign_ips = True,
add_node_callback = add_node,
add_edge_callback = add_edge)
#### Run experiment until metric convergence
rnr = ExperimentRunner()
runs = rnr.run(ec,
min_runs = 10,
max_runs = 100,
compute_metric_callback = avg_interest_rtt,
evaluate_convergence_callback = normal_law,
wait_guids = wait_guids(ec))
### post processing
post_process(ec, runs)
| gpl-3.0 |
DerThorsten/seglib | seglibpython/seglib/clustering/ce_multicut.py | 1 | 7168 | from seglib import cgp2d
from seglib.preprocessing import norm01
import opengm
import numpy
import vigra
from sklearn.cluster import Ward,WardAgglomeration
class CgpClustering(object):
def __init__(self,cgp):
self.cgp = cgp
self.labels = numpy.zeros(self.cgp.numCells(2),dtype=numpy.uint64)
class HierarchicalClustering(CgpClustering):
def __init__(self,cgp):
super(HierarchicalClustering, self).__init__(cgp)
self.connectivity = cgp.sparseAdjacencyMatrix()
def segment(self,features,nClusters):
#print "features",features.shape
#print "self.connectivity",self.connectivity.shape
self.ward = WardAgglomeration(n_clusters=nClusters, connectivity=self.connectivity).fit(features.T)
self.labels[:] = self.ward.labels_
def mergedCgp(self):
newLabels = self.cgp.featureToImage(cellType=2,features=self.labels.astype(numpy.float32),useTopologicalShape=False)
cgp,tgrid = cgp2d.cgpFromLabels(newLabels.astype(numpy.uint64)+1)
return cgp,tgrid
def probabilityToWeights(p1,out,beta=0.5):
assert len(out)==len(p1)
p0 = 1.0 - p1
out[:]=numpy.log( p0 / p1 ) + numpy.log((1.0-beta)/beta)
return out
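# probabilityToWeights converts a boundary probability p1 into a log-odds edge
# weight, log(p0 / p1) + log((1 - beta) / beta): with beta = 0.5 the prior term
# vanishes, likely boundaries (p1 close to 1) get negative weights and unlikely
# boundaries get positive ones.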
def sampleFromGauss(mean,std,out):
#print "mean",mean.shape
#print "std",std.shape
#print "out",out.shape
assert len(mean)==len(std)
assert len(out)==len(mean)
n = len(mean)
samples = numpy.random.standard_normal(n)
samples *=std
samples +=mean
return samples
def gaussOffset(mean,std):
return std*float(numpy.random.standard_normal(1))+mean
def gradientToWeight(gradient,gamma):
#normGrad = norm01(gradient)
e = numpy.exp(-gamma*gradient)
e1 = e
e0 = 1.0-e1
"""
print "g ",gradient[:5]
print "e0",e0[:5]
print "e1",e1[:5]
print "w ",(e0-e1)[:5]
"""
return e1-e0
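# For non-negative gradients and positive gamma, gradientToWeight returns
# e1 - e0 = 2 * exp(-gamma * gradient) - 1, a value in (-1, 1]: weak gradients
# map to weights near +1 and strong gradients to weights near -1.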
def imgToWeight(cgp,img,gamma,method='exp'):
if tuple(cgp.shape)!=(img.shape):
img=vigra.sampling.resize(img,cgp.shape)
img =norm01(img)+0.1
img/=1.1
accgrad = cgp.accumulateCellFeatures(cellType=1,image=img,features='Mean')[0]['Mean']
if method =='exp':
weights = gradientToWeight(gradient=accgrad,gamma=gamma)
return weights
else :
raise RuntimeError("not impl")
def multicutFromCgp(cgp,weights=None,parameter=None):
boundArray = cgp.cell1BoundsArray()-1
nVar = cgp.numCells(2)
nFac = cgp.numCells(1)
space = numpy.ones(nVar,dtype=opengm.label_type)*nVar
gm = opengm.gm(space)
wZero = numpy.zeros(nFac,dtype=opengm.value_type)
if weights is None:
pf=opengm.pottsFunctions([nVar,nVar],wZero,wZero)
else :
w = numpy.require(weights,dtype=opengm.value_type)
pf=opengm.pottsFunctions([nVar,nVar],wZero,w)
fids = gm.addFunctions(pf)
gm.addFactors(fids,boundArray)
cgc = opengm.inference.Cgc(gm=gm,parameter=parameter)
return cgc,gm
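# multicutFromCgp builds an OpenGM model with one variable per 2-cell (region),
# a label space as large as the number of regions, and one Potts factor per
# 1-cell boundary whose disagreement cost is the supplied weight; the model is
# then handed to the CGC ("Cut, Glue & Cut") inference solver.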
def multicutFromCgp2(cgp,e0,e1,parameter=None):
boundArray = cgp.cell1BoundsArray()-1
nVar = cgp.numCells(2)
nFac = cgp.numCells(1)
space = numpy.ones(nVar,dtype=opengm.label_type)*nVar
gm = opengm.gm(space)
#w = numpy.require(weights,dtype=opengm.value_type)
pf=opengm.pottsFunctions([nVar,nVar],e0,e1)
fids = gm.addFunctions(pf)
gm.addFactors(fids,boundArray)
cgc = opengm.inference.Cgc(gm=gm,parameter=parameter)
return cgc,gm
class AggloCut(object):
def __init__(self,initCgp,edgeImage,featureImage,rgbImage,siftImage,histImage):
self.initCgp = initCgp
self.edgeImage = edgeImage
self.featureImage = featureImage
self.rgbImage = rgbImage
self.siftImage = siftImage
self.histImage = histImage
#
self.iterCgp = initCgp
def infer(self,gammas,deleteN):
cgp2d.visualize(self.rgbImage,cgp=self.iterCgp)
for gamma in gammas:
# get the weights for this gamma
#weights = gradientToWeight(self.edgeImage,gamma)
#w=e1-e0
cuts=True
while(True):
edge = self.iterCgp.accumulateCellFeatures(cellType=1,image=self.edgeImage,features='Mean')[0]['Mean']
feat = self.iterCgp.accumulateCellFeatures(cellType=2,image=self.featureImage,features='Mean')[0]['Mean']
sift = self.iterCgp.accumulateCellFeatures(cellType=2,image=self.siftImage,features='Mean')[0]['Mean']
hist = self.iterCgp.accumulateCellFeatures(cellType=2,image=self.histImage,features='Mean')[0]['Mean']
featDiff = numpy.sqrt(self.iterCgp.cell2ToCell1Feature(feat,mode='l2'))/10.0
siftDiff = (self.iterCgp.cell2ToCell1Feature(sift,mode='chi2'))*10
histDiff = (self.iterCgp.cell2ToCell1Feature(hist,mode='chi2'))*10
print 'featMax',featDiff.min(),featDiff.max()
print 'edgeMax',edge.min(),edge.max()
print 'sift',siftDiff.min(),siftDiff.max()
print 'hist',histDiff.min(),histDiff.max()
edge+=0.1*featDiff
edge+=1.0*siftDiff
edge+=3.0*histDiff
cuts=False
e1=numpy.exp(-gamma*edge)
e0=1.0-e1
for ci in range(self.iterCgp.numCells(1)):
size = len(self.iterCgp.cells1[ci].points)
#print size
e0[ci]*=float(size)
e1[ci]*=float(size)
for ci in range(self.iterCgp.numCells(1)):
bb = len(self.iterCgp.cells1[ci].boundedBy)
if bb==0 :
print "ZERO BOUNDS \n\n"
#e0[ci]*=float(size)
e1[ci]+=2.0
for ci in range(self.iterCgp.numCells(2)):
size = len(self.iterCgp.cells1[ci].points)
if size<=200 :
boundedBy=numpy.array(self.iterCgp.cells2[ci].boundedBy)-1
e1[boundedBy]+=2.0
w = e1-e0
if True:
cgc,gm = multicutFromCgp2(cgp=self.iterCgp,e0=e0,e1=e1,parameter=opengm.InfParam(planar=True,inferMinMarginals=True))
deleteN = 1#2*int(float(self.iterCgp.numCells(1))**(0.5)+0.5)
#cgc.infer(cgc.verboseVisitor())
cgc.infer()
argDual = cgc.argDual()
if(argDual.min()==1):
print "READ GAMMA"
gamma*=0.9
continue
else:
cuts=True
#cgp2d.visualize(self.rgbImage,cgp=self.iterCgp,edge_data_in=argDual.astype(numpy.float32))
factorMinMarginals = cgc.factorMinMarginals()
m0 = factorMinMarginals[:,0].astype(numpy.float128)
m1 = factorMinMarginals[:,1].astype(numpy.float128)
m0*=-1.0
m1*=-1.0
p0 = numpy.exp(m0)/(numpy.exp(m0)+numpy.exp(m1))
p1 = numpy.exp(m1)/(numpy.exp(m0)+numpy.exp(m1))
#cgp2d.visualize(self.rgbImage,cgp=self.iterCgp,edge_data_in=p1.astype(numpy.float32))
whereOn = numpy.where(argDual==1)
nOn = len(whereOn[0])
nOff = len(p0)-nOn
print "nOn",nOn,"off",nOff
p1[whereOn]+=100.0
sortedIndex = numpy.argsort(p1)
toDelete = 1
if deleteN > nOff:
toDelete = nOff
cellStates = numpy.ones(self.iterCgp.numCells(1),dtype=numpy.uint32)
cellStates[sortedIndex[:toDelete]]=0
#cellStates[numpy.argmax(w)]=0
print "argmax"
else :
cellStates = numpy.ones(self.iterCgp.numCells(1),dtype=numpy.uint32)
#cellStates[sortedIndex[:toDelete]]=0
cellStates[numpy.argmax(w)]=0
if self.iterCgp.numCells(2)<50:
cgp2d.visualize(self.rgbImage,cgp=self.iterCgp)
print "merge cells",self.iterCgp.numCells(2),self.iterCgp.numCells(1)
newtgrid = self.iterCgp.merge2Cells(cellStates)
self.iterCgp = cgp2d.Cgp(newtgrid)
class CeMc(object):
def __init__(self,cgp):
self.cgp=cgp | mit |
krafczyk/spack | var/spack/repos/builtin/packages/py-pymatgen/package.py | 5 | 2609 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyPymatgen(PythonPackage):
"""Python Materials Genomics is a robust materials analysis code that
defines core object representations for structures and molecules with
support for many electronic structure codes. It is currently the core
analysis code powering the Materials Project."""
homepage = "http://www.pymatgen.org/"
url = "https://pypi.io/packages/source/p/pymatgen/pymatgen-4.7.2.tar.gz"
version('4.7.2', '9c3a6e8608671c216e4ef89778646fd6')
version('4.6.2', '508f77fdc3e783587348e93e4dfed1b8')
extends('python', ignore='bin/tabulate')
depends_on('[email protected]:', type='build')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-six', type=('build', 'run'))
depends_on('py-requests', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-tabulate', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
| lgpl-2.1 |
shenzebang/scikit-learn | examples/decomposition/plot_faces_decomposition.py | 204 | 4452 | """
============================
Faces dataset decompositions
============================
This example applies to :ref:`olivetti_faces` different unsupervised
matrix decomposition (dimension reduction) methods from the module
:py:mod:`sklearn.decomposition` (see the documentation chapter
:ref:`decompositions`) .
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
estimators = [
('Eigenfaces - RandomizedPCA',
decomposition.RandomizedPCA(n_components=n_components, whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', beta=5.0,
tol=5e-3, sparseness='components'),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
True),
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
if hasattr(estimator, 'noise_variance_'):
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
plt.show()
| bsd-3-clause |
soylentdeen/BlurryApple | Learning/transfer_functions.py | 1 | 2709 | import scipy
import numpy
import matplotlib.pyplot as pyplot
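# A toy closed-loop control simulation in the style of adaptive optics:
# `wavefront` produces a noisy sinusoidal disturbance, `deformablemirror` adds
# the accumulated correction to it, `detector` integrates the open-loop and
# corrected signals over one sampling period, `controlcomputer` turns each
# measurement into a correction (refpos - measurement), and `realtimecomputer`
# wires the pieces together and records the resulting time series.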
class wavefront( object ):
def __init__(self, subsampling):
self.subsampling = subsampling
self.time = 0.0
def generate_data(self, dt):
t = numpy.arange(self.time, self.time+dt, self.subsampling)
d = scipy.randn(len(t))+50.0*numpy.sin(t)+13.0*numpy.cos(t*23.0)
self.time = t[-1]
return d
def move_actuator(self, correction):
self.correction = correction
class detector( object ):
def __init__(self, sampling_rate, openloop, closedloop):
self.sampling_rate = sampling_rate
self.openloop = openloop
self.closedloop = closedloop
def integrate(self):
dt = 1.0/self.sampling_rate
ol = self.openloop.generate_data(dt)
cl = self.closedloop.generate_data(dt)
int_open = numpy.sum(ol)/len(ol)
int_closed = numpy.sum(cl)/len(cl)
return int_open, int_closed
class controlcomputer( object ):
def __init__(self, refpos):
self.refpos = refpos
def calculate_correction(self, measurement):
correction = self.refpos - measurement
return correction
class deformablemirror( object ):
def __init__(self, wavefront):
self.wave = wavefront
self.correction = 0.0
def apply_correction(self, correction):
self.correction += correction
def correct_wavefront(self, dt):
data = self.wave.generate_data(dt) + self.correction
return data
def generate_data(self, dt):
return self.correct_wavefront(dt)
class realtimecomputer( object ):
def __init__(self):
self.wave = wavefront(0.001)
self.dm = deformablemirror(self.wave)
self.det = detector(500.0, self.wave, self.dm)
self.cc = controlcomputer(0.0)
def closeloop(self, looptime):
open_loop = []
closed_loop = []
time = []
correction = []
while self.wave.time < looptime:
ol, cl = self.det.integrate()
corr = self.cc.calculate_correction(cl)
self.dm.apply_correction(corr)
open_loop.append(ol)
closed_loop.append(cl)
correction.append(self.dm.correction)
time.append(self.wave.time)
self.open_loop = numpy.array(open_loop)
self.closed_loop = numpy.array(closed_loop)
self.time = numpy.array(time)
self.correction = numpy.array(correction)
rtc = realtimecomputer()
rtc.closeloop(100.0)
fig = pyplot.figure(0)
fig.clear()
a = fig.add_axes([0.1, 0.1, 0.8, 0.8])
a.plot(rtc.time, rtc.open_loop)
a.plot(rtc.time, rtc.closed_loop)
a.plot(rtc.time, rtc.correction)
fig.show()
| gpl-2.0 |
jianmingtang/PIC-tools | Python/Figure/Figure2D.py | 1 | 2739 | # Copyright (C) 2014 Jian-Ming Tang <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Figure2D
--------
"""
import numpy
import matplotlib.pyplot as plt
class Figure2D:
"""
This class creates and stores 2D plots using Matplotlib.
There are two types of figures:
1. 4 panels with individual species
2. 1 panel of combined species
"""
def __init__(self):
# self.fig stores a list of figure objects
self.figs = []
def add_quad(self, name, X, Y, fZ):
"""
Create a 4-panel figure
name: title of the figure
X, Y: 1D axes data
fZ: 2D data set (Fortran indexing)
"""
# The default ordering for 2D meshgrid is Fortran style
title = name.replace(',', '_')
title = title.replace('=', '')
self.figs.append((title, plt.figure(title)))
for i in range(4):
ax = plt.subplot('22' + str(i + 1))
fX, fY = numpy.meshgrid(X[i], Y[i])
pcm = ax.pcolormesh(fX, fY, fZ[i])
ax.axis('tight')
self.figs[-1][1].colorbar(pcm)
plt.title(name + ',s=' + str(i))
def add_one(self, name, X, Y, fZ):
"""
Create a 1-panel figure
name: title of the figure
X, Y: 1D axes data
fZ: 2D data set (Fortran indexing)
"""
title = name.replace(',', '_')
title = title.replace('=', '')
self.figs.append((title, plt.figure(title)))
fX, fY = numpy.meshgrid(X, Y)
pcm = plt.pcolormesh(fX, fY, fZ)
plt.xlabel('X (de)')
plt.ylabel('Z (de)')
plt.axis('tight')
self.figs[-1][1].colorbar(pcm)
plt.title(name)
def add_streamline(self, X, Y, U, V):
plt.streamplot(X, Y, U, V, color='k', density=[5, 0.7])
def savefig(self):
"""
Save all figures in PNG format
"""
while self.figs != []:
fig = self.figs.pop()
fig[1].savefig(fig[0] + '.png', bbox_inches='tight')
print fig[0] + ' saved'
def show(self):
plt.show()
| gpl-3.0 |
theislab/scvelo | scvelo/plotting/velocity_embedding_grid.py | 1 | 10527 | from ..tools.velocity_embedding import quiver_autoscale, velocity_embedding
from ..tools.utils import groups_to_bool
from .utils import *
from .scatter import scatter
from .docs import doc_scatter, doc_params
from sklearn.neighbors import NearestNeighbors
from scipy.stats import norm as normal
from matplotlib import rcParams
import matplotlib.pyplot as pl
import numpy as np
def compute_velocity_on_grid(
X_emb,
V_emb,
density=None,
smooth=None,
n_neighbors=None,
min_mass=None,
autoscale=True,
adjust_for_stream=False,
cutoff_perc=None,
):
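    """Estimate velocities on a regular grid spanning the embedding.

    Each grid point receives a Gaussian-weighted average of the velocities of
    its nearest cells (kernel scale = grid spacing * smooth). Grid points with
    too little neighboring mass are dropped, or masked with NaNs when
    `adjust_for_stream` is set so the result can feed a streamline plot.
    """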
# remove invalid cells
idx_valid = np.isfinite(X_emb.sum(1) + V_emb.sum(1))
X_emb = X_emb[idx_valid]
V_emb = V_emb[idx_valid]
# prepare grid
n_obs, n_dim = X_emb.shape
density = 1 if density is None else density
smooth = 0.5 if smooth is None else smooth
grs = []
for dim_i in range(n_dim):
m, M = np.min(X_emb[:, dim_i]), np.max(X_emb[:, dim_i])
m = m - 0.01 * np.abs(M - m)
M = M + 0.01 * np.abs(M - m)
gr = np.linspace(m, M, int(50 * density))
grs.append(gr)
meshes_tuple = np.meshgrid(*grs)
X_grid = np.vstack([i.flat for i in meshes_tuple]).T
# estimate grid velocities
if n_neighbors is None:
n_neighbors = int(n_obs / 50)
nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=-1)
nn.fit(X_emb)
dists, neighs = nn.kneighbors(X_grid)
scale = np.mean([(g[1] - g[0]) for g in grs]) * smooth
weight = normal.pdf(x=dists, scale=scale)
p_mass = weight.sum(1)
V_grid = (V_emb[neighs] * weight[:, :, None]).sum(1)
V_grid /= np.maximum(1, p_mass)[:, None]
if min_mass is None:
min_mass = 1
if adjust_for_stream:
X_grid = np.stack([np.unique(X_grid[:, 0]), np.unique(X_grid[:, 1])])
ns = int(np.sqrt(len(V_grid[:, 0])))
V_grid = V_grid.T.reshape(2, ns, ns)
mass = np.sqrt((V_grid ** 2).sum(0))
min_mass = 10 ** (min_mass - 6) # default min_mass = 1e-5
min_mass = np.clip(min_mass, None, np.max(mass) * 0.9)
cutoff = mass.reshape(V_grid[0].shape) < min_mass
if cutoff_perc is None:
cutoff_perc = 5
length = np.sum(np.mean(np.abs(V_emb[neighs]), axis=1), axis=1).T
length = length.reshape(ns, ns)
cutoff |= length < np.percentile(length, cutoff_perc)
V_grid[0][cutoff] = np.nan
else:
min_mass *= np.percentile(p_mass, 99) / 100
X_grid, V_grid = X_grid[p_mass > min_mass], V_grid[p_mass > min_mass]
if autoscale:
V_grid /= 3 * quiver_autoscale(X_grid, V_grid)
return X_grid, V_grid
@doc_params(scatter=doc_scatter)
def velocity_embedding_grid(
adata,
basis=None,
vkey="velocity",
density=None,
smooth=None,
min_mass=None,
arrow_size=None,
arrow_length=None,
arrow_color=None,
scale=None,
autoscale=True,
n_neighbors=None,
recompute=None,
X=None,
V=None,
X_grid=None,
V_grid=None,
principal_curve=False,
color=None,
use_raw=None,
layer=None,
color_map=None,
colorbar=True,
palette=None,
size=None,
alpha=0.2,
perc=None,
sort_order=True,
groups=None,
components=None,
projection="2d",
legend_loc="none",
legend_fontsize=None,
legend_fontweight=None,
xlabel=None,
ylabel=None,
title=None,
fontsize=None,
figsize=None,
dpi=None,
frameon=None,
show=None,
save=None,
ax=None,
ncols=None,
**kwargs,
):
"""\
Scatter plot of velocities on a grid.
Arguments
---------
adata: :class:`~anndata.AnnData`
Annotated data matrix.
density: `float` (default: 1)
Amount of velocities to show - 0 none to 1 all
arrow_size: `float` or triple `headlength, headwidth, headaxislength` (default: 1)
Size of arrows.
arrow_length: `float` (default: 1)
Length of arrows.
scale: `float` (default: 1)
Length of velocities in the embedding.
min_mass: `float` or `None` (default: `None`)
Minimum threshold for mass to be shown.
It can range between 0 (all velocities) and 100 (large velocities).
smooth: `float` (default: 0.5)
Multiplication factor for scale in Gaussian kernel around grid point.
n_neighbors: `int` (default: None)
Number of neighbors to consider around grid point.
X: `np.ndarray` (default: None)
embedding grid point coordinates
V: `np.ndarray` (default: None)
embedding grid velocity coordinates
{scatter}
Returns
-------
`matplotlib.Axis` if `show==False`
"""
basis = default_basis(adata, **kwargs) if basis is None else get_basis(adata, basis)
if vkey == "all":
lkeys = list(adata.layers.keys())
vkey = [key for key in lkeys if "velocity" in key and "_u" not in key]
color, color_map = kwargs.pop("c", color), kwargs.pop("cmap", color_map)
colors = make_unique_list(color, allow_array=True)
layers, vkeys = make_unique_list(layer), make_unique_list(vkey)
if V is None:
for key in vkeys:
if recompute or velocity_embedding_changed(adata, basis=basis, vkey=key):
velocity_embedding(adata, basis=basis, vkey=key)
color, layer, vkey = colors[0], layers[0], vkeys[0]
color = default_color(adata) if color is None else color
if X_grid is None or V_grid is None:
_adata = (
adata[groups_to_bool(adata, groups, groupby=color)]
if groups is not None and color in adata.obs.keys()
else adata
)
comps, obsm = get_components(components, basis), _adata.obsm
X_emb = np.array(obsm[f"X_{basis}"][:, comps]) if X is None else X[:, :2]
V_emb = np.array(obsm[f"{vkey}_{basis}"][:, comps]) if V is None else V[:, :2]
X_grid, V_grid = compute_velocity_on_grid(
X_emb=X_emb,
V_emb=V_emb,
density=density,
autoscale=autoscale,
smooth=smooth,
n_neighbors=n_neighbors,
min_mass=min_mass,
)
scatter_kwargs = {
"basis": basis,
"perc": perc,
"use_raw": use_raw,
"sort_order": sort_order,
"alpha": alpha,
"components": components,
"projection": projection,
"legend_loc": legend_loc,
"groups": groups,
"legend_fontsize": legend_fontsize,
"legend_fontweight": legend_fontweight,
"palette": palette,
"color_map": color_map,
"frameon": frameon,
"xlabel": xlabel,
"ylabel": ylabel,
"colorbar": colorbar,
"dpi": dpi,
"fontsize": fontsize,
"show": False,
"save": False,
}
multikey = (
colors
if len(colors) > 1
else layers
if len(layers) > 1
else vkeys
if len(vkeys) > 1
else None
)
if multikey is not None:
if title is None:
title = list(multikey)
elif isinstance(title, (list, tuple)):
title *= int(np.ceil(len(multikey) / len(title)))
ncols = len(multikey) if ncols is None else min(len(multikey), ncols)
nrows = int(np.ceil(len(multikey) / ncols))
figsize = rcParams["figure.figsize"] if figsize is None else figsize
figsize, dpi = get_figure_params(figsize, dpi, ncols)
gs_figsize = (figsize[0] * ncols, figsize[1] * nrows)
ax = []
for i, gs in enumerate(
pl.GridSpec(nrows, ncols, pl.figure(None, gs_figsize, dpi=dpi))
):
if i < len(multikey):
ax.append(
velocity_embedding_grid(
adata,
density=density,
scale=scale,
size=size,
min_mass=min_mass,
smooth=smooth,
n_neighbors=n_neighbors,
principal_curve=principal_curve,
ax=pl.subplot(gs),
arrow_size=arrow_size,
arrow_length=arrow_length,
color=colors[i] if len(colors) > 1 else color,
layer=layers[i] if len(layers) > 1 else layer,
vkey=vkeys[i] if len(vkeys) > 1 else vkey,
title=title[i] if isinstance(title, (list, tuple)) else title,
X_grid=None if len(vkeys) > 1 else X_grid,
V_grid=None if len(vkeys) > 1 else V_grid,
autoscale=False if len(vkeys) > 1 else autoscale,
**scatter_kwargs,
**kwargs,
)
)
savefig_or_show(dpi=dpi, save=save, show=show)
if show is False:
return ax
else:
ax, show = get_ax(ax, show, figsize, dpi)
hl, hw, hal = default_arrow(arrow_size)
if arrow_length is not None:
scale = 1 / arrow_length
if scale is None:
scale = 1
if arrow_color is None:
arrow_color = "grey"
quiver_kwargs = {"angles": "xy", "scale_units": "xy", "edgecolors": "k"}
quiver_kwargs.update({"scale": scale, "width": 0.001, "headlength": hl / 2})
quiver_kwargs.update({"headwidth": hw / 2, "headaxislength": hal / 2})
quiver_kwargs.update({"color": arrow_color, "linewidth": 0.2, "zorder": 3})
for arg in list(kwargs):
if arg in quiver_kwargs:
quiver_kwargs.update({arg: kwargs[arg]})
else:
scatter_kwargs.update({arg: kwargs[arg]})
ax.quiver(
X_grid[:, 0], X_grid[:, 1], V_grid[:, 0], V_grid[:, 1], **quiver_kwargs
)
if principal_curve:
curve = adata.uns["principal_curve"]["projections"]
pl.plot(curve[:, 0], curve[:, 1], c="w", lw=6, zorder=4)
pl.plot(curve[:, 0], curve[:, 1], c="k", lw=3, zorder=5)
size = 4 * default_size(adata) if size is None else size
ax = scatter(
adata,
layer=layer,
color=color,
size=size,
title=title,
ax=ax,
zorder=0,
**scatter_kwargs,
)
savefig_or_show(dpi=dpi, save=save, show=show)
if show is False:
return ax
| bsd-3-clause |
ElDeveloper/scikit-learn | benchmarks/bench_mnist.py | 44 | 6801 | """
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 from their raw images. By contrast to the
covertype dataset, the feature space is homogenous.
Example of output :
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
------------------------------------------------------------
MLP_adam 53.46s 0.11s 0.0224
Nystroem-SVM 112.97s 0.92s 0.0228
MultilayerPerceptron 24.33s 0.14s 0.0287
ExtraTrees 42.99s 0.57s 0.0294
RandomForest 42.70s 0.49s 0.0318
SampledRBF-SVM 135.81s 0.56s 0.0486
LinearRegression-SAG 16.67s 0.06s 0.0824
CART 20.69s 0.02s 0.1219
dummy 0.00s 0.01s 0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_mldata('MNIST original')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
'RandomForest': RandomForestClassifier(n_estimators=100),
'Nystroem-SVM': make_pipeline(
Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM': make_pipeline(
RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'LinearRegression-SAG': LogisticRegression(solver='sag', tol=1e-1, C=1e4),
'MultilayerPerceptron': MLPClassifier(
hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
algorithm='sgd', learning_rate_init=0.2, momentum=0.9, verbose=1,
tol=1e-4, random_state=1),
'MLP-adam': MLPClassifier(
hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
algorithm='adam', learning_rate_init=0.001, verbose=1,
tol=1e-4, random_state=1)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
| bsd-3-clause |
3324fr/spinalcordtoolbox | scripts/sct_get_centerline.py | 1 | 49809 | #!/usr/bin/env python
# ==========================================================================================
#
# Copyright (c) 2013 NeuroPoly, Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Julien Cohen-Adad, Geoffrey Leveque, Olivier Comtois
#
# License: see the LICENSE.TXT
# ==========================================================================================
from msct_base_classes import BaseScript, Algorithm
import numpy as np
from sct_straighten_spinalcord import smooth_centerline
import sct_convert as conv
from msct_parser import Parser
from nibabel import load, save, Nifti1Image
from sct_process_segmentation import extract_centerline
import os
import commands
import sys
from time import strftime, time
import sct_utils as sct
from numpy import mgrid, zeros, exp, unravel_index, argmax, poly1d, polyval, linalg, max, polyfit, sqrt, abs, savetxt
import glob
#from sct_utils import fsloutput
from sct_image import get_orientation_3d, set_orientation
from sct_convert import convert
from msct_image import Image
from sct_image import copy_header, split_data, concat_data
from scipy.ndimage.filters import gaussian_filter
class Param:
## The constructor
def __init__(self):
self.debug = 0
self.verbose = 1 # verbose
self.remove_temp_files = 1
self.type_window = 'hanning' # for smooth_centerline @sct_straighten_spinalcord
self.window_length = 80 # for smooth_centerline @sct_straighten_spinalcord
self.algo_fitting = 'nurbs'
self.list_file = []
self.output_file_name = ''
self.schedule_file = 'flirtsch/schedule_TxTy.sch'
self.gap = 4 # default gap between co-registered slices.
self.gaussian_kernel = 4 # gaussian kernel for creating gaussian mask from center point.
self.deg_poly = 10 # maximum degree of polynomial function for fitting centerline.
self.remove_tmp_files = 1 # remove temporary files
def get_centerline_from_point(input_image, point_file, gap=4, gaussian_kernel=4, remove_tmp_files=1):
# Initialization
fname_anat = input_image
fname_point = point_file
slice_gap = gap
remove_tmp_files = remove_tmp_files
gaussian_kernel = gaussian_kernel
start_time = time()
verbose = 1
# get path of the toolbox
status, path_sct = commands.getstatusoutput('echo $SCT_DIR')
path_sct = sct.slash_at_the_end(path_sct, 1)
# Parameters for debug mode
if param.debug == 1:
sct.printv('\n*** WARNING: DEBUG MODE ON ***\n\t\t\tCurrent working directory: '+os.getcwd(), 'warning')
status, path_sct_testing_data = commands.getstatusoutput('echo $SCT_TESTING_DATA_DIR')
fname_anat = path_sct_testing_data+'/t2/t2.nii.gz'
fname_point = path_sct_testing_data+'/t2/t2_centerline_init.nii.gz'
slice_gap = 5
# check existence of input files
sct.check_file_exist(fname_anat)
sct.check_file_exist(fname_point)
# extract path/file/extension
path_anat, file_anat, ext_anat = sct.extract_fname(fname_anat)
path_point, file_point, ext_point = sct.extract_fname(fname_point)
# extract path of schedule file
# TODO: include schedule file in sct
# TODO: check existence of schedule file
file_schedule = path_sct + param.schedule_file
# Get input image orientation
input_image_orientation = get_orientation_3d(fname_anat, filename=True)
# Display arguments
print '\nCheck input arguments...'
print ' Anatomical image: '+fname_anat
print ' Orientation: '+input_image_orientation
print ' Point in spinal cord: '+fname_point
print ' Slice gap: '+str(slice_gap)
print ' Gaussian kernel: '+str(gaussian_kernel)
print ' Degree of polynomial: '+str(param.deg_poly)
# create temporary folder
print('\nCreate temporary folder...')
path_tmp = 'tmp.'+strftime('%y%m%d%H%M%S')
sct.create_folder(path_tmp)
print '\nCopy input data...'
sct.run('cp '+fname_anat+ ' '+path_tmp+'/tmp.anat'+ext_anat)
sct.run('cp '+fname_point+ ' '+path_tmp+'/tmp.point'+ext_point)
# go to temporary folder
os.chdir(path_tmp)
# convert to nii
im_anat = convert('tmp.anat'+ext_anat, 'tmp.anat.nii')
im_point = convert('tmp.point'+ext_point, 'tmp.point.nii')
# Reorient input anatomical volume into RL PA IS orientation
print '\nReorient input volume to RL PA IS orientation...'
set_orientation(im_anat, 'RPI')
im_anat.setFileName('tmp.anat_orient.nii')
# Reorient binary point into RL PA IS orientation
print '\nReorient binary point into RL PA IS orientation...'
# sct.run(sct.fsloutput + 'fslswapdim tmp.point RL PA IS tmp.point_orient')
set_orientation(im_point, 'RPI')
im_point.setFileName('tmp.point_orient.nii')
# Get image dimensions
print '\nGet image dimensions...'
nx, ny, nz, nt, px, py, pz, pt = Image('tmp.anat_orient.nii').dim
print '.. matrix size: '+str(nx)+' x '+str(ny)+' x '+str(nz)
print '.. voxel size: '+str(px)+'mm x '+str(py)+'mm x '+str(pz)+'mm'
# Split input volume
print '\nSplit input volume...'
im_anat_split_list = split_data(im_anat, 2)
file_anat_split = []
for im in im_anat_split_list:
file_anat_split.append(im.absolutepath)
im.save()
im_point_split_list = split_data(im_point, 2)
file_point_split = []
for im in im_point_split_list:
file_point_split.append(im.absolutepath)
im.save()
# Extract coordinates of input point
data_point = Image('tmp.point_orient.nii').data
x_init, y_init, z_init = unravel_index(data_point.argmax(), data_point.shape)
sct.printv('Coordinates of input point: ('+str(x_init)+', '+str(y_init)+', '+str(z_init)+')', verbose)
# Create 2D gaussian mask
sct.printv('\nCreate gaussian mask from point...', verbose)
xx, yy = mgrid[:nx, :ny]
mask2d = zeros((nx, ny))
radius = round(float(gaussian_kernel+1)/2) # add 1 because the radius includes the center.
sigma = float(radius)
mask2d = exp(-(((xx-x_init)**2)/(2*(sigma**2)) + ((yy-y_init)**2)/(2*(sigma**2))))
# Save mask to 2d file
file_mask_split = ['tmp.mask_orient_Z'+str(z).zfill(4) for z in range(0, nz, 1)]
nii_mask2d = Image('tmp.anat_orient_Z0000.nii')
nii_mask2d.data = mask2d
nii_mask2d.setFileName(file_mask_split[z_init]+'.nii')
nii_mask2d.save()
# initialize variables
file_mat = ['tmp.mat_Z'+str(z).zfill(4) for z in range(0, nz, 1)]
file_mat_inv = ['tmp.mat_inv_Z'+str(z).zfill(4) for z in range(0, nz, 1)]
file_mat_inv_cumul = ['tmp.mat_inv_cumul_Z'+str(z).zfill(4) for z in range(0, nz, 1)]
# create identity matrix for initial transformation matrix
fid = open(file_mat_inv_cumul[z_init], 'w')
fid.write('%i %i %i %i\n' % (1, 0, 0, 0))
fid.write('%i %i %i %i\n' % (0, 1, 0, 0))
fid.write('%i %i %i %i\n' % (0, 0, 1, 0))
fid.write('%i %i %i %i\n' % (0, 0, 0, 1))
fid.close()
# initialize centerline: give value corresponding to initial point
x_centerline = [x_init]
y_centerline = [y_init]
z_centerline = [z_init]
warning_count = 0
# go up (1), then down (2) in reference to the binary point
for iUpDown in range(1, 3):
if iUpDown == 1:
# z increases
slice_gap_signed = slice_gap
elif iUpDown == 2:
# z decreases
slice_gap_signed = -slice_gap
# reverse centerline (because values will be appended at the end)
x_centerline.reverse()
y_centerline.reverse()
z_centerline.reverse()
# initialization before looping
z_dest = z_init # point given by user
z_src = z_dest + slice_gap_signed
# continue looping if 0 <= z < nz
while 0 <= z_src < nz:
# print current z:
print 'z='+str(z_src)+':'
# estimate transformation
sct.run(fsloutput+'flirt -in '+file_anat_split[z_src]+' -ref '+file_anat_split[z_dest]+' -schedule ' +
file_schedule + ' -verbose 0 -omat ' + file_mat[z_src] +
' -cost normcorr -forcescaling -inweight ' + file_mask_split[z_dest] +
' -refweight '+file_mask_split[z_dest])
# display transfo
status, output = sct.run('cat '+file_mat[z_src])
print output
# check if transformation is bigger than 1.5x slice_gap
tx = float(output.split()[3])
ty = float(output.split()[7])
norm_txy = linalg.norm([tx, ty], ord=2)
if norm_txy > 1.5*slice_gap:
print 'WARNING: Transformation is too large --> using previous one.'
warning_count = warning_count + 1
# if previous transformation exists, replace current one with previous one
if os.path.isfile(file_mat[z_dest]):
sct.run('cp '+file_mat[z_dest]+' '+file_mat[z_src])
# estimate inverse transformation matrix
sct.run('convert_xfm -omat '+file_mat_inv[z_src]+' -inverse '+file_mat[z_src])
# compute cumulative transformation
sct.run('convert_xfm -omat '+file_mat_inv_cumul[z_src]+' -concat '+file_mat_inv[z_src]+' '+file_mat_inv_cumul[z_dest])
# apply inverse cumulative transformation to initial gaussian mask (to put it in src space)
sct.run(fsloutput+'flirt -in '+file_mask_split[z_init]+' -ref '+file_mask_split[z_init]+' -applyxfm -init '+file_mat_inv_cumul[z_src]+' -out '+file_mask_split[z_src])
# open inverse cumulative transformation file and generate centerline
fid = open(file_mat_inv_cumul[z_src])
mat = fid.read().split()
x_centerline.append(x_init + float(mat[3]))
y_centerline.append(y_init + float(mat[7]))
z_centerline.append(z_src)
#z_index = z_index+1
# define new z_dest (target slice) and new z_src (moving slice)
z_dest = z_dest + slice_gap_signed
z_src = z_src + slice_gap_signed
# Reconstruct centerline
# ====================================================================================================
# reverse back centerline (because it's been reversed once, so now all values are in the right order)
x_centerline.reverse()
y_centerline.reverse()
z_centerline.reverse()
# fit centerline in the Z-X plane using polynomial function
print '\nFit centerline in the Z-X plane using polynomial function...'
coeffsx = polyfit(z_centerline, x_centerline, deg=param.deg_poly)
polyx = poly1d(coeffsx)
x_centerline_fit = polyval(polyx, z_centerline)
# calculate RMSE
rmse = linalg.norm(x_centerline_fit-x_centerline)/sqrt( len(x_centerline) )
# calculate max absolute error
max_abs = max(abs(x_centerline_fit-x_centerline))
print '.. RMSE (in mm): '+str(rmse*px)
print '.. Maximum absolute error (in mm): '+str(max_abs*px)
# fit centerline in the Z-Y plane using polynomial function
print '\nFit centerline in the Z-Y plane using polynomial function...'
coeffsy = polyfit(z_centerline, y_centerline, deg=param.deg_poly)
polyy = poly1d(coeffsy)
y_centerline_fit = polyval(polyy, z_centerline)
# calculate RMSE
rmse = linalg.norm(y_centerline_fit-y_centerline)/sqrt( len(y_centerline) )
# calculate max absolute error
max_abs = max( abs(y_centerline_fit-y_centerline) )
print '.. RMSE (in mm): '+str(rmse*py)
print '.. Maximum absolute error (in mm): '+str(max_abs*py)
# display
if param.debug == 1:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(z_centerline,x_centerline,'.',z_centerline,x_centerline_fit,'r')
plt.legend(['Data','Polynomial Fit'])
plt.title('Z-X plane polynomial interpolation')
plt.show()
plt.figure()
plt.plot(z_centerline,y_centerline,'.',z_centerline,y_centerline_fit,'r')
plt.legend(['Data','Polynomial Fit'])
plt.title('Z-Y plane polynomial interpolation')
plt.show()
# generate full range z-values for centerline
z_centerline_full = [iz for iz in range(0, nz, 1)]
# calculate X and Y values for the full centerline
x_centerline_fit_full = polyval(polyx, z_centerline_full)
y_centerline_fit_full = polyval(polyy, z_centerline_full)
# Generate fitted transformation matrices and write centerline coordinates in text file
print '\nGenerate fitted transformation matrices and write centerline coordinates in text file...'
file_mat_inv_cumul_fit = ['tmp.mat_inv_cumul_fit_z'+str(z).zfill(4) for z in range(0,nz,1)]
file_mat_cumul_fit = ['tmp.mat_cumul_fit_z'+str(z).zfill(4) for z in range(0,nz,1)]
fid_centerline = open('tmp.centerline_coordinates.txt', 'w')
for iz in range(0, nz, 1):
# compute inverse cumulative fitted transformation matrix
fid = open(file_mat_inv_cumul_fit[iz], 'w')
fid.write('%i %i %i %f\n' % (1, 0, 0, x_centerline_fit_full[iz]-x_init))
fid.write('%i %i %i %f\n' % (0, 1, 0, y_centerline_fit_full[iz]-y_init))
fid.write('%i %i %i %i\n' % (0, 0, 1, 0))
fid.write('%i %i %i %i\n' % (0, 0, 0, 1))
fid.close()
# compute forward cumulative fitted transformation matrix
sct.run('convert_xfm -omat '+file_mat_cumul_fit[iz]+' -inverse '+file_mat_inv_cumul_fit[iz])
# write centerline coordinates in x, y, z format
fid_centerline.write('%f %f %f\n' %(x_centerline_fit_full[iz], y_centerline_fit_full[iz], z_centerline_full[iz]) )
fid_centerline.close()
# Prepare output data
# ====================================================================================================
# write centerline as text file
for iz in range(0, nz, 1):
# compute inverse cumulative fitted transformation matrix
fid = open(file_mat_inv_cumul_fit[iz], 'w')
fid.write('%i %i %i %f\n' % (1, 0, 0, x_centerline_fit_full[iz]-x_init))
fid.write('%i %i %i %f\n' % (0, 1, 0, y_centerline_fit_full[iz]-y_init))
fid.write('%i %i %i %i\n' % (0, 0, 1, 0))
fid.write('%i %i %i %i\n' % (0, 0, 0, 1))
fid.close()
# write polynomial coefficients
savetxt('tmp.centerline_polycoeffs_x.txt',coeffsx)
savetxt('tmp.centerline_polycoeffs_y.txt',coeffsy)
# apply transformations to data
print '\nApply fitted transformation matrices...'
file_anat_split_fit = ['tmp.anat_orient_fit_z'+str(z).zfill(4) for z in range(0, nz, 1)]
file_mask_split_fit = ['tmp.mask_orient_fit_z'+str(z).zfill(4) for z in range(0, nz, 1)]
file_point_split_fit = ['tmp.point_orient_fit_z'+str(z).zfill(4) for z in range(0, nz, 1)]
for iz in range(0, nz, 1):
# forward cumulative transformation to data
sct.run(fsloutput+'flirt -in '+file_anat_split[iz]+' -ref '+file_anat_split[iz]+' -applyxfm -init '+file_mat_cumul_fit[iz]+' -out '+file_anat_split_fit[iz])
# inverse cumulative transformation to mask
sct.run(fsloutput+'flirt -in '+file_mask_split[z_init]+' -ref '+file_mask_split[z_init]+' -applyxfm -init '+file_mat_inv_cumul_fit[iz]+' -out '+file_mask_split_fit[iz])
# inverse cumulative transformation to point
sct.run(fsloutput+'flirt -in '+file_point_split[z_init]+' -ref '+file_point_split[z_init]+' -applyxfm -init '+file_mat_inv_cumul_fit[iz]+' -out '+file_point_split_fit[iz]+' -interp nearestneighbour')
# Merge into 4D volume
print '\nMerge into 4D volume...'
# im_anat_list = [Image(fname) for fname in glob.glob('tmp.anat_orient_fit_z*.nii')]
fname_anat_list = glob.glob('tmp.anat_orient_fit_z*.nii')
im_anat_concat = concat_data(fname_anat_list, 2)
im_anat_concat.setFileName('tmp.anat_orient_fit.nii')
im_anat_concat.save()
# im_mask_list = [Image(fname) for fname in glob.glob('tmp.mask_orient_fit_z*.nii')]
fname_mask_list = glob.glob('tmp.mask_orient_fit_z*.nii')
im_mask_concat = concat_data(fname_mask_list, 2)
im_mask_concat.setFileName('tmp.mask_orient_fit.nii')
im_mask_concat.save()
# im_point_list = [Image(fname) for fname in glob.glob('tmp.point_orient_fit_z*.nii')]
fname_point_list = glob.glob('tmp.point_orient_fit_z*.nii')
im_point_concat = concat_data(fname_point_list, 2)
im_point_concat.setFileName('tmp.point_orient_fit.nii')
im_point_concat.save()
# Copy header geometry from input data
print '\nCopy header geometry from input data...'
im_anat = Image('tmp.anat_orient.nii')
im_anat_orient_fit = Image('tmp.anat_orient_fit.nii')
im_mask_orient_fit = Image('tmp.mask_orient_fit.nii')
im_point_orient_fit = Image('tmp.point_orient_fit.nii')
im_anat_orient_fit = copy_header(im_anat, im_anat_orient_fit)
im_mask_orient_fit = copy_header(im_anat, im_mask_orient_fit)
im_point_orient_fit = copy_header(im_anat, im_point_orient_fit)
for im in [im_anat_orient_fit, im_mask_orient_fit, im_point_orient_fit]:
im.save()
# Reorient outputs into the initial orientation of the input image
print '\nReorient the centerline into the initial orientation of the input image...'
set_orientation('tmp.point_orient_fit.nii', input_image_orientation, 'tmp.point_orient_fit.nii')
set_orientation('tmp.mask_orient_fit.nii', input_image_orientation, 'tmp.mask_orient_fit.nii')
# Generate output file (in current folder)
print '\nGenerate output file (in current folder)...'
os.chdir('..') # come back to parent folder
fname_output_centerline = sct.generate_output_file(path_tmp+'/tmp.point_orient_fit.nii', file_anat+'_centerline'+ext_anat)
# Delete temporary files
if remove_tmp_files == 1:
print '\nRemove temporary files...'
sct.run('rm -rf '+path_tmp, error_exit='warning')
# print number of warnings
    print '\nNumber of warnings: '+str(warning_count)+' (if >10, you should probably reduce the gap and/or increase the kernel size)'
# display elapsed time
elapsed_time = time() - start_time
print '\nFinished! \n\tGenerated file: '+fname_output_centerline+'\n\tElapsed time: '+str(int(round(elapsed_time)))+'s\n'
def get_centerline_from_labels(fname_in, list_fname_labels, param, output_file_name=None, remove_temp_files=1, verbose=0):
path, file, ext = sct.extract_fname(fname_in)
# create temporary folder
path_tmp = sct.slash_at_the_end('tmp.'+strftime('%y%m%d%H%M%S'), 1)
sct.run('mkdir '+path_tmp)
# Copying input data to tmp folder
sct.printv('\nCopying input data to tmp folder...', verbose)
sct.run('sct_convert -i '+fname_in+' -o '+path_tmp+'data.nii')
file_labels = []
for i in range(len(list_fname_labels)):
file_labels.append('labels_'+str(i)+'.nii.gz')
sct.run('sct_convert -i '+list_fname_labels[i]+' -o '+path_tmp+file_labels[i])
# go to tmp folder
os.chdir(path_tmp)
## Concatenation of the files
# Concatenation : sum of matrices
file_0 = Image('data.nii')
data_concatenation = file_0.data
hdr_0 = file_0.hdr
orientation_file_0 = get_orientation_3d(file_0)
if len(list_fname_labels) > 0:
for i in range(0, len(list_fname_labels)):
orientation_file_temp = get_orientation_3d(file_labels[i], filename=True)
if orientation_file_0 != orientation_file_temp :
print 'ERROR: The files ', fname_in, ' and ', file_labels[i], ' are not in the same orientation. Use sct_image -setorient to change the orientation of a file.'
sys.exit(2)
file_temp = load(file_labels[i])
data_temp = file_temp.get_data()
data_concatenation = data_concatenation + data_temp
# Save concatenation as a file
print '\nWrite NIFTI volumes...'
img = Nifti1Image(data_concatenation, None, hdr_0)
save(img, 'concatenation_file.nii.gz')
# Applying nurbs to the concatenation and save file as binary file
fname_output = extract_centerline('concatenation_file.nii.gz', remove_temp_files = remove_temp_files, verbose = verbose, algo_fitting=param.algo_fitting, type_window=param.type_window, window_length=param.window_length)
# Rename files after processing
    if output_file_name is None:
        output_file_name = 'generated_centerline.nii.gz'
os.rename(fname_output, output_file_name)
path_binary, file_binary, ext_binary = sct.extract_fname(output_file_name)
os.rename('concatenation_file_centerline.txt', file_binary+'.txt')
# Process for a binary file as output:
sct.run('cp '+output_file_name+' ../')
# Process for a text file as output:
sct.run('cp '+file_binary+ '.txt'+ ' ../')
os.chdir('../')
# Remove temporary files
if remove_temp_files:
print('\nRemove temporary files...')
sct.run('rm -rf '+path_tmp, error_exit='warning')
# Display results
    # The concatenated centerline and its fitted curve are displayed within extract_centerline
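# Hedged usage sketch (hypothetical file names, not part of the original toolbox): completing a
# partial centerline/segmentation with a few extra label points placed in the gaps:
#
#   param = Param()
#   get_centerline_from_labels('t2_centerline_partial.nii.gz', ['t2_labels.nii.gz'], param,
#                              output_file_name='t2_centerline_full.nii.gz')
#
# The fitted centerline volume and its text file of coordinates are copied back to the folder
# the function was called from.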
def smooth_minimal_path(img, nb_pixels=1):
"""
Function intended to smooth the minimal path result in the R-L/A-P directions with a gaussian filter
of a kernel of size nb_pixels
:param img: Image to be smoothed (is intended to be minimal path image)
:param nb_pixels: kernel size of the gaussian filter
:return: returns a smoothed image
"""
nx, ny, nz, nt, px, py, pz, pt = img.dim
from scipy.ndimage.filters import gaussian_filter
raw_orientation = img.change_orientation()
img.data = gaussian_filter(img.data, [nb_pixels/px, nb_pixels/py, 0])
img.change_orientation(raw_orientation)
return img
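# Hedged sketch (illustrative only, not part of the original toolbox): smooth_minimal_path is a
# per-slice Gaussian filter whose sigma is nb_pixels converted to voxel units. The helper below
# reproduces the same operation on a bare numpy array, without the msct_image.Image wrapper.
def _example_smooth_like_minimal_path(data, px=1.0, py=1.0, nb_pixels=1):
    from scipy.ndimage.filters import gaussian_filter
    # smooth in the R-L (x) and A-P (y) directions only; leave the I-S (z) direction untouched
    return gaussian_filter(data, [nb_pixels / px, nb_pixels / py, 0])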
def symmetry_detector_right_left(data, cropped_xy=0):
"""
    Estimates the right-left body symmetry of the input volume: the left-right profile of each
    slice (after projecting along the antero-posterior axis) is cross-correlated with its
    mirrored version.
    :param data: input 3D data used for the algorithm
    :param cropped_xy: 1 to crop around the center of the image before the correlation, 0 otherwise
    :return: an array of the same shape as data whose values quantify the left-right symmetry
"""
from scipy.ndimage.filters import gaussian_filter
    # Squeeze the data and define variables
data = np.squeeze(data)
dim = data.shape
img_data = gaussian_filter(data, [0, 5, 5])
# Cropping around center of image to remove side noise
if cropped_xy:
x_mid = np.round(dim[0]/2)
x_crop_min = int(x_mid - (0.25/2)*dim[0])
x_crop_max = int(x_mid + (0.25/2)*dim[0])
img_data[0:x_crop_min,:,:] = 0
img_data[x_crop_max:-1,:,:] = 0
# Acquiring a slice and inverted slice for correlation
slice_p = np.squeeze(np.sum(img_data, 1))
slice_p_reversed = np.flipud(slice_p)
# initialise containers for correlation
m, n = slice_p.shape
cross_corr = ((2*m)-1, n)
cross_corr = np.zeros(cross_corr)
for iz in range(0, np.size(slice_p[1])):
corr1 = slice_p[:, iz]
corr2 = slice_p_reversed[:, iz]
cross_corr[:, iz] = np.double(np.correlate(corr1, corr2, 'full'))
max_value = np.max(cross_corr[:, iz])
if max_value == 0:
cross_corr[:, iz] = 0
else:
cross_corr[:, iz] = cross_corr[:, iz]/max_value
data_out = np.zeros((dim[0], dim[2]))
index1 = np.round(np.linspace(0,2*m-3, m))
index2 = np.round(np.linspace(1,2*m-2, m))
for i in range(0,m):
indx1 = int(index1[i])
indx2 = int(index2[i])
out1 = cross_corr[indx1, :]
out2 = cross_corr[indx2, :]
data_out[i, :] = 0.5*(out1 + out2)
result = np.hstack([data_out[:, np.newaxis, :] for i in range(0, dim[1])])
return result
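# Hedged usage sketch (illustrative only, not part of the original toolbox): on a dummy volume
# that is left-right symmetric, the detector returns a map of the same shape, with the largest
# values near the symmetry axis.
def _example_symmetry_detector_right_left():
    import numpy as np
    volume = np.zeros((20, 10, 15))
    volume[8:12, :, :] = 1.0  # a band that is symmetric about x = 10
    symmetry_map = symmetry_detector_right_left(volume, cropped_xy=0)
    assert symmetry_map.shape == volume.shape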
def normalize_array_histogram(array):
"""
    Rescales the data in array to the [0, 1] range (min-max normalization, performed in place)
    :param array: input numpy array
    :return: the normalized array
"""
array_min = np.amin(array)
array -= array_min
array_max = np.amax(array)
array /= array_max
return array
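# Hedged usage sketch (illustrative only, not part of the original toolbox): despite its name,
# normalize_array_histogram is a plain min-max rescaling to [0, 1].
def _example_normalize_array_histogram():
    import numpy as np
    normalized = normalize_array_histogram(np.array([2.0, 4.0, 6.0]))
    assert np.allclose(normalized, [0.0, 0.5, 1.0])  # 2 -> 0.0, 4 -> 0.5, 6 -> 1.0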
def get_minimum_path(data, smooth_factor=np.sqrt(2), invert=1, verbose=1, debug=0):
"""
This method returns the minimal path of the image
:param data: input data of the image
    :param smooth_factor: factor used to penalize moves in the directions that are not up-down
    :param invert: inverts the image data for the algorithm. The algorithm works better if the image data is inverted
    :param verbose: verbosity level
    :param debug: debug flag
    :return: the minimal path map together with the two directional cost maps (J1, J2)
"""
[m, n, p] = data.shape
max_value = np.amax(data)
if invert:
data=max_value-data
J1 = np.ones([m, n, p])*np.inf
J2 = np.ones([m, n, p])*np.inf
J1[:, :, 0] = 0
for row in range(1, p):
pJ = J1[:, :, row-1]
cP = np.squeeze(data[1:-2, 1:-2, row])
VI = np.dstack((cP*smooth_factor, cP*smooth_factor, cP, cP*smooth_factor, cP*smooth_factor))
Jq = np.dstack((pJ[0:-3, 1:-2], pJ[1:-2, 0:-3], pJ[1:-2, 1:-2], pJ[1:-2, 2:-1], pJ[2:-1, 1:-2]))
J1[1:-2, 1:-2, row] = np.min(Jq+VI, 2)
J2[:, :, p-1] = 0
for row in range(p-2, -1, -1):
pJ = J2[:, :, row+1]
cP = np.squeeze(data[1:-2, 1:-2, row])
VI = np.dstack((cP*smooth_factor, cP*smooth_factor, cP, cP*smooth_factor, cP*smooth_factor))
Jq = np.dstack((pJ[0:-3, 1:-2], pJ[1:-2, 0:-3], pJ[1:-2, 1:-2], pJ[1:-2, 2:-1], pJ[2:-1, 1:-2]))
J2[1:-2, 1:-2, row] = np.min(Jq+VI, 2)
result = J1+J2
if invert:
percent = np.percentile(result, 50)
result[result > percent] = percent
result_min = np.amin(result)
result_max = np.amax(result)
result = np.divide(np.subtract(result, result_min), result_max)
result_max = np.amax(result)
result = 1-result
result[result == np.inf] = 0
    result[np.isnan(result)] = 0  # '== np.nan' never matches; use isnan to clear NaN values
return result, J1, J2
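# Hedged usage sketch (illustrative only, not part of the original toolbox): running the
# minimum-path accumulation on a small synthetic volume with a bright column in the middle.
# With invert=1 the returned map is highest along that column, which get_centerline() then
# exploits slice by slice.
def _example_get_minimum_path():
    import numpy as np
    volume = np.zeros((7, 7, 6))
    volume[3, 3, :] = 100.0  # simulated bright spinal cord voxels
    result, J1, J2 = get_minimum_path(volume, invert=1)
    assert result.shape == volume.shape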
def get_minimum_path_nii(fname):
from msct_image import Image
data=Image(fname)
vesselness_data = data.data
raw_orient=data.change_orientation()
result ,J1, J2 = get_minimum_path(data.data, invert=1)
data.data = result
data.change_orientation(raw_orient)
data.file_name += '_minimalpath'
data.save()
def ind2sub(array_shape, ind):
"""
:param array_shape: shape of the array
:param ind: index number
:return: coordinates equivalent to the index number for a given array shape
"""
rows = (ind.astype('int') / array_shape[1])
cols = (ind.astype('int') % array_shape[1]) # or numpy.mod(ind.astype('int'), array_shape[1])
return rows, cols
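# Hedged usage sketch (illustrative only, not part of the original toolbox): ind2sub converts the
# flat index returned by numpy.argmax back into (row, column) coordinates, mirroring
# numpy.unravel_index.
def _example_ind2sub():
    import numpy as np
    demo = np.zeros((5, 4))
    demo[1, 3] = 1.0
    row, col = ind2sub(demo.shape, np.array(np.argmax(demo)))  # flat index 1*4 + 3 = 7
    assert (int(row), int(col)) == (1, 3)  # same result as np.unravel_index(7, (5, 4))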
def get_centerline(data, dim):
"""
This function extracts the highest value per slice from a minimal path image
and builds the centerline from it
:param data:
:param dim:
:return:
"""
centerline = np.zeros(dim)
data[data == np.inf] = 0
    data[np.isnan(data)] = 0  # '== np.nan' never matches; use isnan to clear NaN values
for iz in range(0, dim[2]):
ind = np.argmax(data[:, :, iz])
X, Y = ind2sub(data[:, :, iz].shape,ind)
centerline[X,Y,iz] = 1
return centerline
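# Hedged usage sketch (illustrative only, not part of the original toolbox): get_centerline keeps,
# for every axial slice, only the voxel with the highest minimum-path score and sets it to 1.
def _example_get_centerline():
    import numpy as np
    scores = np.random.rand(7, 7, 6)
    scores[3, 3, :] = 10.0  # force the per-slice maximum onto a known voxel
    centerline = get_centerline(scores, scores.shape)
    assert centerline.sum() == scores.shape[2]  # exactly one voxel per slice
    assert centerline[3, 3, :].all()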
class SymmetryDetector(Algorithm):
def __init__(self, input_image, contrast=None, verbose=0, direction='lr', nb_sections=1, crop_xy=1):
super(SymmetryDetector, self).__init__(input_image)
self._contrast = contrast
self._verbose = verbose
self.direction = direction
self.nb_sections = nb_sections
self.crop_xy = crop_xy
@property
def contrast(self):
return self._contrast
@contrast.setter
def contrast(self, value):
if value in ['t1', 't2']:
self._contrast = value
else:
raise Exception('ERROR: contrast value must be t1 or t2')
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, value):
if value in [0, 1]:
self._verbose = value
else:
raise Exception('ERROR: verbose value must be an integer and equal to 0 or 1')
def execute(self):
"""
This method executes the symmetry detection
:return: returns the symmetry data
"""
img = Image(self.input_image)
raw_orientation = img.change_orientation()
data = np.squeeze(img.data)
dim = data.shape
section_length = dim[1]/self.nb_sections
result = np.zeros(dim)
for i in range(0, self.nb_sections):
if (i+1)*section_length > dim[1]:
y_length = (i+1)*section_length - ((i+1)*section_length - dim[1])
result[:, i*section_length:i*section_length + y_length, :] = symmetry_detector_right_left(data[:, i*section_length:i*section_length + y_length, :], cropped_xy=self.crop_xy)
sym = symmetry_detector_right_left(data[:, i*section_length:(i+1)*section_length, :], cropped_xy=self.crop_xy)
result[:, i*section_length:(i+1)*section_length, :] = sym
result_image = Image(img)
if len(result_image.data) == 4:
result_image.data = result[:,:,:,np.newaxis]
else:
result_image.data = result
result_image.change_orientation(raw_orientation)
return result_image.data
class SCAD(Algorithm):
def __init__(self, input_image, contrast=None, verbose=1, rm_tmp_file=0,output_filename=None, debug=0, vesselness_provided=0, minimum_path_exponent=100, enable_symmetry=0, symmetry_exponent=0, spinalcord_radius = 3, smooth_vesselness = 0):
"""
Constructor for the automatic spinal cord detection
:param output_filename: Name of the result file of the centerline detection. Must contain the extension (.nii / .nii.gz)
:param input_image:
:param contrast:
        :param verbose: verbosity level (debug output files are produced when verbose == 2)
        :param rm_tmp_file:
        :param debug:
:param vesselness_provided: Activate if the vesselness filter image is already provided (to save time),
the image is expected to be in the same folder as the input image
:return:
"""
produce_output = 0
if verbose == 2:
produce_output = 1
super(SCAD, self).__init__(input_image, produce_output=produce_output)
self._contrast = contrast
self._verbose = verbose
self.output_filename = input_image.file_name + '_centerline.nii.gz'
if output_filename is not None:
self.output_filename = output_filename
self.rm_tmp_file = rm_tmp_file
self.debug = debug
self.vesselness_provided = vesselness_provided
self.minimum_path_exponent = minimum_path_exponent
self.enable_symmetry = enable_symmetry
self.symmetry_exponent = symmetry_exponent
self.spinalcord_radius = spinalcord_radius
self.smooth_vesselness = smooth_vesselness
# attributes used in the algorithm
self.raw_orientation = None
self.raw_symmetry = None
self.J1_min_path = None
self.J2_min_path = None
self.minimum_path_data = None
self.minimum_path_powered = None
self.smoothed_min_path = None
self.spine_detect_data = None
self.centerline_with_outliers = None
self.debug_folder = None
self.path_tmp = None
@property
def contrast(self):
return self._contrast
@contrast.setter
def contrast(self, value):
if value in ['t1', 't2']:
self._contrast = value
else:
raise Exception('ERROR: contrast value must be t1 or t2')
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, value):
if value in [0, 1, 2]:
self._verbose = value
else:
            raise Exception('ERROR: verbose value must be an integer and equal to 0, 1 or 2')
def produce_output_files(self):
"""
Method used to output all debug files at the same time. To be used after the algorithm is executed
:return:
"""
import time
from sct_utils import slash_at_the_end
path_tmp = slash_at_the_end('scad_output_'+time.strftime('%y%m%d%H%M%S'), 1)
sct.run('mkdir '+path_tmp, self.verbose)
# getting input image header
img = self.input_image.copy()
# saving body symmetry
img.data = self.raw_symmetry
img.change_orientation(self.raw_orientation)
img.file_name += 'body_symmetry'
img.save()
# saving minimum paths
img.data = self.minimum_path_data
img.change_orientation(self.raw_orientation)
img.file_name = 'min_path'
img.save()
img.data = self.J1_min_path
img.change_orientation(self.raw_orientation)
img.file_name = 'J1_min_path'
img.save()
img.data = self.J2_min_path
img.change_orientation(self.raw_orientation)
img.file_name = 'J2_min_path'
img.save()
# saving minimum path powered
img.data = self.minimum_path_powered
img.change_orientation(self.raw_orientation)
img.file_name = 'min_path_powered_'+str(self.minimum_path_exponent)
img.save()
# saving smoothed min path
img = self.smoothed_min_path.copy()
img.change_orientation(self.raw_orientation)
img.file_name = 'min_path_power_'+str(self.minimum_path_exponent)+'_smoothed'
img.save()
# save symmetry_weighted_minimal_path
img.data = self.spine_detect_data
img.change_orientation(self.raw_orientation)
img.file_name = 'symmetry_weighted_minimal_path'
img.save()
def output_debug_file(self, img, data, file_name):
"""
This method writes a nifti file that corresponds to a step in the algorithm for easy debug.
        The new nifti file uses the header from the image passed as parameter
        :param data: data to be written to file
        :param file_name: name of the output nifti file
:return: None
"""
if self.verbose == 2:
current_folder = os.getcwd()
#os.chdir(self.path_tmp)
try:
img = Image(img)
img.data = data
img.change_orientation(self.raw_orientation)
img.file_name = file_name
img.save()
except Exception, e:
print e
#os.chdir(current_folder)
def setup_debug_folder(self):
"""
Sets up the folder for the step by step files for this algorithm
The folder's absolute path can be found in the self.debug_folder property
:return: None
"""
if self.produce_output:
import time
from sct_utils import slash_at_the_end
folder = slash_at_the_end('scad_output_'+time.strftime('%y%m%d%H%M%S'), 1)
sct.run('mkdir '+folder, self.verbose)
self.debug_folder = os.path.abspath(folder)
conv.convert(str(self.input_image.absolutepath), str(self.debug_folder)+'/raw.nii.gz')
def create_temporary_path(self):
import time
from sct_utils import slash_at_the_end
path_tmp = slash_at_the_end('tmp.'+time.strftime('%y%m%d%H%M%S'), 1)
sct.run('mkdir '+path_tmp, self.verbose)
return path_tmp
def execute(self):
print 'Execution of the SCAD algorithm in '+str(os.getcwd())
original_name = self.input_image.file_name
vesselness_file_name = 'imageVesselNessFilter.nii.gz'
raw_file_name = 'raw.nii'
# self.setup_debug_folder()
if self.debug:
import matplotlib.pyplot as plt # import for debug purposes
# create tmp and copy input
self.path_tmp = self.create_temporary_path()
conv.convert(self.input_image.absolutepath, self.path_tmp+raw_file_name)
if self.vesselness_provided:
sct.run('cp '+vesselness_file_name+' '+self.path_tmp+vesselness_file_name)
os.chdir(self.path_tmp)
# get input image information
img = Image(raw_file_name)
# save original orientation and change image to RPI
self.raw_orientation = img.change_orientation()
# get body symmetry
if self.enable_symmetry:
from msct_image import change_data_orientation
sym = SymmetryDetector(raw_file_name, self.contrast, crop_xy=0)
self.raw_symmetry = sym.execute()
img.change_orientation(self.raw_orientation)
self.output_debug_file(img, self.raw_symmetry, 'body_symmetry')
img.change_orientation()
if self.smooth_vesselness:
from msct_image import change_data_orientation
img.data = gaussian_filter(img.data, [10,10, 1])
self.output_debug_file(img, img.data, "raw_smooth")
normalised_symmetry = normalize_array_histogram(self.raw_symmetry)
# normalized_data = normalize_array_histogram(img.data)
img.data = np.multiply(img.data, change_data_orientation(normalised_symmetry, self.raw_orientation, "RPI"))
img.file_name = "symmetry_x_rawsmoothed"
raw_file_name = img.file_name + img.ext
img.change_orientation(self.raw_orientation)
img.save()
self._contrast = "t1"
# vesselness filter
if not self.vesselness_provided:
sct.run('isct_vesselness -i '+raw_file_name+' -t ' + self._contrast+' -radius '+str(self.spinalcord_radius))
# load vesselness filter data and perform minimum path on it
img = Image(vesselness_file_name)
img.change_orientation()
self.minimum_path_data, self.J1_min_path, self.J2_min_path = get_minimum_path(img.data, invert=1, debug=1)
self.output_debug_file(img, self.minimum_path_data, 'minimal_path')
self.output_debug_file(img, self.J1_min_path, 'J1_minimal_path')
self.output_debug_file(img, self.J2_min_path, 'J2_minimal_path')
# Apply an exponent to the minimum path
self.minimum_path_powered = np.power(self.minimum_path_data, self.minimum_path_exponent)
self.output_debug_file(img, self.minimum_path_powered, 'minimal_path_power_'+str(self.minimum_path_exponent))
# Saving in Image since smooth_minimal_path needs pixel dimensions
img.data = self.minimum_path_powered
# smooth resulting minimal path
self.smoothed_min_path = smooth_minimal_path(img)
self.output_debug_file(img, self.smoothed_min_path.data, 'minimal_path_smooth')
# normalise symmetry values between 0 and 1
if self.enable_symmetry:
normalised_symmetry = normalize_array_histogram(self.raw_symmetry)
self.output_debug_file(img, self.smoothed_min_path.data, "normalized_symmetry")
# multiply normalised symmetry data with the minimum path result
from msct_image import change_data_orientation
rpi_normalized_sym = change_data_orientation(np.power(normalised_symmetry, self.symmetry_exponent), self.raw_orientation, "RPI")
self.spine_detect_data = np.multiply(self.smoothed_min_path.data, rpi_normalized_sym)
self.output_debug_file(img, self.spine_detect_data, "symmetry_x_min_path")
# extract the centerline from the minimal path image
self.centerline_with_outliers = get_centerline(self.spine_detect_data, self.spine_detect_data.shape)
else:
# extract the centerline from the minimal path image
self.centerline_with_outliers = get_centerline(self.smoothed_min_path.data, self.smoothed_min_path.data.shape)
self.output_debug_file(img, self.centerline_with_outliers, 'centerline_with_outliers')
# saving centerline with outliers to have
img.data = self.centerline_with_outliers
img.change_orientation()
img.file_name = 'centerline_with_outliers'
img.save()
# use a b-spline to smooth out the centerline
x, y, z, dx, dy, dz = smooth_centerline('centerline_with_outliers.nii.gz')
# save the centerline
nx, ny, nz, nt, px, py, pz, pt = img.dim
img.data = np.zeros((nx, ny, nz))
for i in range(0, np.size(x)):
img.data[int(x[i]), int(y[i]), int(z[i])] = 1
self.output_debug_file(img, img.data, 'centerline')
img.change_orientation(self.raw_orientation)
img.file_name = 'centerline'
img.save()
# copy back centerline
os.chdir('../')
conv.convert(self.path_tmp+img.file_name+img.ext, self.output_filename)
if self.rm_tmp_file == 1:
import shutil
shutil.rmtree(self.path_tmp)
print 'To view the output with FSL :'
sct.printv('fslview '+self.input_image.absolutepath+' '+self.output_filename+' -l Red &', self.verbose, 'info')
def get_parser():
"""
:return: Returns the parser with the command line documentation contained in it.
"""
# Initialize the parser
parser = Parser(__file__)
    parser.usage.set_description("""This program is used to get the centerline of the spinal cord of a subject by using one of the three methods described by the -p flag.""")
parser.add_option(name='-i',
type_value='image_nifti',
description='Image to get centerline from.',
mandatory=True,
example='t2.nii.gz')
parser.usage.addSection('Execution Option')
parser.add_option(name='-p',
type_value='multiple_choice',
description='Method to get the centerline:\n'
'auto: Uses vesselness filtering + minimal path + body symmetry. Fully automatic.\n'
'point: Uses slice-by-slice registration. Requires point inside the cord. Requires FSL flirt.\n'
                                  'labels: To use if you want to complete an existing centerline/segmentation: fit spline function across labels. Requires a couple of points along the cord. The -i file should be an incomplete segmentation of the spinal cord or centerline, and the -l file should contain labels where there is a gap in the segmentation/centerline.',
mandatory=True,
example=['auto', 'point', 'labels'])
parser.add_option(name='-method',
type_value='multiple_choice',
description='Method to get the centerline:\n'
'auto: Uses vesselness filtering + minimal path + body symmetry. Fully automatic.\n'
'point: Uses slice-by-slice registration. Requires point inside the cord. Requires FSL flirt.\n'
'labels: Fit spline function across labels. Requires a couple of points along the cord.',
mandatory=False,
deprecated_by='-p',
example=['auto', 'point', 'labels'])
parser.usage.addSection('General options')
parser.add_option(name='-o',
type_value='str',
description='Centerline file name (result file name)',
mandatory=False,
example='out.nii.gz')
parser.add_option(name='-r',
type_value='multiple_choice',
description= 'Removes the temporary folder and debug folder used for the algorithm at the end of execution',
mandatory=False,
default_value='1',
example=['0', '1'])
parser.add_option(name='-v',
type_value='multiple_choice',
                      description='0: display off, 1: display on (default), 2: display on and save debug files',
mandatory=False,
example=['0', '1', '2'],
default_value='1')
parser.add_option(name='-h',
type_value=None,
description='display this help',
mandatory=False)
parser.usage.addSection('Automatic method options')
parser.add_option(name='-c',
type_value='multiple_choice',
description='type of image contrast, t2: cord dark / CSF bright ; t1: cord bright / CSF dark.\n'
'For dMRI use t1, for T2* or MT use t2',
mandatory=False,
example=['t1', 't2'])
parser.add_option(name='-contrast',
type_value='multiple_choice',
description='type of image contrast, t2: cord dark / CSF bright ; t1: cord bright / CSF dark.\n'
'For dMRI use t1, for T2* or MT use t2',
mandatory=False,
deprecated_by='-c',
example=['t1', 't2'])
parser.add_option(name='-t',
type_value='multiple_choice',
description='type of image contrast, t2: cord dark / CSF bright ; t1: cord bright / CSF dark.\n'
'For dMRI use t1, for T2* or MT use t2',
deprecated_by='-c',
mandatory=False,
example=['t1', 't2'])
parser.add_option(name="-radius",
type_value="int",
description="Approximate radius of spinal cord to help the algorithm",
mandatory=False,
default_value="4",
example="4")
parser.add_option(name="-smooth_vesselness",
type_value="multiple_choice",
description="Smoothing of the vesselness image",
mandatory=False,
default_value="0",
example=['0', '1'])
parser.add_option(name='-sym_exp',
type_value='int',
description='Weight symmetry value (only use with flag -sym). Minimum weight: 0, maximum weight: 100.',
mandatory=False,
default_value=10)
parser.add_option(name='-sym',
type_value='multiple_choice',
description='Uses right-left symmetry of the image to improve accuracy.',
mandatory=False,
default_value='0',
example=['0', '1'])
parser.usage.addSection('Point method options')
parser.add_option(name='-point',
type_value='file',
description='Binary image with a point inside the spinal cord.',
mandatory=False,
example='t2_point.nii.gz')
parser.add_option(name="-g",
type_value="int",
description="Gap between slices for registration. Higher is faster but less robust.",
mandatory=False,
default_value=4,
example="4")
parser.add_option(name='-k',
type_value='int',
description='Kernel size for gaussian mask. Higher is more robust but less accurate.',
mandatory=False,
default_value=4,
example='4')
parser.usage.addSection('Label method options')
parser.add_option(name='-l',
type_value=[[','], 'file'],
description='Binary image with several points (5 to 10) along the spinal cord.',
mandatory=False,
example='t2_labels.nii.gz')
return parser
if __name__ == '__main__':
param = Param()
param_default = Param()
# init default params
output_file_name = None
verbose = param_default.verbose
rm_tmp_files = param_default.remove_temp_files
# get parser info
parser = get_parser()
arguments = parser.parse(sys.argv[1:])
method = arguments['-p']
fname_in = arguments['-i']
if '-o' in arguments:
output_file_name = arguments['-o']
if '-v' in arguments:
verbose = int(arguments['-v'])
if '-r' in arguments:
rm_tmp_files = int(arguments['-r'])
if method == 'labels':
if '-l' in arguments:
list_fname_labels = arguments['-l']
else:
sct.printv('ERROR: Needs input label (option -l).', 1, 'error')
get_centerline_from_labels(fname_in, list_fname_labels, param, output_file_name, rm_tmp_files)
elif method == 'point':
if '-point' in arguments:
fname_point = arguments['-point']
else:
sct.printv('ERROR: Needs input point (option -point).', 1, 'error')
if '-g' in arguments:
gap = arguments['-g']
if '-k' in arguments:
gaussian_kernel = arguments['-k']
get_centerline_from_point(fname_in, fname_point, gap, gaussian_kernel, rm_tmp_files)
elif method == 'auto':
try:
contrast = arguments['-c']
except Exception, e:
            sct.printv('The automatic method requires a contrast type to be defined (option -c).', type='error')
im = Image(fname_in)
scad = SCAD(im, contrast=contrast)
if '-o' in arguments:
scad.output_filename = arguments['-o']
if '-r' in arguments:
scad.rm_tmp_file = int(arguments['-r'])
if '-sym' in arguments:
scad.enable_symmetry = int(arguments['-sym'])
if '-sym_exp' in arguments:
scad.symmetry_exponent = int(arguments['-sym_exp'])
if '-radius' in arguments:
scad.spinalcord_radius = int(arguments['-radius'])
if '-smooth_vesselness' in arguments:
scad.smooth_vesselness = int(arguments['-smooth_vesselness'])
if '-v' in arguments:
scad.verbose = int(arguments['-v'])
scad.execute()
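# Hedged usage sketch (hypothetical invocation; the actual script name depends on how this module
# is installed): typical command lines, one per method exposed by the -p option:
#
#   sct_get_centerline -i t2.nii.gz -p auto -c t2
#   sct_get_centerline -i t2.nii.gz -p point -point t2_point.nii.gz -g 4 -k 4
#   sct_get_centerline -i t2_seg_with_gaps.nii.gz -p labels -l t2_labels.nii.gz -o t2_centerline.nii.gz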
| mit |
pducks32/intergrala | python/sympy/sympy/interactive/printing.py | 22 | 15069 | """Tools for setting up printing in interactive sessions. """
from __future__ import print_function, division
from io import BytesIO
from sympy import latex as default_latex
from sympy import preview
from sympy.core.compatibility import integer_types
from sympy.utilities.misc import debug
def _init_python_printing(stringify_func):
"""Setup printing in Python interactive session. """
import sys
from sympy.core.compatibility import builtins
def _displayhook(arg):
"""Python's pretty-printer display hook.
This function was adapted from:
http://www.python.org/dev/peps/pep-0217/
"""
if arg is not None:
builtins._ = None
print(stringify_func(arg))
builtins._ = arg
sys.displayhook = _displayhook
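# Hedged sketch (illustrative only, not part of SymPy): once _init_python_printing(stringify_func)
# has run in a plain Python session, evaluating an expression at the prompt goes through the
# replaced sys.displayhook, so the result is rendered with stringify_func instead of repr() and is
# still bound to the builtin '_' variable, e.g. after init_printing(pretty_print=False):
#
#   >>> Symbol('x') + 1
#   x + 1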
def _init_ipython_printing(ip, stringify_func, use_latex, euler, forecolor,
backcolor, fontsize, latex_mode, print_builtin,
latex_printer):
"""Setup printing in IPython interactive session. """
try:
from IPython.lib.latextools import latex_to_png
except ImportError:
pass
preamble = "\\documentclass[%s]{article}\n" \
"\\pagestyle{empty}\n" \
"\\usepackage{amsmath,amsfonts}%s\\begin{document}"
if euler:
addpackages = '\\usepackage{euler}'
else:
addpackages = ''
preamble = preamble % (fontsize, addpackages)
imagesize = 'tight'
offset = "0cm,0cm"
resolution = 150
dvi = r"-T %s -D %d -bg %s -fg %s -O %s" % (
imagesize, resolution, backcolor, forecolor, offset)
dvioptions = dvi.split()
debug("init_printing: DVIOPTIONS:", dvioptions)
debug("init_printing: PREAMBLE:", preamble)
latex = latex_printer or default_latex
def _print_plain(arg, p, cycle):
"""caller for pretty, for use in IPython 0.11"""
if _can_print_latex(arg):
p.text(stringify_func(arg))
else:
p.text(IPython.lib.pretty.pretty(arg))
def _preview_wrapper(o):
exprbuffer = BytesIO()
try:
preview(o, output='png', viewer='BytesIO',
outputbuffer=exprbuffer, preamble=preamble,
dvioptions=dvioptions)
except Exception as e:
# IPython swallows exceptions
debug("png printing:", "_preview_wrapper exception raised:",
repr(e))
raise
return exprbuffer.getvalue()
def _matplotlib_wrapper(o):
# mathtext does not understand certain latex flags, so we try to
# replace them with suitable subs
o = o.replace(r'\operatorname', '')
o = o.replace(r'\overline', r'\bar')
return latex_to_png(o)
def _can_print_latex(o):
"""Return True if type o can be printed with LaTeX.
If o is a container type, this is True if and only if every element of
o can be printed with LaTeX.
"""
from sympy import Basic
from sympy.matrices import MatrixBase
from sympy.physics.vector import Vector, Dyadic
if isinstance(o, (list, tuple, set, frozenset)):
return all(_can_print_latex(i) for i in o)
elif isinstance(o, dict):
return all(_can_print_latex(i) and _can_print_latex(o[i]) for i in o)
elif isinstance(o, bool):
return False
# TODO : Investigate if "elif hasattr(o, '_latex')" is more useful
# to use here, than these explicit imports.
elif isinstance(o, (Basic, MatrixBase, Vector, Dyadic)):
return True
elif isinstance(o, (float, integer_types)) and print_builtin:
return True
return False
def _print_latex_png(o):
"""
A function that returns a png rendered by an external latex
distribution, falling back to matplotlib rendering
"""
if _can_print_latex(o):
s = latex(o, mode=latex_mode)
try:
return _preview_wrapper(s)
except RuntimeError:
if latex_mode != 'inline':
s = latex(o, mode='inline')
return _matplotlib_wrapper(s)
def _print_latex_matplotlib(o):
"""
A function that returns a png rendered by mathtext
"""
if _can_print_latex(o):
s = latex(o, mode='inline')
try:
return _matplotlib_wrapper(s)
except Exception:
# Matplotlib.mathtext cannot render some things (like
# matrices)
return None
def _print_latex_text(o):
"""
A function to generate the latex representation of sympy expressions.
"""
if _can_print_latex(o):
s = latex(o, mode='plain')
s = s.replace(r'\dag', r'\dagger')
s = s.strip('$')
return '$$%s$$' % s
def _result_display(self, arg):
"""IPython's pretty-printer display hook, for use in IPython 0.10
This function was adapted from:
ipython/IPython/hooks.py:155
"""
if self.rc.pprint:
out = stringify_func(arg)
if '\n' in out:
                print()  # bare 'print' is a no-op with print_function imported; call it to emit the blank line
print(out)
else:
print(repr(arg))
import IPython
if IPython.__version__ >= '0.11':
from sympy.core.basic import Basic
from sympy.matrices.matrices import MatrixBase
from sympy.physics.vector import Vector, Dyadic
printable_types = [Basic, MatrixBase, float, tuple, list, set,
frozenset, dict, Vector, Dyadic] + list(integer_types)
plaintext_formatter = ip.display_formatter.formatters['text/plain']
for cls in printable_types:
plaintext_formatter.for_type(cls, _print_plain)
png_formatter = ip.display_formatter.formatters['image/png']
if use_latex in (True, 'png'):
debug("init_printing: using png formatter")
for cls in printable_types:
png_formatter.for_type(cls, _print_latex_png)
elif use_latex == 'matplotlib':
debug("init_printing: using matplotlib formatter")
for cls in printable_types:
png_formatter.for_type(cls, _print_latex_matplotlib)
else:
debug("init_printing: not using any png formatter")
for cls in printable_types:
# Better way to set this, but currently does not work in IPython
#png_formatter.for_type(cls, None)
if cls in png_formatter.type_printers:
png_formatter.type_printers.pop(cls)
latex_formatter = ip.display_formatter.formatters['text/latex']
if use_latex in (True, 'mathjax'):
debug("init_printing: using mathjax formatter")
for cls in printable_types:
latex_formatter.for_type(cls, _print_latex_text)
else:
debug("init_printing: not using text/latex formatter")
for cls in printable_types:
# Better way to set this, but currently does not work in IPython
#latex_formatter.for_type(cls, None)
if cls in latex_formatter.type_printers:
latex_formatter.type_printers.pop(cls)
else:
ip.set_hook('result_display', _result_display)
def init_printing(pretty_print=True, order=None, use_unicode=None,
use_latex=None, wrap_line=None, num_columns=None,
no_global=False, ip=None, euler=False, forecolor='Black',
backcolor='Transparent', fontsize='10pt',
latex_mode='equation*', print_builtin=True,
str_printer=None, pretty_printer=None,
latex_printer=None):
"""
Initializes pretty-printer depending on the environment.
Parameters
==========
pretty_print: boolean
If True, use pretty_print to stringify or the provided pretty
printer; if False, use sstrrepr to stringify or the provided string
printer.
order: string or None
There are a few different settings for this parameter:
        lex (default), which is lexicographic order;
        grlex, which is graded lexicographic order;
        grevlex, which is reversed graded lexicographic order;
old, which is used for compatibility reasons and for long expressions;
None, which sets it to lex.
use_unicode: boolean or None
If True, use unicode characters;
if False, do not use unicode characters.
use_latex: string, boolean, or None
If True, use default latex rendering in GUI interfaces (png and
mathjax);
if False, do not use latex rendering;
if 'png', enable latex rendering with an external latex compiler,
falling back to matplotlib if external compilation fails;
if 'matplotlib', enable latex rendering with matplotlib;
if 'mathjax', enable latex text generation, for example MathJax
rendering in IPython notebook or text rendering in LaTeX documents
wrap_line: boolean
If True, lines will wrap at the end; if False, they will not wrap
but continue as one line. This is only relevant if `pretty_print` is
True.
num_columns: int or None
If int, number of columns before wrapping is set to num_columns; if
None, number of columns before wrapping is set to terminal width.
This is only relevant if `pretty_print` is True.
no_global: boolean
If True, the settings become system wide;
if False, use just for this console/session.
ip: An interactive console
This can either be an instance of IPython,
or a class that derives from code.InteractiveConsole.
euler: boolean, optional, default=False
Loads the euler package in the LaTeX preamble for handwritten style
fonts (http://www.ctan.org/pkg/euler).
forecolor: string, optional, default='Black'
DVI setting for foreground color.
backcolor: string, optional, default='Transparent'
DVI setting for background color.
fontsize: string, optional, default='10pt'
A font size to pass to the LaTeX documentclass function in the
preamble.
latex_mode: string, optional, default='equation*'
The mode used in the LaTeX printer. Can be one of:
{'inline'|'plain'|'equation'|'equation*'}.
print_builtin: boolean, optional, default=True
If true then floats and integers will be printed. If false the
printer will only print SymPy types.
str_printer: function, optional, default=None
A custom string printer function. This should mimic
sympy.printing.sstrrepr().
pretty_printer: function, optional, default=None
A custom pretty printer. This should mimic sympy.printing.pretty().
latex_printer: function, optional, default=None
        A custom LaTeX printer. This should mimic sympy.printing.latex().
Examples
========
>>> from sympy.interactive import init_printing
>>> from sympy import Symbol, sqrt
>>> from sympy.abc import x, y
>>> sqrt(5)
sqrt(5)
>>> init_printing(pretty_print=True) # doctest: +SKIP
>>> sqrt(5) # doctest: +SKIP
___
\/ 5
>>> theta = Symbol('theta') # doctest: +SKIP
>>> init_printing(use_unicode=True) # doctest: +SKIP
>>> theta # doctest: +SKIP
\u03b8
>>> init_printing(use_unicode=False) # doctest: +SKIP
>>> theta # doctest: +SKIP
theta
>>> init_printing(order='lex') # doctest: +SKIP
>>> str(y + x + y**2 + x**2) # doctest: +SKIP
x**2 + x + y**2 + y
>>> init_printing(order='grlex') # doctest: +SKIP
>>> str(y + x + y**2 + x**2) # doctest: +SKIP
x**2 + x + y**2 + y
>>> init_printing(order='grevlex') # doctest: +SKIP
>>> str(y * x**2 + x * y**2) # doctest: +SKIP
x**2*y + x*y**2
>>> init_printing(order='old') # doctest: +SKIP
>>> str(x**2 + y**2 + x + y) # doctest: +SKIP
x**2 + x + y**2 + y
>>> init_printing(num_columns=10) # doctest: +SKIP
>>> x**2 + x + y**2 + y # doctest: +SKIP
x + y +
x**2 + y**2
"""
import sys
from sympy.printing.printer import Printer
if pretty_print:
if pretty_printer is not None:
stringify_func = pretty_printer
else:
from sympy.printing import pretty as stringify_func
else:
if str_printer is not None:
stringify_func = str_printer
else:
from sympy.printing import sstrrepr as stringify_func
# Even if ip is not passed, double check that not in IPython shell
if ip is None:
try:
ip = get_ipython()
except NameError:
pass
if ip and ip.__module__.startswith('IPython') and pretty_print:
try:
import IPython
# IPython 1.0 deprecates the frontend module, so we import directly
# from the terminal module to prevent a deprecation message from being
# shown.
if IPython.__version__ >= '1.0':
from IPython.terminal.interactiveshell import TerminalInteractiveShell
else:
from IPython.frontend.terminal.interactiveshell import TerminalInteractiveShell
from code import InteractiveConsole
except ImportError:
pass
else:
# This will be True if we are in the qtconsole or notebook
if not isinstance(ip, (InteractiveConsole, TerminalInteractiveShell)) \
and 'ipython-console' not in ''.join(sys.argv):
if use_unicode is None:
debug("init_printing: Setting use_unicode to True")
use_unicode = True
if use_latex is None:
debug("init_printing: Setting use_latex to True")
use_latex = True
if not no_global:
Printer.set_global_settings(order=order, use_unicode=use_unicode,
wrap_line=wrap_line, num_columns=num_columns)
else:
_stringify_func = stringify_func
if pretty_print:
stringify_func = lambda expr: \
_stringify_func(expr, order=order,
use_unicode=use_unicode,
wrap_line=wrap_line,
num_columns=num_columns)
else:
stringify_func = lambda expr: _stringify_func(expr, order=order)
if ip is not None and ip.__module__.startswith('IPython'):
_init_ipython_printing(ip, stringify_func, use_latex, euler,
forecolor, backcolor, fontsize, latex_mode,
print_builtin, latex_printer)
else:
_init_python_printing(stringify_func)
| mit |
mahak/spark | python/pyspark/pandas/spark/accessors.py | 11 | 42801 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Spark related features. Usually, the features here are missing in pandas
but Spark has them.
"""
from abc import ABCMeta, abstractmethod
from typing import TYPE_CHECKING, Callable, Generic, List, Optional, Union, cast
from pyspark import StorageLevel
from pyspark.sql import Column, DataFrame as SparkDataFrame
from pyspark.sql.types import DataType, StructType
from pyspark.pandas._typing import IndexOpsLike
from pyspark.pandas.internal import InternalField
if TYPE_CHECKING:
from pyspark.sql._typing import OptionalPrimitiveType # noqa: F401 (SPARK-34943)
from pyspark._typing import PrimitiveType # noqa: F401 (SPARK-34943)
import pyspark.pandas as ps # noqa: F401 (SPARK-34943)
from pyspark.pandas.frame import CachedDataFrame # noqa: F401 (SPARK-34943)
class SparkIndexOpsMethods(Generic[IndexOpsLike], metaclass=ABCMeta):
"""Spark related features. Usually, the features here are missing in pandas
    but Spark has them."""
def __init__(self, data: IndexOpsLike):
self._data = data
@property
def data_type(self) -> DataType:
"""Returns the data type as defined by Spark, as a Spark DataType object."""
return self._data._internal.spark_type_for(self._data._column_label)
@property
def nullable(self) -> bool:
"""Returns the nullability as defined by Spark."""
return self._data._internal.spark_column_nullable_for(self._data._column_label)
@property
def column(self) -> Column:
"""
Spark Column object representing the Series/Index.
        .. note:: This Spark Column object is strictly tied to the base DataFrame the Series/Index
            was derived from.
"""
return self._data._internal.spark_column_for(self._data._column_label)
def transform(self, func: Callable[[Column], Column]) -> IndexOpsLike:
"""
Applies a function that takes and returns a Spark column. It allows to natively
apply a Spark function and column APIs with the Spark column internally used
        in Series or Index. The output length of the Spark column should be the same as the input's.
        .. note:: It requires the input and output to have the same length; therefore,
            aggregate Spark functions such as count do not work.
Parameters
----------
func : function
Function to use for transforming the data by using Spark columns.
Returns
-------
Series or Index
Raises
------
ValueError : If the output from the function is not a Spark column.
Examples
--------
>>> from pyspark.sql.functions import log
>>> df = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, columns=["a", "b"])
>>> df
a b
0 1 4
1 2 5
2 3 6
>>> df.a.spark.transform(lambda c: log(c))
0 0.000000
1 0.693147
2 1.098612
Name: a, dtype: float64
>>> df.index.spark.transform(lambda c: c + 10)
Int64Index([10, 11, 12], dtype='int64')
>>> df.a.spark.transform(lambda c: c + df.b.spark.column)
0 5
1 7
2 9
Name: a, dtype: int64
"""
from pyspark.pandas import MultiIndex
if isinstance(self._data, MultiIndex):
raise NotImplementedError("MultiIndex does not support spark.transform yet.")
output = func(self._data.spark.column)
if not isinstance(output, Column):
raise ValueError(
"The output of the function [%s] should be of a "
"pyspark.sql.Column; however, got [%s]." % (func, type(output))
)
        # Trigger the resolution so it throws an exception if anything goes wrong
# within the function, for example,
# `df1.a.spark.transform(lambda _: F.col("non-existent"))`.
field = InternalField.from_struct_field(
self._data._internal.spark_frame.select(output).schema.fields[0]
)
return self._data._with_new_scol(scol=output, field=field)
@property
@abstractmethod
def analyzed(self) -> IndexOpsLike:
pass
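# Hedged usage sketch (illustrative only, not part of the pandas-on-Spark API docs): the accessor
# properties defined above expose the Spark-level view of a pandas-on-Spark Series or Index, e.g.
#
#   psser = ps.Series([1, 2, 3])
#   psser.spark.data_type   # Spark DataType of the underlying column
#   psser.spark.nullable    # nullability as defined by Spark
#   psser.spark.column      # the underlying pyspark.sql.Column, tied to its base DataFrame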
class SparkSeriesMethods(SparkIndexOpsMethods["ps.Series"]):
def apply(self, func: Callable[[Column], Column]) -> "ps.Series":
"""
Applies a function that takes and returns a Spark column. It allows to natively
apply a Spark function and column APIs with the Spark column internally used
in Series or Index.
        .. note:: It forces the index to be lost and replaced with the default index. It is
            preferred to use :meth:`Series.spark.transform` or :meth:`DataFrame.spark.apply`
            with `index_col` specified.
        .. note:: It does not require the input and output to have the same length. However,
            it internally creates a new DataFrame, which requires `compute.ops_on_diff_frames`
            to be enabled even when computing against the same origin DataFrame; that is
            expensive, whereas :meth:`Series.spark.transform` does not require it.
Parameters
----------
func : function
Function to apply the function against the data by using Spark columns.
Returns
-------
Series
Raises
------
ValueError : If the output from the function is not a Spark column.
Examples
--------
>>> from pyspark import pandas as ps
>>> from pyspark.sql.functions import count, lit
>>> df = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, columns=["a", "b"])
>>> df
a b
0 1 4
1 2 5
2 3 6
>>> df.a.spark.apply(lambda c: count(c))
0 3
Name: a, dtype: int64
>>> df.a.spark.apply(lambda c: c + df.b.spark.column)
0 5
1 7
2 9
Name: a, dtype: int64
"""
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.series import Series, first_series
from pyspark.pandas.internal import HIDDEN_COLUMNS
output = func(self._data.spark.column)
if not isinstance(output, Column):
raise ValueError(
"The output of the function [%s] should be of a "
"pyspark.sql.Column; however, got [%s]." % (func, type(output))
)
assert isinstance(self._data, Series)
sdf = self._data._internal.spark_frame.drop(*HIDDEN_COLUMNS).select(output)
# Lose index.
return first_series(DataFrame(sdf)).rename(self._data.name)
@property
def analyzed(self) -> "ps.Series":
"""
Returns a new Series with the analyzed Spark DataFrame.
After multiple operations, the underlying Spark plan could grow huge
and make the Spark planner take a long time to finish the planning.
This function is for the workaround to avoid it.
.. note:: After analyzed, operations between the analyzed Series and the original one
will **NOT** work without setting a config `compute.ops_on_diff_frames` to `True`.
Returns
-------
Series
Examples
--------
>>> ser = ps.Series([1, 2, 3])
>>> ser
0 1
1 2
2 3
dtype: int64
The analyzed one should return the same value.
>>> ser.spark.analyzed
0 1
1 2
2 3
dtype: int64
However, it won't work with the same anchor Series.
>>> ser + ser.spark.analyzed
Traceback (most recent call last):
...
ValueError: ... enable 'compute.ops_on_diff_frames' option.
>>> with ps.option_context('compute.ops_on_diff_frames', True):
... (ser + ser.spark.analyzed).sort_index()
0 2
1 4
2 6
dtype: int64
"""
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.series import first_series
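        # resolved_copy gives the internal frame backed by the analyzed Spark DataFrame,
        # so the returned Series no longer shares its anchor with the original one.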
return first_series(DataFrame(self._data._internal.resolved_copy))
class SparkIndexMethods(SparkIndexOpsMethods["ps.Index"]):
@property
def analyzed(self) -> "ps.Index":
"""
Returns a new Index with the analyzed Spark DataFrame.
After multiple operations, the underlying Spark plan could grow huge
and make the Spark planner take a long time to finish the planning.
        This function provides a workaround to avoid that.
        .. note:: After being analyzed, operations between the analyzed Index and the original one
            will **NOT** work without setting a config `compute.ops_on_diff_frames` to `True`.
Returns
-------
Index
Examples
--------
>>> idx = ps.Index([1, 2, 3])
>>> idx
Int64Index([1, 2, 3], dtype='int64')
The analyzed one should return the same value.
>>> idx.spark.analyzed
Int64Index([1, 2, 3], dtype='int64')
However, it won't work with the same anchor Index.
>>> idx + idx.spark.analyzed
Traceback (most recent call last):
...
ValueError: ... enable 'compute.ops_on_diff_frames' option.
>>> with ps.option_context('compute.ops_on_diff_frames', True):
... (idx + idx.spark.analyzed).sort_values()
Int64Index([2, 4, 6], dtype='int64')
"""
from pyspark.pandas.frame import DataFrame
return DataFrame(self._data._internal.resolved_copy).index
class SparkFrameMethods(object):
"""Spark related features. Usually, the features here are missing in pandas
    but Spark has them."""
def __init__(self, frame: "ps.DataFrame"):
self._psdf = frame
def schema(self, index_col: Optional[Union[str, List[str]]] = None) -> StructType:
"""
Returns the underlying Spark schema.
Returns
-------
pyspark.sql.types.StructType
The underlying Spark schema.
Parameters
----------
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
Examples
--------
>>> df = ps.DataFrame({'a': list('abc'),
... 'b': list(range(1, 4)),
... 'c': np.arange(3, 6).astype('i1'),
... 'd': np.arange(4.0, 7.0, dtype='float64'),
... 'e': [True, False, True],
... 'f': pd.date_range('20130101', periods=3)},
... columns=['a', 'b', 'c', 'd', 'e', 'f'])
>>> df.spark.schema().simpleString()
'struct<a:string,b:bigint,c:tinyint,d:double,e:boolean,f:timestamp>'
>>> df.spark.schema(index_col='index').simpleString()
'struct<index:bigint,a:string,b:bigint,c:tinyint,d:double,e:boolean,f:timestamp>'
"""
return self.frame(index_col).schema
def print_schema(self, index_col: Optional[Union[str, List[str]]] = None) -> None:
"""
Prints out the underlying Spark schema in the tree format.
Parameters
----------
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
Returns
-------
None
Examples
--------
>>> df = ps.DataFrame({'a': list('abc'),
... 'b': list(range(1, 4)),
... 'c': np.arange(3, 6).astype('i1'),
... 'd': np.arange(4.0, 7.0, dtype='float64'),
... 'e': [True, False, True],
... 'f': pd.date_range('20130101', periods=3)},
... columns=['a', 'b', 'c', 'd', 'e', 'f'])
>>> df.spark.print_schema() # doctest: +NORMALIZE_WHITESPACE
root
|-- a: string (nullable = false)
|-- b: long (nullable = false)
|-- c: byte (nullable = false)
|-- d: double (nullable = false)
|-- e: boolean (nullable = false)
|-- f: timestamp (nullable = false)
>>> df.spark.print_schema(index_col='index') # doctest: +NORMALIZE_WHITESPACE
root
|-- index: long (nullable = false)
|-- a: string (nullable = false)
|-- b: long (nullable = false)
|-- c: byte (nullable = false)
|-- d: double (nullable = false)
|-- e: boolean (nullable = false)
|-- f: timestamp (nullable = false)
"""
self.frame(index_col).printSchema()
def frame(self, index_col: Optional[Union[str, List[str]]] = None) -> SparkDataFrame:
"""
Return the current DataFrame as a Spark DataFrame. :meth:`DataFrame.spark.frame` is an
alias of :meth:`DataFrame.to_spark`.
Parameters
----------
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
See Also
--------
DataFrame.to_spark
DataFrame.to_pandas_on_spark
DataFrame.spark.frame
Examples
--------
By default, this method loses the index as below.
>>> df = ps.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]})
>>> df.to_spark().show() # doctest: +NORMALIZE_WHITESPACE
+---+---+---+
| a| b| c|
+---+---+---+
| 1| 4| 7|
| 2| 5| 8|
| 3| 6| 9|
+---+---+---+
>>> df = ps.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]})
>>> df.spark.frame().show() # doctest: +NORMALIZE_WHITESPACE
+---+---+---+
| a| b| c|
+---+---+---+
| 1| 4| 7|
| 2| 5| 8|
| 3| 6| 9|
+---+---+---+
If `index_col` is set, it keeps the index column as specified.
>>> df.to_spark(index_col="index").show() # doctest: +NORMALIZE_WHITESPACE
+-----+---+---+---+
|index| a| b| c|
+-----+---+---+---+
| 0| 1| 4| 7|
| 1| 2| 5| 8|
| 2| 3| 6| 9|
+-----+---+---+---+
        Keeping the index column is useful when you want to call some Spark APIs and
        convert the result back to a pandas-on-Spark DataFrame without creating a default index,
        which can affect performance.
>>> spark_df = df.to_spark(index_col="index")
>>> spark_df = spark_df.filter("a == 2")
>>> spark_df.to_pandas_on_spark(index_col="index") # doctest: +NORMALIZE_WHITESPACE
a b c
index
1 2 5 8
In case of multi-index, specify a list to `index_col`.
>>> new_df = df.set_index("a", append=True)
>>> new_spark_df = new_df.to_spark(index_col=["index_1", "index_2"])
>>> new_spark_df.show() # doctest: +NORMALIZE_WHITESPACE
+-------+-------+---+---+
|index_1|index_2| b| c|
+-------+-------+---+---+
| 0| 1| 4| 7|
| 1| 2| 5| 8|
| 2| 3| 6| 9|
+-------+-------+---+---+
        Likewise, it can be converted back to a pandas-on-Spark DataFrame.
>>> new_spark_df.to_pandas_on_spark(
... index_col=["index_1", "index_2"]) # doctest: +NORMALIZE_WHITESPACE
b c
index_1 index_2
0 1 4 7
1 2 5 8
2 3 6 9
"""
from pyspark.pandas.utils import name_like_string
psdf = self._psdf
data_column_names = []
data_columns = []
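        # Alias each data column with its label-derived name so the column names of the
        # returned Spark DataFrame match the pandas-on-Spark column labels.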
for i, (label, spark_column, column_name) in enumerate(
zip(
psdf._internal.column_labels,
psdf._internal.data_spark_columns,
psdf._internal.data_spark_column_names,
)
):
name = str(i) if label is None else name_like_string(label)
data_column_names.append(name)
if column_name != name:
spark_column = spark_column.alias(name)
data_columns.append(spark_column)
if index_col is None:
return psdf._internal.spark_frame.select(data_columns)
else:
if isinstance(index_col, str):
index_col = [index_col]
old_index_scols = psdf._internal.index_spark_columns
if len(index_col) != len(old_index_scols):
raise ValueError(
"length of index columns is %s; however, the length of the given "
"'index_col' is %s." % (len(old_index_scols), len(index_col))
)
if any(col in data_column_names for col in index_col):
raise ValueError("'index_col' cannot be overlapped with other columns.")
new_index_scols = [
index_scol.alias(col) for index_scol, col in zip(old_index_scols, index_col)
]
return psdf._internal.spark_frame.select(new_index_scols + data_columns)
def cache(self) -> "CachedDataFrame":
"""
Yields and caches the current DataFrame.
The pandas-on-Spark DataFrame is yielded as a protected resource and its corresponding
        data is cached, which gets uncached after execution goes out of the context.
If you want to specify the StorageLevel manually, use :meth:`DataFrame.spark.persist`
See Also
--------
DataFrame.spark.persist
Examples
--------
>>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
>>> with df.spark.cache() as cached_df:
... print(cached_df.count())
...
dogs 4
cats 4
dtype: int64
>>> df = df.spark.cache()
>>> df.to_pandas().mean(axis=1)
0 0.25
1 0.30
2 0.30
3 0.15
dtype: float64
        To uncache the dataframe, use the `unpersist` function.
>>> df.spark.unpersist()
"""
from pyspark.pandas.frame import CachedDataFrame
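        # Swap in the resolved (analyzed) internal frame before caching;
        # requires_same_anchor=False permits the replacement even though the anchor changes.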
self._psdf._update_internal_frame(
self._psdf._internal.resolved_copy, requires_same_anchor=False
)
return CachedDataFrame(self._psdf._internal)
def persist(
self, storage_level: StorageLevel = StorageLevel.MEMORY_AND_DISK
) -> "CachedDataFrame":
"""
Yields and caches the current DataFrame with a specific StorageLevel.
        If a StorageLevel is not given, the `MEMORY_AND_DISK` level is used by default, as in PySpark.
The pandas-on-Spark DataFrame is yielded as a protected resource and its corresponding
        data is cached, which gets uncached after execution goes out of the context.
See Also
--------
DataFrame.spark.cache
Examples
--------
>>> import pyspark
>>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
Set the StorageLevel to `MEMORY_ONLY`.
>>> with df.spark.persist(pyspark.StorageLevel.MEMORY_ONLY) as cached_df:
... print(cached_df.spark.storage_level)
... print(cached_df.count())
...
Memory Serialized 1x Replicated
dogs 4
cats 4
dtype: int64
Set the StorageLevel to `DISK_ONLY`.
>>> with df.spark.persist(pyspark.StorageLevel.DISK_ONLY) as cached_df:
... print(cached_df.spark.storage_level)
... print(cached_df.count())
...
Disk Serialized 1x Replicated
dogs 4
cats 4
dtype: int64
If a StorageLevel is not given, it uses `MEMORY_AND_DISK` by default.
>>> with df.spark.persist() as cached_df:
... print(cached_df.spark.storage_level)
... print(cached_df.count())
...
Disk Memory Serialized 1x Replicated
dogs 4
cats 4
dtype: int64
>>> df = df.spark.persist()
>>> df.to_pandas().mean(axis=1)
0 0.25
1 0.30
2 0.30
3 0.15
dtype: float64
        To uncache the dataframe, use the `unpersist` function.
>>> df.spark.unpersist()
"""
from pyspark.pandas.frame import CachedDataFrame
self._psdf._update_internal_frame(
self._psdf._internal.resolved_copy, requires_same_anchor=False
)
return CachedDataFrame(self._psdf._internal, storage_level=storage_level)
def hint(self, name: str, *parameters: "PrimitiveType") -> "ps.DataFrame":
"""
Specifies some hint on the current DataFrame.
Parameters
----------
name : A name of the hint.
parameters : Optional parameters.
Returns
-------
ret : DataFrame with the hint.
See Also
--------
broadcast : Marks a DataFrame as small enough for use in broadcast joins.
Examples
--------
>>> df1 = ps.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]},
... columns=['lkey', 'value']).set_index('lkey')
>>> df2 = ps.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]},
... columns=['rkey', 'value']).set_index('rkey')
>>> merged = df1.merge(df2.spark.hint("broadcast"), left_index=True, right_index=True)
>>> merged.spark.explain() # doctest: +ELLIPSIS
== Physical Plan ==
...
...BroadcastHashJoin...
...
"""
from pyspark.pandas.frame import DataFrame
internal = self._psdf._internal.resolved_copy
return DataFrame(internal.with_new_sdf(internal.spark_frame.hint(name, *parameters)))
def to_table(
self,
name: str,
format: Optional[str] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: "OptionalPrimitiveType",
) -> None:
"""
Write the DataFrame into a Spark table. :meth:`DataFrame.spark.to_table`
is an alias of :meth:`DataFrame.to_table`.
Parameters
----------
name : str, required
Table name in Spark.
format : string, optional
Specifies the output data source format. Some common ones are:
- 'delta'
- 'parquet'
- 'orc'
- 'json'
- 'csv'
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default
'overwrite'. Specifies the behavior of the save operation when the table exists
already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options
Additional options passed directly to Spark.
Returns
-------
None
See Also
--------
read_table
DataFrame.to_spark_io
DataFrame.spark.to_spark_io
DataFrame.to_parquet
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_table('%s.my_table' % db, partition_cols='date')
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
self._psdf.spark.frame(index_col=index_col).write.saveAsTable(
name=name, format=format, mode=mode, partitionBy=partition_cols, **options
)
def to_spark_io(
self,
path: Optional[str] = None,
format: Optional[str] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: "OptionalPrimitiveType",
) -> None:
"""Write the DataFrame out to a Spark data source. :meth:`DataFrame.spark.to_spark_io`
is an alias of :meth:`DataFrame.to_spark_io`.
Parameters
----------
path : string, optional
Path to the data source.
format : string, optional
Specifies the output data source format. Some common ones are:
- 'delta'
- 'parquet'
- 'orc'
- 'json'
- 'csv'
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default
            'overwrite'. Specifies the behavior of the save operation when data already exists.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options : dict
All other options passed directly into Spark's data source.
Returns
-------
None
See Also
--------
read_spark_io
DataFrame.to_delta
DataFrame.to_parquet
DataFrame.to_table
DataFrame.to_spark_io
DataFrame.spark.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_spark_io(path='%s/to_spark_io/foo.json' % path, format='json')
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
self._psdf.spark.frame(index_col=index_col).write.save(
path=path, format=format, mode=mode, partitionBy=partition_cols, **options
)
def explain(self, extended: Optional[bool] = None, mode: Optional[str] = None) -> None:
"""
Prints the underlying (logical and physical) Spark plans to the console for debugging
purpose.
Parameters
----------
extended : boolean, default ``False``.
If ``False``, prints only the physical plan.
mode : string, default ``None``.
The expected output format of plans.
Returns
-------
None
Examples
--------
>>> df = ps.DataFrame({'id': range(10)})
>>> df.spark.explain() # doctest: +ELLIPSIS
== Physical Plan ==
...
>>> df.spark.explain(True) # doctest: +ELLIPSIS
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
>>> df.spark.explain("extended") # doctest: +ELLIPSIS
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
>>> df.spark.explain(mode="extended") # doctest: +ELLIPSIS
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
"""
self._psdf._internal.to_internal_spark_frame.explain(extended, mode)
def apply(
self,
func: Callable[[SparkDataFrame], SparkDataFrame],
index_col: Optional[Union[str, List[str]]] = None,
) -> "ps.DataFrame":
"""
        Applies a function that takes and returns a Spark DataFrame. It allows you to natively
        apply a Spark function and DataFrame APIs against the Spark DataFrame internally used
        in the pandas-on-Spark DataFrame.
        .. note:: set `index_col` and keep the column named as such in the output Spark
            DataFrame to avoid using the default index and prevent a performance penalty.
            If you omit `index_col`, the default index is used, which is potentially
            expensive in general.
.. note:: it will lose column labels. This is a synonym of
``func(psdf.to_spark(index_col)).to_pandas_on_spark(index_col)``.
Parameters
----------
func : function
            Function to apply to the data using a Spark DataFrame.
Returns
-------
DataFrame
Raises
------
ValueError : If the output from the function is not a Spark DataFrame.
Examples
--------
>>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, columns=["a", "b"])
>>> psdf
a b
0 1 4
1 2 5
2 3 6
>>> psdf.spark.apply(
... lambda sdf: sdf.selectExpr("a + b as c", "index"), index_col="index")
... # doctest: +NORMALIZE_WHITESPACE
c
index
0 5
1 7
2 9
        The case below ends up using the default index, which should be avoided
if possible.
>>> psdf.spark.apply(lambda sdf: sdf.groupby("a").count().sort("a"))
a count
0 1 1
1 2 1
2 3 1
"""
output = func(self.frame(index_col))
if not isinstance(output, SparkDataFrame):
raise ValueError(
"The output of the function [%s] should be of a "
"pyspark.sql.DataFrame; however, got [%s]." % (func, type(output))
)
psdf = output.to_pandas_on_spark(index_col) # type: ignore
return cast("ps.DataFrame", psdf)
def repartition(self, num_partitions: int) -> "ps.DataFrame":
"""
Returns a new DataFrame partitioned by the given partitioning expressions. The
resulting DataFrame is hash partitioned.
Parameters
----------
num_partitions : int
The target number of partitions.
Returns
-------
DataFrame
Examples
--------
>>> psdf = ps.DataFrame({"age": [5, 5, 2, 2],
... "name": ["Bob", "Bob", "Alice", "Alice"]}).set_index("age")
>>> psdf.sort_index() # doctest: +NORMALIZE_WHITESPACE
name
age
2 Alice
2 Alice
5 Bob
5 Bob
>>> new_psdf = psdf.spark.repartition(7)
>>> new_psdf.to_spark().rdd.getNumPartitions()
7
>>> new_psdf.sort_index() # doctest: +NORMALIZE_WHITESPACE
name
age
2 Alice
2 Alice
5 Bob
5 Bob
"""
from pyspark.pandas.frame import DataFrame
internal = self._psdf._internal.resolved_copy
repartitioned_sdf = internal.spark_frame.repartition(num_partitions)
return DataFrame(internal.with_new_sdf(repartitioned_sdf))
def coalesce(self, num_partitions: int) -> "ps.DataFrame":
"""
Returns a new DataFrame that has exactly `num_partitions` partitions.
.. note:: This operation results in a narrow dependency, e.g. if you go from 1000
partitions to 100 partitions, there will not be a shuffle, instead each of the 100 new
partitions will claim 10 of the current partitions. If a larger number of partitions is
requested, it will stay at the current number of partitions. However, if you're doing a
drastic coalesce, e.g. to num_partitions = 1, this may result in your computation taking
place on fewer nodes than you like (e.g. one node in the case of num_partitions = 1). To
avoid this, you can call repartition(). This will add a shuffle step, but means the
current upstream partitions will be executed in parallel (per whatever the current
partitioning is).
Parameters
----------
num_partitions : int
The target number of partitions.
Returns
-------
DataFrame
Examples
--------
>>> psdf = ps.DataFrame({"age": [5, 5, 2, 2],
... "name": ["Bob", "Bob", "Alice", "Alice"]}).set_index("age")
>>> psdf.sort_index() # doctest: +NORMALIZE_WHITESPACE
name
age
2 Alice
2 Alice
5 Bob
5 Bob
>>> new_psdf = psdf.spark.coalesce(1)
>>> new_psdf.to_spark().rdd.getNumPartitions()
1
>>> new_psdf.sort_index() # doctest: +NORMALIZE_WHITESPACE
name
age
2 Alice
2 Alice
5 Bob
5 Bob
"""
from pyspark.pandas.frame import DataFrame
internal = self._psdf._internal.resolved_copy
coalesced_sdf = internal.spark_frame.coalesce(num_partitions)
return DataFrame(internal.with_new_sdf(coalesced_sdf))
def checkpoint(self, eager: bool = True) -> "ps.DataFrame":
"""Returns a checkpointed version of this DataFrame.
Checkpointing can be used to truncate the logical plan of this DataFrame, which is
especially useful in iterative algorithms where the plan may grow exponentially. It will be
saved to files inside the checkpoint directory set with `SparkContext.setCheckpointDir`.
Parameters
----------
eager : bool
Whether to checkpoint this DataFrame immediately
Returns
-------
DataFrame
Examples
--------
>>> psdf = ps.DataFrame({"a": ["a", "b", "c"]})
>>> psdf
a
0 a
1 b
2 c
>>> new_psdf = psdf.spark.checkpoint() # doctest: +SKIP
>>> new_psdf # doctest: +SKIP
a
0 a
1 b
2 c
"""
from pyspark.pandas.frame import DataFrame
internal = self._psdf._internal.resolved_copy
checkpointed_sdf = internal.spark_frame.checkpoint(eager)
return DataFrame(internal.with_new_sdf(checkpointed_sdf))
def local_checkpoint(self, eager: bool = True) -> "ps.DataFrame":
"""Returns a locally checkpointed version of this DataFrame.
Checkpointing can be used to truncate the logical plan of this DataFrame, which is
especially useful in iterative algorithms where the plan may grow exponentially. Local
checkpoints are stored in the executors using the caching subsystem and therefore they are
not reliable.
Parameters
----------
eager : bool
Whether to locally checkpoint this DataFrame immediately
Returns
-------
DataFrame
Examples
--------
>>> psdf = ps.DataFrame({"a": ["a", "b", "c"]})
>>> psdf
a
0 a
1 b
2 c
>>> new_psdf = psdf.spark.local_checkpoint()
>>> new_psdf
a
0 a
1 b
2 c
"""
from pyspark.pandas.frame import DataFrame
internal = self._psdf._internal.resolved_copy
checkpointed_sdf = internal.spark_frame.localCheckpoint(eager)
return DataFrame(internal.with_new_sdf(checkpointed_sdf))
@property
def analyzed(self) -> "ps.DataFrame":
"""
Returns a new DataFrame with the analyzed Spark DataFrame.
After multiple operations, the underlying Spark plan could grow huge
and make the Spark planner take a long time to finish the planning.
        This function provides a workaround to avoid that.
        .. note:: After being analyzed, operations between the analyzed DataFrame and the original one
will **NOT** work without setting a config `compute.ops_on_diff_frames` to `True`.
Returns
-------
DataFrame
Examples
--------
>>> df = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, columns=["a", "b"])
>>> df
a b
0 1 4
1 2 5
2 3 6
The analyzed one should return the same value.
>>> df.spark.analyzed
a b
0 1 4
1 2 5
2 3 6
        However, it won't work with the same anchor DataFrame.
>>> df + df.spark.analyzed
Traceback (most recent call last):
...
ValueError: ... enable 'compute.ops_on_diff_frames' option.
>>> with ps.option_context('compute.ops_on_diff_frames', True):
... (df + df.spark.analyzed).sort_index()
a b
0 2 8
1 4 10
2 6 12
"""
from pyspark.pandas.frame import DataFrame
return DataFrame(self._psdf._internal.resolved_copy)
class CachedSparkFrameMethods(SparkFrameMethods):
"""Spark related features for cached DataFrame. This is usually created via
`df.spark.cache()`."""
def __init__(self, frame: "CachedDataFrame"):
super().__init__(frame)
@property
def storage_level(self) -> StorageLevel:
"""
Return the storage level of this cache.
Examples
--------
>>> import pyspark.pandas as ps
>>> import pyspark
>>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
>>> with df.spark.cache() as cached_df:
... print(cached_df.spark.storage_level)
...
Disk Memory Deserialized 1x Replicated
Set the StorageLevel to `MEMORY_ONLY`.
>>> with df.spark.persist(pyspark.StorageLevel.MEMORY_ONLY) as cached_df:
... print(cached_df.spark.storage_level)
...
Memory Serialized 1x Replicated
"""
return self._psdf._cached.storageLevel
def unpersist(self) -> None:
"""
The `unpersist` function is used to uncache the pandas-on-Spark DataFrame when it
        is not used within a `with` statement.
Returns
-------
None
Examples
--------
>>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df = df.spark.cache()
        To uncache the dataframe, use the `unpersist` function.
>>> df.spark.unpersist()
"""
if self._psdf._cached.is_cached:
self._psdf._cached.unpersist()
def _test() -> None:
import os
import doctest
import shutil
import sys
import tempfile
import uuid
import numpy
import pandas
from pyspark.sql import SparkSession
import pyspark.pandas.spark.accessors
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.spark.accessors.__dict__.copy()
globs["np"] = numpy
globs["pd"] = pandas
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.spark.accessors tests")
.getOrCreate()
)
db_name = "db%s" % str(uuid.uuid4()).replace("-", "")
spark.sql("CREATE DATABASE %s" % db_name)
globs["db"] = db_name
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.spark.accessors,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.sql("DROP DATABASE IF EXISTS %s CASCADE" % db_name)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
Cortexelus/librosa | tests/test_core.py | 1 | 33211 | #!/usr/bin/env python
# CREATED:2013-03-08 15:25:18 by Brian McFee <[email protected]>
# unit tests for librosa core (__init__.py)
#
# Run me as follows:
# cd tests/
# nosetests -v --with-coverage --cover-package=librosa
#
from __future__ import print_function
# Disable cache
import os
try:
os.environ.pop('LIBROSA_CACHE_DIR')
except:
pass
import librosa
import glob
import numpy as np
import scipy.io
import six
from nose.tools import eq_, raises, make_decorator
import matplotlib
matplotlib.use('Agg')
# -- utilities --#
def files(pattern):
test_files = glob.glob(pattern)
test_files.sort()
return test_files
def srand(seed=628318530):
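    """Seed numpy's RNG so the randomized tests below are reproducible."""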
np.random.seed(seed)
pass
def load(infile):
return scipy.io.loadmat(infile, chars_as_strings=True)
def test_load():
# Note: this does not test resampling.
# That is a separate unit test.
def __test(infile):
DATA = load(infile)
y, sr = librosa.load(DATA['wavfile'][0],
sr=None,
mono=DATA['mono'])
# Verify that the sample rate is correct
eq_(sr, DATA['sr'])
assert np.allclose(y, DATA['y'])
for infile in files('data/core-load-*.mat'):
yield (__test, infile)
pass
def test_segment_load():
sample_len = 2003
fs = 44100
test_file = 'data/test1_44100.wav'
y, sr = librosa.load(test_file, sr=None, mono=False,
offset=0., duration=sample_len/float(fs))
eq_(y.shape[-1], sample_len)
y2, sr = librosa.load(test_file, sr=None, mono=False)
assert np.allclose(y, y2[:, :sample_len])
sample_offset = 2048
y, sr = librosa.load(test_file, sr=None, mono=False,
offset=sample_offset/float(fs), duration=1.0)
eq_(y.shape[-1], fs)
y2, sr = librosa.load(test_file, sr=None, mono=False)
assert np.allclose(y, y2[:, sample_offset:sample_offset+fs])
def test_resample_mono():
def __test(y, sr_in, sr_out, res_type, fix):
y2 = librosa.resample(y, sr_in, sr_out,
res_type=res_type,
fix=fix)
# First, check that the audio is valid
librosa.util.valid_audio(y2, mono=True)
# If it's a no-op, make sure the signal is untouched
if sr_out == sr_in:
assert np.allclose(y, y2)
# Check buffer contiguity
assert y2.flags['C_CONTIGUOUS']
# Check that we're within one sample of the target length
target_length = y.shape[-1] * sr_out // sr_in
assert np.abs(y2.shape[-1] - target_length) <= 1
for infile in ['data/test1_44100.wav',
'data/test1_22050.wav',
'data/test2_8000.wav']:
y, sr_in = librosa.load(infile, sr=None, duration=5)
for sr_out in [8000, 22050]:
for res_type in ['kaiser_best', 'kaiser_fast', 'scipy']:
for fix in [False, True]:
yield (__test, y, sr_in, sr_out, res_type, fix)
def test_resample_stereo():
def __test(y, sr_in, sr_out, res_type, fix):
y2 = librosa.resample(y, sr_in, sr_out,
res_type=res_type,
fix=fix)
# First, check that the audio is valid
librosa.util.valid_audio(y2, mono=False)
eq_(y2.ndim, y.ndim)
# If it's a no-op, make sure the signal is untouched
if sr_out == sr_in:
assert np.allclose(y, y2)
# Check buffer contiguity
assert y2.flags['C_CONTIGUOUS']
# Check that we're within one sample of the target length
target_length = y.shape[-1] * sr_out // sr_in
assert np.abs(y2.shape[-1] - target_length) <= 1
y, sr_in = librosa.load('data/test1_44100.wav', mono=False, sr=None, duration=5)
for sr_out in [8000, 22050]:
for res_type in ['kaiser_fast', 'scipy']:
for fix in [False, True]:
yield __test, y, sr_in, sr_out, res_type, fix
def test_resample_scale():
def __test(sr_in, sr_out, res_type, y):
y2 = librosa.resample(y, sr_in, sr_out,
res_type=res_type,
scale=True)
# First, check that the audio is valid
librosa.util.valid_audio(y2, mono=True)
n_orig = np.sqrt(np.sum(np.abs(y)**2))
n_res = np.sqrt(np.sum(np.abs(y2)**2))
# If it's a no-op, make sure the signal is untouched
assert np.allclose(n_orig, n_res, atol=1e-2), (n_orig, n_res)
y, sr_in = librosa.load('data/test1_44100.wav', mono=True, sr=None, duration=5)
for sr_out in [11025, 22050, 44100]:
for res_type in ['scipy', 'kaiser_best', 'kaiser_fast']:
yield __test, sr_in, sr_out, res_type, y
def test_stft():
def __test(infile):
DATA = load(infile)
# Load the file
(y, sr) = librosa.load(DATA['wavfile'][0], sr=None, mono=True)
if DATA['hann_w'][0, 0] == 0:
# Set window to ones, swap back to nfft
window = np.ones
win_length = None
else:
window = 'hann'
win_length = DATA['hann_w'][0, 0]
# Compute the STFT
D = librosa.stft(y,
n_fft=DATA['nfft'][0, 0].astype(int),
hop_length=DATA['hop_length'][0, 0].astype(int),
win_length=win_length,
window=window,
center=False)
assert np.allclose(D, DATA['D'])
for infile in files('data/core-stft-*.mat'):
yield (__test, infile)
def test_ifgram():
def __test(infile):
DATA = load(infile)
y, sr = librosa.load(DATA['wavfile'][0], sr=None, mono=True)
# Compute the IFgram
F, D = librosa.ifgram(y,
n_fft=DATA['nfft'][0, 0].astype(int),
hop_length=DATA['hop_length'][0, 0].astype(int),
win_length=DATA['hann_w'][0, 0].astype(int),
sr=DATA['sr'][0, 0].astype(int),
ref_power=0.0,
clip=False,
center=False)
# D fails to match here because of fftshift()
# assert np.allclose(D, DATA['D'])
assert np.allclose(F, DATA['F'], rtol=1e-1, atol=1e-1)
for infile in files('data/core-ifgram-*.mat'):
yield (__test, infile)
def test_ifgram_matches_stft():
y, sr = librosa.load('data/test1_22050.wav')
def __test(n_fft, hop_length, win_length, center, norm, dtype):
D_stft = librosa.stft(y, n_fft=n_fft, hop_length=hop_length,
win_length=win_length, center=center,
dtype=dtype)
_, D_ifgram = librosa.ifgram(y, sr, n_fft=n_fft,
hop_length=hop_length,
win_length=win_length, center=center,
norm=norm, dtype=dtype)
if norm:
# STFT doesn't do window normalization;
# let's just ignore the relative scale to make this easy
D_stft = librosa.util.normalize(D_stft, axis=0)
D_ifgram = librosa.util.normalize(D_ifgram, axis=0)
assert np.allclose(D_stft, D_ifgram)
for n_fft in [1024, 2048]:
for hop_length in [None, n_fft // 2, n_fft // 4]:
for win_length in [None, n_fft // 2, n_fft // 4]:
for center in [False, True]:
for norm in [False, True]:
for dtype in [np.complex64, np.complex128]:
yield (__test, n_fft, hop_length, win_length,
center, norm, dtype)
def test_ifgram_if():
y, sr = librosa.load('data/test1_22050.wav')
def __test(ref_power, clip):
F, D = librosa.ifgram(y, sr=sr, ref_power=ref_power, clip=clip)
if clip:
assert np.all(0 <= F) and np.all(F <= 0.5 * sr)
assert np.all(np.isfinite(F))
for ref_power in [-10, 0.0, 1e-6, np.max]:
for clip in [False, True]:
if six.callable(ref_power) or ref_power >= 0.0:
tf = __test
else:
tf = raises(librosa.ParameterError)(__test)
yield tf, ref_power, clip
def test_salience_basecase():
(y, sr) = librosa.load('data/test1_22050.wav')
S = np.abs(librosa.stft(y))
freqs = librosa.core.fft_frequencies(sr)
harms = [1]
weights = [1.0]
S_sal = librosa.core.salience(
S, freqs, harms, weights, filter_peaks=False, kind='quadratic'
)
assert np.allclose(S_sal, S)
def test_salience_basecase2():
(y, sr) = librosa.load('data/test1_22050.wav')
S = np.abs(librosa.stft(y))
freqs = librosa.core.fft_frequencies(sr)
harms = [1, 0.5, 2.0]
weights = [1.0, 0.0, 0.0]
S_sal = librosa.core.salience(
S, freqs, harms, weights, filter_peaks=False, kind='quadratic'
)
assert np.allclose(S_sal, S)
def test_salience_defaults():
S = np.array([
[0.1, 0.5, 0.0],
[0.2, 1.2, 1.2],
[0.0, 0.7, 0.3],
[1.3, 3.2, 0.8]
])
freqs = np.array([50.0, 100.0, 200.0, 400.0])
harms = [0.5, 1, 2]
actual = librosa.core.salience(
S, freqs, harms, kind='quadratic', fill_value=0.0
)
expected = np.array([
[0.0, 0.0, 0.0],
[0.3, 2.4, 1.5],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]
]) / 3.0
assert np.allclose(expected, actual)
def test_salience_weights():
S = np.array([
[0.1, 0.5, 0.0],
[0.2, 1.2, 1.2],
[0.0, 0.7, 0.3],
[1.3, 3.2, 0.8]
])
freqs = np.array([50.0, 100.0, 200.0, 400.0])
harms = [0.5, 1, 2]
weights = [1.0, 1.0, 1.0]
actual = librosa.core.salience(
S, freqs, harms, weights, kind='quadratic', fill_value=0.0
)
expected = np.array([
[0.0, 0.0, 0.0],
[0.3, 2.4, 1.5],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]
]) / 3.0
assert np.allclose(expected, actual)
def test_salience_no_peak_filter():
S = np.array([
[0.1, 0.5, 0.0],
[0.2, 1.2, 1.2],
[0.0, 0.7, 0.3],
[1.3, 3.2, 0.8]
])
freqs = np.array([50.0, 100.0, 200.0, 400.0])
harms = [0.5, 1, 2]
weights = [1.0, 1.0, 1.0]
actual = librosa.core.salience(
S, freqs, harms, weights, filter_peaks=False, kind='quadratic'
)
expected = np.array([
[0.3, 1.7, 1.2],
[0.3, 2.4, 1.5],
[1.5, 5.1, 2.3],
[1.3, 3.9, 1.1]
]) / 3.0
assert np.allclose(expected, actual)
def test_salience_aggregate():
S = np.array([
[0.1, 0.5, 0.0],
[0.2, 1.2, 1.2],
[0.0, 0.7, 0.3],
[1.3, 3.2, 0.8]
])
freqs = np.array([50.0, 100.0, 200.0, 400.0])
harms = [0.5, 1, 2]
weights = [1.0, 1.0, 1.0]
actual = librosa.core.salience(
S, freqs, harms, weights, aggregate=np.ma.max, kind='quadratic',
fill_value=0.0
)
expected = np.array([
[0.0, 0.0, 0.0],
[0.2, 1.2, 1.2],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]
])
assert np.allclose(expected, actual)
def test_magphase():
(y, sr) = librosa.load('data/test1_22050.wav')
D = librosa.stft(y)
S, P = librosa.magphase(D)
assert np.allclose(S * P, D)
def test_istft_reconstruction():
from scipy.signal import bartlett, hann, hamming, blackman, blackmanharris
def __test(x, n_fft, hop_length, window, atol):
S = librosa.core.stft(
x, n_fft=n_fft, hop_length=hop_length, window=window)
x_reconstructed = librosa.core.istft(
S, hop_length=hop_length, window=window)
L = min(len(x), len(x_reconstructed))
x = np.resize(x, L)
x_reconstructed = np.resize(x_reconstructed, L)
# NaN/Inf/-Inf should not happen
assert np.all(np.isfinite(x_reconstructed))
        # should be approximately reconstructed
assert np.allclose(x, x_reconstructed, atol=atol)
srand()
# White noise
x1 = np.random.randn(2 ** 15)
# Sin wave
x2 = np.sin(np.linspace(-np.pi, np.pi, 2 ** 15))
# Real music signal
x3, sr = librosa.load('data/test1_44100.wav', sr=None, mono=True)
assert sr == 44100
for x, atol in [(x1, 1.0e-6), (x2, 1.0e-7), (x3, 1.0e-7)]:
for window_func in [bartlett, hann, hamming, blackman, blackmanharris]:
for n_fft in [512, 1024, 2048, 4096]:
win = window_func(n_fft, sym=False)
symwin = window_func(n_fft, sym=True)
                # tests with pre-computed window functions
for hop_length_denom in six.moves.range(2, 9):
hop_length = n_fft // hop_length_denom
yield (__test, x, n_fft, hop_length, win, atol)
yield (__test, x, n_fft, hop_length, symwin, atol)
                # also test passing the window function itself
yield (__test, x, n_fft, n_fft // 9, window_func, atol)
    # test with default parameters
x_reconstructed = librosa.core.istft(librosa.core.stft(x))
L = min(len(x), len(x_reconstructed))
x = np.resize(x, L)
x_reconstructed = np.resize(x_reconstructed, L)
assert np.allclose(x, x_reconstructed, atol=atol)
def test_load_options():
filename = 'data/test1_22050.wav'
def __test(offset, duration, mono, dtype):
y, sr = librosa.load(filename, mono=mono, offset=offset,
duration=duration, dtype=dtype)
if duration is not None:
assert np.allclose(y.shape[-1], int(sr * duration))
if mono:
eq_(y.ndim, 1)
else:
# This test file is stereo, so y.ndim should be 2
eq_(y.ndim, 2)
# Check the dtype
assert np.issubdtype(y.dtype, dtype)
assert np.issubdtype(dtype, y.dtype)
for offset in [0, 1, 2]:
for duration in [None, 0, 0.5, 1, 2]:
for mono in [False, True]:
for dtype in [np.float32, np.float64]:
yield __test, offset, duration, mono, dtype
pass
def test_get_duration_wav():
def __test_audio(filename, mono, sr, duration):
y, sr = librosa.load(filename, sr=sr, mono=mono, duration=duration)
duration_est = librosa.get_duration(y=y, sr=sr)
assert np.allclose(duration_est, duration, rtol=1e-3, atol=1e-5)
def __test_spec(filename, sr, duration, n_fft, hop_length, center):
y, sr = librosa.load(filename, sr=sr, duration=duration)
S = librosa.stft(y, n_fft=n_fft, hop_length=hop_length, center=center)
duration_est = librosa.get_duration(S=S, sr=sr, n_fft=n_fft,
hop_length=hop_length,
center=center)
# We lose a little accuracy in framing without centering, so it's
# not as precise as time-domain duration
assert np.allclose(duration_est, duration, rtol=1e-1, atol=1e-2)
test_file = 'data/test1_22050.wav'
for sr in [8000, 11025, 22050]:
for duration in [1.0, 2.5]:
for mono in [False, True]:
yield __test_audio, test_file, mono, sr, duration
for n_fft in [256, 512, 1024]:
for hop_length in [n_fft // 8, n_fft // 4, n_fft // 2]:
for center in [False, True]:
yield (__test_spec, test_file, sr,
duration, n_fft, hop_length, center)
def test_get_duration_filename():
filename = 'data/test2_8000.wav'
true_duration = 30.197625
duration_fn = librosa.get_duration(filename=filename)
y, sr = librosa.load(filename, sr=None)
duration_y = librosa.get_duration(y=y, sr=sr)
assert np.allclose(duration_fn, true_duration)
assert np.allclose(duration_fn, duration_y)
def test_autocorrelate():
def __test(y, truth, max_size, axis):
ac = librosa.autocorrelate(y, max_size=max_size, axis=axis)
my_slice = [slice(None)] * truth.ndim
if max_size is not None and max_size <= y.shape[axis]:
my_slice[axis] = slice(min(max_size, y.shape[axis]))
if not np.iscomplexobj(y):
assert not np.iscomplexobj(ac)
assert np.allclose(ac, truth[my_slice])
srand()
# test with both real and complex signals
for y in [np.random.randn(256, 256), np.exp(1.j * np.random.randn(256, 256))]:
# Make ground-truth autocorrelations along each axis
truth = [np.asarray([scipy.signal.fftconvolve(yi, yi[::-1].conj(),
mode='full')[len(yi)-1:] for yi in y.T]).T,
np.asarray([scipy.signal.fftconvolve(yi, yi[::-1].conj(),
mode='full')[len(yi)-1:] for yi in y])]
for axis in [0, 1, -1]:
for max_size in [None, y.shape[axis]//2, y.shape[axis], 2 * y.shape[axis]]:
yield __test, y, truth[axis], max_size, axis
def test_to_mono():
def __test(filename, mono):
y, sr = librosa.load(filename, mono=mono)
y_mono = librosa.to_mono(y)
eq_(y_mono.ndim, 1)
eq_(len(y_mono), y.shape[-1])
if mono:
assert np.allclose(y, y_mono)
filename = 'data/test1_22050.wav'
for mono in [False, True]:
yield __test, filename, mono
def test_zero_crossings():
def __test(data, threshold, ref_magnitude, pad, zp):
zc = librosa.zero_crossings(y=data,
threshold=threshold,
ref_magnitude=ref_magnitude,
pad=pad,
zero_pos=zp)
idx = np.flatnonzero(zc)
if pad:
idx = idx[1:]
for i in idx:
assert np.sign(data[i]) != np.sign(data[i-1])
srand()
data = np.random.randn(32)
for threshold in [None, 0, 1e-10]:
for ref_magnitude in [None, 0.1, np.max]:
for pad in [False, True]:
for zero_pos in [False, True]:
yield __test, data, threshold, ref_magnitude, pad, zero_pos
def test_pitch_tuning():
def __test(hz, resolution, bins_per_octave, tuning):
est_tuning = librosa.pitch_tuning(hz,
resolution=resolution,
bins_per_octave=bins_per_octave)
assert np.abs(tuning - est_tuning) <= resolution
for resolution in [1e-2, 1e-3]:
for bins_per_octave in [12]:
# Make up some frequencies
for tuning in [-0.5, -0.375, -0.25, 0.0, 0.25, 0.375]:
note_hz = librosa.midi_to_hz(tuning + np.arange(128))
yield __test, note_hz, resolution, bins_per_octave, tuning
def test_piptrack_properties():
def __test(S, n_fft, hop_length, fmin, fmax, threshold):
pitches, mags = librosa.core.piptrack(S=S,
n_fft=n_fft,
hop_length=hop_length,
fmin=fmin,
fmax=fmax,
threshold=threshold)
# Shape tests
eq_(S.shape, pitches.shape)
eq_(S.shape, mags.shape)
# Make sure all magnitudes are positive
assert np.all(mags >= 0)
# Check the frequency estimates for bins with non-zero magnitude
idx = (mags > 0)
assert np.all(pitches[idx] >= fmin)
assert np.all(pitches[idx] <= fmax)
# And everywhere else, pitch should be 0
assert np.all(pitches[~idx] == 0)
y, sr = librosa.load('data/test1_22050.wav')
for n_fft in [2048, 4096]:
for hop_length in [None, n_fft // 4, n_fft // 2]:
S = np.abs(librosa.stft(y, n_fft=n_fft, hop_length=hop_length))
for fmin in [0, 100]:
for fmax in [4000, 8000, sr // 2]:
for threshold in [0.1, 0.2, 0.5]:
yield __test, S, n_fft, hop_length, fmin, fmax, threshold
def test_piptrack_errors():
def __test(y, sr, S, n_fft, hop_length, fmin, fmax, threshold):
pitches, mags = librosa.piptrack(
y=y, sr=sr, S=S, n_fft=n_fft, hop_length=hop_length, fmin=fmin,
fmax=fmax, threshold=threshold)
S = np.asarray([[1, 0, 0]]).T
np.seterr(divide='raise')
yield __test, None, 22050, S, 4096, None, 150.0, 4000.0, 0.1
def test_piptrack():
def __test(S, freq):
pitches, mags = librosa.piptrack(S=S, fmin=100)
idx = (mags > 0)
assert len(idx) > 0
recovered_pitches = pitches[idx]
# We should be within one cent of the target
assert np.all(np.abs(np.log2(recovered_pitches) - np.log2(freq)) <= 1e-2)
sr = 22050
duration = 3.0
for freq in [110, 220, 440, 880]:
# Generate a sine tone
y = np.sin(2 * np.pi * freq * np.linspace(0, duration, num=duration*sr))
for n_fft in [1024, 2048, 4096]:
# Using left-aligned frames eliminates reflection artifacts at the boundaries
S = np.abs(librosa.stft(y, n_fft=n_fft, center=False))
yield __test, S, freq
def test_estimate_tuning():
def __test(target_hz, resolution, bins_per_octave, tuning):
y = np.sin(2 * np.pi * target_hz * t)
tuning_est = librosa.estimate_tuning(resolution=resolution,
bins_per_octave=bins_per_octave,
y=y,
sr=sr,
n_fft=2048,
fmin=librosa.note_to_hz('C4'),
fmax=librosa.note_to_hz('G#9'))
# Round to the proper number of decimals
deviation = np.around(np.abs(tuning - tuning_est),
int(-np.log10(resolution)))
# We'll accept an answer within three bins of the resolution
assert deviation <= 3 * resolution
for sr in [11025, 22050]:
duration = 5.0
t = np.linspace(0, duration, duration * sr)
for resolution in [1e-2]:
for bins_per_octave in [12]:
# test a null-signal tuning estimate
yield (__test, 0.0, resolution, bins_per_octave, 0.0)
for center_note in [69, 84, 108]:
for tuning in np.linspace(-0.5, 0.5, 8, endpoint=False):
target_hz = librosa.midi_to_hz(center_note + tuning)
yield (__test, np.asscalar(target_hz), resolution,
bins_per_octave, tuning)
def test__spectrogram():
y, sr = librosa.load('data/test1_22050.wav')
def __test(n_fft, hop_length, power):
S = np.abs(librosa.stft(y, n_fft=n_fft, hop_length=hop_length))**power
S_, n_fft_ = librosa.core.spectrum._spectrogram(y=y, S=S, n_fft=n_fft,
hop_length=hop_length,
power=power)
# First check with all parameters
assert np.allclose(S, S_)
assert np.allclose(n_fft, n_fft_)
# Then check with only the audio
S_, n_fft_ = librosa.core.spectrum._spectrogram(y=y, n_fft=n_fft,
hop_length=hop_length,
power=power)
assert np.allclose(S, S_)
assert np.allclose(n_fft, n_fft_)
# And only the spectrogram
S_, n_fft_ = librosa.core.spectrum._spectrogram(S=S, n_fft=n_fft,
hop_length=hop_length,
power=power)
assert np.allclose(S, S_)
assert np.allclose(n_fft, n_fft_)
# And only the spectrogram with no shape parameters
S_, n_fft_ = librosa.core.spectrum._spectrogram(S=S, power=power)
assert np.allclose(S, S_)
assert np.allclose(n_fft, n_fft_)
# And only the spectrogram but with incorrect n_fft
S_, n_fft_ = librosa.core.spectrum._spectrogram(S=S, n_fft=2*n_fft,
power=power)
assert np.allclose(S, S_)
assert np.allclose(n_fft, n_fft_)
for n_fft in [1024, 2048]:
for hop_length in [None, 512]:
for power in [1, 2]:
yield __test, n_fft, hop_length, power
assert librosa.core.spectrum._spectrogram(y)
def test_logamplitude():
# Fake up some data
def __test(x, ref_power, amin, top_db):
y = librosa.logamplitude(x,
ref_power=ref_power,
amin=amin,
top_db=top_db)
assert np.isrealobj(y)
eq_(y.shape, x.shape)
if top_db is not None:
assert y.min() >= y.max()-top_db
for n in [1, 2, 10]:
x = np.linspace(0, 2e5, num=n)
phase = np.exp(1.j * x)
for ref_power in [1.0, np.max]:
for amin in [-1, 0, 1e-10, 1e3]:
for top_db in [None, -10, 0, 40, 80]:
tf = __test
if amin <= 0 or (top_db is not None and top_db < 0):
tf = raises(librosa.ParameterError)(__test)
yield tf, x, ref_power, amin, top_db
yield tf, x * phase, ref_power, amin, top_db
def test_clicks():
def __test(times, frames, sr, hop_length, click_freq, click_duration, click, length):
y = librosa.clicks(times=times,
frames=frames,
sr=sr,
hop_length=hop_length,
click_freq=click_freq,
click_duration=click_duration,
click=click,
length=length)
if times is not None:
nmax = librosa.time_to_samples(times, sr=sr).max()
else:
nmax = librosa.frames_to_samples(frames, hop_length=hop_length).max()
if length is not None:
assert len(y) == length
elif click is not None:
assert len(y) == nmax + len(click)
test_times = np.linspace(0, 10.0, num=5)
# Bad cases
yield raises(librosa.ParameterError)(__test), None, None, 22050, 512, 1000, 0.1, None, None
yield raises(librosa.ParameterError)(__test), test_times, None, 22050, 512, 1000, 0.1, np.ones((2, 10)), None
yield raises(librosa.ParameterError)(__test), test_times, None, 22050, 512, 1000, 0.1, None, 0
yield raises(librosa.ParameterError)(__test), test_times, None, 22050, 512, 0, 0.1, None, None
yield raises(librosa.ParameterError)(__test), test_times, None, 22050, 512, 1000, 0, None, None
for sr in [11025, 22050]:
for hop_length in [512, 1024]:
test_frames = librosa.time_to_frames(test_times, sr=sr, hop_length=hop_length)
for click in [None, np.ones(sr // 10)]:
for length in [None, 5 * sr, 15 * sr]:
yield __test, test_times, None, sr, hop_length, 1000, 0.1, click, length
yield __test, None, test_frames, sr, hop_length, 1000, 0.1, click, length
def test_fmt_scale():
# This test constructs a single-cycle cosine wave, applies various axis scalings,
# and tests that the FMT is preserved
def __test(scale, n_fmt, over_sample, kind, y_orig, y_res, atol):
# Make sure our signals preserve energy
assert np.allclose(np.sum(y_orig**2), np.sum(y_res**2))
# Scale-transform the original
f_orig = librosa.fmt(y_orig,
t_min=0.5,
n_fmt=n_fmt,
over_sample=over_sample,
kind=kind)
# Force to the same length
n_fmt_res = 2 * len(f_orig) - 2
# Scale-transform the new signal to match
f_res = librosa.fmt(y_res,
t_min=scale * 0.5,
n_fmt=n_fmt_res,
over_sample=over_sample,
kind=kind)
# Due to sampling alignment, we'll get some phase deviation here
# The shape of the spectrum should be approximately preserved though.
assert np.allclose(np.abs(f_orig), np.abs(f_res), atol=atol, rtol=1e-7)
# Our test signal is a single-cycle sine wave
def f(x):
freq = 1
return np.sin(2 * np.pi * freq * x)
bounds = [0, 1.0]
num = 2**8
x = np.linspace(bounds[0], bounds[1], num=num, endpoint=False)
y_orig = f(x)
atol = {'slinear': 1e-4, 'quadratic': 1e-5, 'cubic': 1e-6}
for scale in [2, 3./2, 5./4, 9./8]:
# Scale the time axis
x_res = np.linspace(bounds[0], bounds[1], num=scale * num, endpoint=False)
y_res = f(x_res)
# Re-normalize the energy to match that of y_orig
y_res /= np.sqrt(scale)
for kind in ['slinear', 'quadratic', 'cubic']:
for n_fmt in [None, 64, 128, 256, 512]:
for cur_os in [1, 2, 3]:
yield __test, scale, n_fmt, cur_os, kind, y_orig, y_res, atol[kind]
# Over-sampling with down-scaling gets dicey at the end-points
yield __test, 1./scale, n_fmt, 1, kind, y_res, y_orig, atol[kind]
def test_fmt_fail():
@raises(librosa.ParameterError)
def __test(t_min, n_fmt, over_sample, y):
librosa.fmt(y, t_min=t_min, n_fmt=n_fmt, over_sample=over_sample)
srand()
y = np.random.randn(256)
# Test for bad t_min
for t_min in [-1, 0]:
yield __test, t_min, None, 2, y
# Test for bad n_fmt
for n_fmt in [-1, 0, 1, 2]:
yield __test, 1, n_fmt, 2, y
# Test for bad over_sample
for over_sample in [-1, 0, 0.5]:
yield __test, 1, None, over_sample, y
# Test for bad input
y[len(y)//2:] = np.inf
yield __test, 1, None, 2, y
# Test for insufficient samples
yield __test, 1, None, 1, np.ones(2)
def test_fmt_axis():
srand()
y = np.random.randn(32, 32)
f1 = librosa.fmt(y, axis=-1)
f2 = librosa.fmt(y.T, axis=0).T
assert np.allclose(f1, f2)
def test_harmonics_1d():
x = np.arange(16)
y = np.linspace(-8, 8, num=len(x), endpoint=False)**2
h = [0.25, 0.5, 1, 2, 4]
yh = librosa.interp_harmonics(y, x, h)
eq_(yh.shape[1:], y.shape)
eq_(yh.shape[0], len(h))
for i in range(len(h)):
if h[i] <= 1:
# Check that subharmonics match
step = int(1./h[i])
vals = yh[i, ::step]
assert np.allclose(vals, y[:len(vals)])
else:
# Else check that harmonics match
step = h[i]
vals = y[::step]
assert np.allclose(vals, yh[i, :len(vals)])
def test_harmonics_2d():
x = np.arange(16)
y = np.linspace(-8, 8, num=len(x), endpoint=False)**2
y = np.tile(y, (5, 1)).T
h = [0.25, 0.5, 1, 2, 4]
yh = librosa.interp_harmonics(y, x, h, axis=0)
eq_(yh.shape[1:], y.shape)
eq_(yh.shape[0], len(h))
for i in range(len(h)):
if h[i] <= 1:
# Check that subharmonics match
step = int(1./h[i])
vals = yh[i, ::step]
assert np.allclose(vals, y[:len(vals)])
else:
# Else check that harmonics match
step = h[i]
vals = y[::step]
assert np.allclose(vals, yh[i, :len(vals)])
@raises(librosa.ParameterError)
def test_harmonics_badshape_1d():
freqs = np.zeros(100)
obs = np.zeros((5, 10))
librosa.interp_harmonics(obs, freqs, [1])
@raises(librosa.ParameterError)
def test_harmonics_badshape_2d():
freqs = np.zeros((5, 5))
obs = np.zeros((5, 10))
librosa.interp_harmonics(obs, freqs, [1])
def test_harmonics_2d_varying():
x = np.arange(16)
y = np.linspace(-8, 8, num=len(x), endpoint=False)**2
x = np.tile(x, (5, 1)).T
y = np.tile(y, (5, 1)).T
h = [0.25, 0.5, 1, 2, 4]
yh = librosa.interp_harmonics(y, x, h, axis=0)
eq_(yh.shape[1:], y.shape)
eq_(yh.shape[0], len(h))
for i in range(len(h)):
if h[i] <= 1:
# Check that subharmonics match
step = int(1./h[i])
vals = yh[i, ::step]
assert np.allclose(vals, y[:len(vals)])
else:
# Else check that harmonics match
step = h[i]
vals = y[::step]
assert np.allclose(vals, yh[i, :len(vals)])
| isc |
ehealthafrica-ci/onadata | onadata/libs/utils/bamboo.py | 2 | 5642 | import StringIO
import unicodecsv
from pybamboo.dataset import Dataset
from pybamboo.connection import Connection
from pybamboo.exceptions import ErrorParsingBambooData
from onadata.apps.logger.models.instance import Instance
from onadata.apps.viewer.pandas_mongo_bridge import CSVDataFrameBuilder,\
NoRecordsFoundError
from onadata.apps.restservice.models import RestService
def get_bamboo_url(xform):
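    ''' returns the bamboo service URL registered for this form (default: http://bamboo.io) '''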
try:
service = list(RestService.objects.filter(
xform=xform, name='bamboo')).pop()
except IndexError:
return 'http://bamboo.io'
return service.service_url
def delete_bamboo_dataset(xform):
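    ''' deletes the bamboo dataset attached to this form; returns False if there is none or on failure '''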
if not xform.bamboo_dataset:
return False
try:
dataset = Dataset(connection=Connection(url=get_bamboo_url(xform)),
dataset_id=xform.bamboo_dataset)
return dataset.delete()
except ErrorParsingBambooData:
return False
def ensure_rest_service(xform):
    ''' creates Bamboo RestService if it doesn't exist '''
bb_url = get_bamboo_url(xform)
services = RestService.objects.filter(xform=xform, name='bamboo')
# do nothing if there's already a restservice for that.
if services.filter(service_url=bb_url).count():
return True
# there is no service ; let's create a default one.
if not services.count():
RestService.objects.create(xform=xform,
name='bamboo',
service_url=bb_url)
return True
# we have existing services with non-default URL
# do nothing as the user probably knows what to do.
return False
def get_new_bamboo_dataset(xform, force_last=False):
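    ''' creates a bamboo dataset from the form's CSV data and returns its id (empty string on failure) '''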
dataset_id = u''
try:
content_data = get_csv_data(xform, force_last=force_last)
dataset = Dataset(connection=Connection(url=get_bamboo_url(xform)),
content=content_data,
na_values=['n/a'])
except NoRecordsFoundError:
return dataset_id
if dataset.id:
return dataset.id
return dataset_id
def get_csv_data(xform, force_last=False):
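    ''' builds a CSV export of the form data with meta columns prefixed, optionally appending the last submission '''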
def getbuff():
return StringIO.StringIO()
def get_headers_from(csv_data):
csv_data.seek(0)
header_row = csv_data.readline()
csv_data.read()
return header_row.split(',')
def get_csv_data_manual(xform,
only_last=False, with_header=True,
headers_to_use=None):
# TODO: find out a better way to handle this
# when form has only one submission, CSVDFB is empty.
# we still want to create the BB ds with row 1
        # so we extract it and CSV it.
instances = Instance.objects.filter(xform=xform).order_by(
'-date_modified')
if instances.count() == 0:
raise NoRecordsFoundError
else:
# we should only do it for count == 1 but eh.
csv_buf = getbuff()
if only_last:
instances = instances[0:1]
rows = [instance.get_full_dict() for instance in instances]
if headers_to_use is None:
headers_to_use = [key for key in rows[0].keys()
if not key.startswith('_')]
w = unicodecsv.DictWriter(csv_buf, fieldnames=headers_to_use,
extrasaction='ignore',
lineterminator='\n',
encoding='utf-8')
if with_header:
w.writeheader()
w.writerows(rows)
csv_buf.flush()
if not csv_buf.len:
raise NoRecordsFoundError
return csv_buf.getvalue()
# setup an IO stream
buff = getbuff()
# prepare/generate a standard CSV export.
# note that it omits the current submission (if called from rest)
csv_dataframe_builder = CSVDataFrameBuilder(xform.user.username,
xform.id_string)
try:
csv_dataframe_builder.export_to(buff)
if force_last:
# requested to add last submission to the buffer
buff.write(get_csv_data_manual(xform,
only_last=True, with_header=False,
headers_to_use=
get_headers_from(buff)))
except NoRecordsFoundError:
# verify that we don't have a single submission before giving up
get_csv_data_manual(xform, with_header=True)
if buff.len:
# rewrite CSV header so that meta fields (starting with _ or meta)
# are prefixed to ensure that the dataset will be joinable to
# another formhub dataset
prefix = (u'%(id_string)s_%(id)s'
% {'id_string': xform.id_string, 'id': xform.id})
new_buff = getbuff()
buff.seek(0)
reader = unicodecsv.reader(buff, encoding='utf-8')
writer = unicodecsv.writer(new_buff, encoding='utf-8')
is_header = True
for row in reader:
if is_header:
is_header = False
for idx, col in enumerate(row):
if col.startswith('_') or col.startswith('meta_')\
or col.startswith('meta/'):
row[idx] = (u'%(prefix)s%(col)s'
% {'prefix': prefix, 'col': col})
writer.writerow(row)
return new_buff.getvalue()
else:
raise NoRecordsFoundError
| bsd-2-clause |
reichelu/copasul | src/sigFunc.py | 1 | 54145 |
import scipy.io.wavfile as sio
import scipy.signal as sis
from scipy import interpolate
import numpy as np
import math
import matplotlib.pyplot as plt
import mylib as myl
import sys
import copy as cp
import re
import scipy.fftpack as sf
# NOTE: int2float might be removed after scipy update/check
# (check defaults in myl.sig_preproc)
# read wav file
# IN:
# fileName
# OUT:
# signal ndarray
# sampleRate
def wavread(f,opt={'do_preproc':True}):
## signal input
fs, s_in = sio.read(f)
# int -> float
s = myl.wav_int2float(s_in)
# preproc
if opt['do_preproc']:
s = myl.sig_preproc(s)
return s, fs
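# minimal usage sketch for wavread (illustrative only; 'ex.wav' is a placeholder file name)
def wavread_demo(f='ex.wav'):
    # read and preprocess the signal, returning samples and sample rate
    s, fs = wavread(f, {'do_preproc': True})
    return s, fs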
# DCT
# IN:
# y - 1D signal vector
# opt
# ['fs'] - sample rate
# ['wintyp'] - <'kaiser'>, any type supported by
# scipy.signal.get_window()
# ['winparam'] - <1> additionally needed window parameters,
# scalar, string, list ..., depends on 'wintyp'
# ['nsm'] - <3> number of spectral moments
# ['rmo'] - skip first (lowest) cosine (=constant offset)
# in spectral moment calculation <1>|0
# ['lb'] - lower cutoff frequency for coef truncation <0>
# ['ub'] - upper cutoff frequency (if 0, no cutoff) <0>
# Recommended e.g. for f0 DCT, so that only influence
# of events with <= 10Hz on f0 contour is considered)
# ['peak_prct'] - <80> lower percentile threshold to be superseded for
# amplitude maxima in DCT spectrum
# OUT:
# dct
# ['c_orig'] all coefs
# ['f_orig'] their frequencies
# ['c'] coefs with freq between lb and ub
# ['f'] their freqs
# ['i'] their indices in c_orig
# ['sm'] spectral moments based on c
# ['opt'] input options
# ['m'] y mean
# ['sd'] y standard dev
# ['cbin'] array of sum(abs(coef)) in frequency bins
# ['fbin'] corresponding lower boundary freqs
# ['f_max'] frequency of global amplitude maximum
# ['f_lmax'] frequencies of local maxima (array of minlen 1)
# ['c_cog'] the coef amplitude of the cog freq (sm[0])
# PROBLEMS:
# - if the segment is too short (< 5 samples), the lowest freqs associated
#   with the DCT components are too high for ub, i.e. dct_trunc() returns an
#   empty array.
# -> np.nan assigned to respective variables
def dct_wrapper(y,opt):
dflt={'wintyp':'kaiser','winparam':1,'nsm':3,'rmo':True,
'lb':0,'ub':0,'peak_prct':80}
opt = myl.opt_default(opt,dflt)
# weight window
w = sig_window(opt['wintyp'],len(y),opt['winparam'])
y = y*w
#print(1,len(y))
# centralize
y = y-np.mean(y)
#print(2,len(y))
# DCT coefs
c = sf.dct(y,norm='ortho')
#print(3,len(c))
# indices (starting with 0)
ly = len(y)
ci = myl.idx_a(ly)
# corresponding cos frequencies
    f = (ci+1) * (opt['fs']/(ly*2))
# band pass truncation of coefs
# indices of coefs with lb <= freq <= ub
i = dct_trunc(f,ci,opt)
#print('f ci i',f,ci,i)
# analysis segment too short -> DCT freqs above ub
if len(i)==0:
sm = myl.ea()
while len(sm) <= opt['nsm']:
sm = np.append(sm,np.nan)
return {'c_orig':c,'f_orig':f,'c':myl.ea(),'f':myl.ea(),'i':[],'sm':sm,'opt':opt,
'm':np.nan,'sd':np.nan,'cbin':myl.ea(),'fbin':myl.ea(),
'f_max':np.nan, 'f_lmax':myl.ea(), 'c_cog': np.nan}
# mean abs error from band-limited IDCT
#mae = dct_mae(c,i,y)
# remove constant offset with index 0
# already removed by dct_trunc in case lb>0. Thus checked for i[0]==0
# (i[0] indeed represents constant offset; tested by
# cr = np.zeros(ly); cr[0]=c[0]; yr = sf.idct(cr); print(yr)
if opt['rmo']==True and len(i)>1 and i[0]==0:
j = i[1:len(i)]
else:
j = i
if type(j) is not list: j = [j]
# coefs and their frequencies between lb and ub
# (+ constant offset removed)
fi = f[j]
ci = c[j]
# spectral moments
if len(j)>0:
sm = specmom(ci,fi,opt['nsm'])
else:
sm = np.zeros(opt['nsm'])
# frequency bins
fbin, cbin = dct_fbin(fi,ci,opt)
# frequencies of global and local maxima in DCT spectrum
f_max, f_lmax, px = dct_peak(ci,fi,sm[0],opt)
# return
return {'c_orig':c,'f_orig':f,'c':ci,'f':fi,'i':j,'sm':sm,'opt':opt,
'm':np.mean(y),'sd':np.std(y),'cbin':cbin,'fbin':fbin,
'f_max':f_max, 'f_lmax':f_lmax, 'c_cog': px}
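# Illustrative usage sketch for dct_wrapper() (not part of the original API;
# the synthetic contour and all parameter values below are placeholder
# assumptions). It shows how a band-limited DCT analysis of an f0 contour
# sampled at 100 Hz could be set up.
def _demo_dct_wrapper():
    fs = 100
    t = np.linspace(0, 1, fs, endpoint=False)
    # f0 contour around 120 Hz with a slow 2 Hz modulation
    y = 120 + 10 * np.sin(2 * np.pi * 2 * t)
    d = dct_wrapper(y, {'fs': fs, 'lb': 0, 'ub': 10})
    # d['sm'][0]: centroid of the 0-10 Hz DCT spectrum
    # d['f_max']: frequency of the strongest component in that band
    return d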
# returns local and max peak frequencies
# IN:
# x: array of abs coef amplitudes
# f: corresponding frequencies
# cog: center of gravity
# OUT:
#  f_gm: freq of global maximum
# f_lm: array of freq of local maxima
#  px: threshold to be superseded (derived from prct specs)
def dct_peak(x,f,cog,opt):
x = abs(cp.deepcopy(x))
## global maximum
i = myl.find(x,'is','max')
if len(i)>1:
i=int(np.mean(i))
f_gm = float(f[i])
## local maxima
    # threshold to be superseded
px = dct_px(x,f,cog,opt)
idx = myl.find(x,'>=',px)
# 2d array of neighboring+1 indices
# e.g. [[0,1,2],[5,6],[9,10]]
ii = []
# min freq distance between maxima
fd_min = 1
for i in myl.idx(idx):
if len(ii)==0:
ii.append([idx[i]])
elif idx[i]>ii[-1][-1]+1:
xi = x[ii[-1]]
fi = f[ii[-1]]
j = myl.find(xi,'is','max')
#print('xi',xi,'fi',fi,'f',f[idx[i]])
if len(j)>0 and f[idx[i]]>fi[j[0]]+fd_min:
#print('->1')
ii.append([idx[i]])
else:
#print('->2')
ii[-1].append(idx[i])
#myl.stopgo() #!c
else:
ii[-1].append(idx[i])
# get index of x maximum within each subsegment
# and return corresponding frequencies
f_lm = []
for si in ii:
zi = myl.find(x[si],'is','max')
if len(zi)>1:
zi=int(np.mean(zi))
else:
zi = zi[0]
i = si[zi]
if not np.isnan(i):
f_lm.append(f[i])
#print('px',px)
#print('i',ii)
#print('x',x)
#print('f',f)
#print('m',f_gm,f_lm)
#myl.stopgo()
return f_gm, f_lm, px
# return center-of-gravity related amplitude
# IN:
# x: array of coefs
# f: corresponding freqs
# cog: center of gravity freq
# opt
# OUT:
# coef amplitude related to cog
def dct_px(x,f,cog,opt):
x = abs(cp.deepcopy(x))
# cog outside freq range
if cog <= f[0]:
return x[0]
elif cog >= f[-1]:
return x[-1]
# find f-indices adjacent to cog
for i in range(len(f)-1):
if f[i] == cog:
return x[i]
elif f[i+1] == cog:
return x[i+1]
elif f[i] < cog and f[i+1] > cog:
# interpolate
#xi = np.interp(cog,f[i:i+2],x[i:i+2])
#print('cog:',cog,'xi',f[i:i+2],x[i:i+2],'->',xi)
return np.interp(cog,f[i:i+2],x[i:i+2])
return np.percentile(x,opt['peak_prct'])
# pre-emphasis
# alpha > 1 (interpreted as lower cutoff freq)
# alpha <- exp(-2 pi alpha delta)
# s'[n] = s[n]-alpha*s[n-1]
# IN:
# signal
# alpha - s[n-1] weight <0.95>
# fs - sample rate <-1>
#   do_scale - <FALSE> if TRUE then the pre-emphasized signal is scaled to
# same abs_mean value as original signal (in general pre-emphasis
# leads to overall energy loss)
def pre_emphasis(y,a=0.95,fs=-1,do_scale=False):
# determining alpha directly or from cutoff freq
if a>1:
if fs <= 0:
            print('pre-emphasis: sample rate fs needed to derive alpha from cutoff frequency. Set alpha to 0.95')
a = 0.95
else:
a = math.exp(-2*math.pi*a*1/fs)
#print('alpha',a)
# shifted signal
ype = np.append(y[0], y[1:] - a * y[:-1])
# scaling
if do_scale:
sf = np.mean(abs(y))/np.mean(abs(ype))
ype*=sf
## plot
#ys = y[30000:40000]
#ypes = ype[30000:40000]
#t = np.linspace(0,len(ys),len(ys))
#fig, spl = plt.subplots(2,1,squeeze=False)
#cid1 = fig.canvas.mpl_connect('button_press_event', onclick_next)
#cid2 = fig.canvas.mpl_connect('key_press_event', onclick_exit)
#spl[0,0].plot(t,ys)
#spl[1,0].plot(t,ypes)
#plt.show()
##
return ype
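# Illustrative usage sketch for pre_emphasis() (placeholder values only;
# fs=16000 and the noise input are assumptions). A value a>1 is interpreted
# as a lower cutoff frequency and converted internally to a weight.
def _demo_pre_emphasis():
    fs = 16000
    y = np.random.randn(fs)
    # 50 Hz "cutoff" -> alpha = exp(-2*pi*50/fs), roughly 0.98
    return pre_emphasis(y, a=50, fs=fs, do_scale=True)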
# frequency bins: symmetric 2-Hz windows around freq integers
# in bandpass overlapped by 1 Hz
# IN:
# f - ndarray frequencies
# c - ndarray coefs
# opt['lb'] - lower and upper truncation freqs
# ['ub']
# OUT:
# fbin - ndarray, lower bnd of freq bins
# cbin - ndarray, summed abs coef values in these bins
def dct_fbin(f,c,opt):
fb = myl.idx_seg(math.floor(opt['lb']),math.ceil(opt['ub']))
cbin = np.zeros(len(fb)-1);
for j in myl.idx_a(len(fb)-1):
k = myl.intersect(myl.find(f,'>=',fb[j]),
myl.find(f,'<=',fb[j+1]))
cbin[j] = sum(abs(c[k]))
fbin = fb[myl.idx_a(len(fb)-1)]
return fbin, cbin
# spectral moments
# IN:
# c - ndarray, coefficients
# f - ndarray, related frequencies <1:len(c)>
# n - number of spectral moments <3>
# OUT:
# m - ndarray moments (increasing)
def specmom(c,f=[],n=3):
if len(f)==0:
f = myl.idx_a(len(c))+1
c = abs(c)
s = sum(c)
k=0;
m = np.asarray([])
for i in myl.idx_seg(1,n):
m = myl.push(m, sum(c*((f-k)**i))/s)
k = m[-1]
return m
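# Illustrative sketch for specmom() on a toy 3-bin spectrum (values are
# arbitrary). The first moment is the amplitude-weighted frequency centroid:
# (1*100 + 2*200 + 3*300) / (1+2+3) = 233.33... Hz; the second moment is the
# weighted variance around that centroid.
def _demo_specmom():
    return specmom(np.array([1, 2, 3]), np.array([100, 200, 300]), 2)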
# wrapper around IDCT
# IN:
# c - coef vector derived by dct
# i - indices of coefs to be taken for IDCT; if empty (default),
# all coefs taken)
# OUT:
# y - IDCT result
def idct_bp(c,i=myl.ea()):
if len(i)==0:
return sf.idct(c,norm='ortho')
cr = np.zeros(len(c))
cr[i]=c[i]
return sf.idct(cr)
# mean abs error from IDCT
def dct_mae(c,i,y):
cr = np.zeros(len(c))
cr[i]=c[i]
yr = sf.idct(cr)
return myl.mae(yr,y)
# indices to truncate DCT output to freq band
# IN:
# f - ndarray, all frequencies
# ci - all indices of coef ndarray
# opt['lb'] - lower cutoff freq
# ['ub'] - upper cutoff freq
# OUT:
# i - ndarray, indices in F of elements to be kept
def dct_trunc(f,ci,opt):
if opt['lb']>0:
ihp = myl.find(f,'>=',opt['lb'])
else:
ihp = ci
if opt['ub']>0:
ilp = myl.find(f,'<=',opt['ub'])
else:
ilp = ci
return myl.intersect(ihp,ilp)
# wrapper around wavread and energy calculation
# IN:
# f: wavFileName (any number of channels) or array containing
# the signal (any number of channels=columns)
# opt: energy extraction and postprocessing
# .win, .wintyp, .winparam: window parameters
# .sts: stepsize for energy contour
# .do_preproc: centralizing signal
# .do_out: remove outliers
# .do_interp: linear interpolation over silence
# .do_smooth: smoothing (median or savitzky golay)
# .out dict; see pp_outl()
# .smooth dict; see pp_smooth()
# fs: <-1> needed if f is array
# OUT:
# y: time + energy contour 2-dim np.array
# (1st column: time, other columns: energy)
def wrapper_energy(f,opt = {}, fs = -1):
opt = myl.opt_default(opt,{'wintyp':'hamming',
'winparam':'',
'sts':0.01,
'win':0.05,
'do_preproc': True,
'do_out': False,
'do_interp': False,
'do_smooth': False,
'out': {},
'smooth': {}})
opt['out'] = myl.opt_default(opt['out'], {'f': 3,
'm': 'mean'})
opt['smooth'] = myl.opt_default(opt['smooth'],{"mtd": "sgolay",
"win": 7,
"ord": 3})
if type(f) is str:
s, fs = wavread(f,opt)
else:
if fs < 0:
sys.exit("array input requires sample rate fs. Exit.")
s = f
opt['fs']=fs
# convert to 2-dim array; each column represents a channel
if np.ndim(s)==1:
s = np.expand_dims(s, axis=1)
# output (.T-ed later, reserve first list for time)
y = myl.ea()
# over channels
for i in np.arange(0,s.shape[1]):
e = sig_energy(s[:,i],opt)
# setting outlier to 0
if opt['do_out']:
e = pp_outl(e,opt['out'])
# interpolation over 0
if opt['do_interp']:
e = pp_interp(e)
# smoothing
if opt['do_smooth']:
e = pp_smooth(e,opt['smooth'])
# <0 -> 0
e[myl.find(e,'<',0)]=0
y = myl.push(y,e)
# output
if np.ndim(y)==1:
y = np.expand_dims(y, axis=1)
else:
y = y.T
# concat time as 1st column
sts = opt['sts']
t = np.arange(0,sts*y.shape[0],sts)
if len(t) != y.shape[0]:
while len(t) > y.shape[0]:
t = t[0:len(t)-1]
while len(t) < y.shape[0]:
t = np.append(t,t[-1]+sts)
t = np.expand_dims(t, axis=1)
y = np.concatenate((t,y),axis=1)
return y
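# Illustrative usage sketch for wrapper_energy() on an in-memory signal
# (the noise input, fs=16000 and the step/window sizes are assumptions;
# a wav file name could be passed instead of the array).
def _demo_wrapper_energy():
    fs = 16000
    s = 0.1 * np.random.randn(fs)
    y = wrapper_energy(s, {'sts': 0.01, 'win': 0.05}, fs)
    # y[:, 0]: time stamps (s), y[:, 1]: RMS energy contour
    return y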
### replacing outliers by 0 ###################
def pp_outl(y,opt):
if "m" not in opt:
return y
# ignore zeros
opt['zi'] = True
io = myl.outl_idx(y,opt)
if np.size(io)>0:
y[io] = 0
return y
### interpolation over 0 (+constant extrapolation) #############
def pp_interp(y,opt={}):
xi = myl.find(y,'==',0)
xp = myl.find(y,'>',0)
yp = y[xp]
if "kind" in opt:
f = interpolate.interp1d(xp,yp,kind=opt["kind"],
fill_value=(yp[0],yp[-1]))
yi = f(xi)
else:
yi = np.interp(xi,xp,yp)
y[xi]=yi
return y
#!check
### smoothing ########################################
# remark: savgol_filter() causes warning
# Using a non-tuple sequence for multidimensional indexing is deprecated
# will be out with scipy.signal 1.2.0
# (https://github.com/scipy/scipy/issues/9086)
def pp_smooth(y,opt):
if opt['mtd']=='sgolay':
if len(y) <= opt['win']:
return y
y = sis.savgol_filter(y,opt['win'],opt['ord'])
elif opt['mtd']=='med':
y = sis.medfilt(y,opt['win'])
return y
# calculates energy contour from acoustic signal
# do_preproc per default False. If not yet preprocessed by myl.sig_preproc()
# set to True
# IN:
# x ndarray signal
# opt['fs'] - sample frequency
# ['wintyp'] - <'hamming'>, any type supported by
# scipy.signal.get_window()
# ['winparam'] - <''> additionally needed window parameters,
# scalar, string, list ...
# ['sts'] - stepsize of moving window
# ['win'] - window length
# OUT:
# y ndarray energy contour
def sig_energy(x,opt):
dflt={'wintyp':'hamming','winparam':'','sts':0.01,'win':0.05}
opt = myl.opt_default(opt,dflt)
# stepsize and winlength in samples
sts = round(opt['sts']*opt['fs'])
win = min([math.floor(len(x)/2),round(opt['win']*opt['fs'])])
# weighting window
w = sig_window(opt['wintyp'],win,opt['winparam'])
# energy values
y = np.asarray([])
for j in myl.idx_a(len(x)-win,sts):
s = x[j:j+len(w)]*w
y = myl.push(y,myl.rmsd(s))
return y
# wrapper around windows
# IN:
# typ: any type supported by scipy.signal.get_window()
#   l: <1> window length
# par: <''> additional parameters as string, scalar, list etc
# OUT:
# window array
def sig_window(typ,l=1,par=''):
if typ=='none' or typ=='const':
return np.ones(l)
if ((type(par) is str) and (len(par) == 0)):
return sis.get_window(typ,l)
return sis.get_window((typ,par),l)
# pause detection
# IN:
# s - mono signal
# opt['fs'] - sample frequency
# ['ons'] - idx onset <0> (to be added to time output)
# ['flt']['f'] - filter options, boundary frequencies in Hz
# (2 values for btype 'band', else 1): <8000> (evtl. lowered by fu_filt())
# ['btype'] - <'band'>|'high'|<'low'>
# ['ord'] - butterworth order <5>
# ['fs'] - (internally copied)
# ['l'] - analysis window length (in sec)
# ['l_ref'] - reference window length (in sec)
# ['e_rel'] - min energy quotient analysisWindow/referenceWindow
# ['fbnd'] - True|<False> assume pause at beginning and end of file
# ['n'] - <-1> extract exactly n pauses (if > -1)
# ['min_pau_l'] - min pause length <0.5> sec
# ['min_chunk_l'] - min inter-pausal chunk length <0.2> sec
# ['force_chunk'] - <False>, if True, pause-only is replaced by chunk-only
# ['margin'] - <0> time to reduce pause on both sides (sec; if chunks need init and final silence)
# OUT:
# pau['tp'] 2-dim array of pause [on off] (in sec)
# ['tpi'] 2-dim array of pause [on off] (indices in s = sampleIdx-1 !!)
# ['tc'] 2-dim array of speech chunks [on off] (i.e. non-pause, in sec)
# ['tci'] 2-dim array of speech chunks [on off] (indices)
# ['e_ratio'] - energy ratios corresponding to pauses in ['tp'] (analysisWindow/referenceWindow)
def pau_detector(s,opt={}):
if 'fs' not in opt:
sys.exit('pau_detector: opt does not contain key fs.')
dflt = {'e_rel':0.0767,'l':0.1524,'l_ref':5,'n':-1,'fbnd':False,'ons':0,'force_chunk':False,
'min_pau_l':0.4,'min_chunk_l':0.2,'margin':0,
'flt':{'btype':'low','f':np.asarray([8000]),'ord':5}}
opt = myl.opt_default(opt,dflt)
opt['flt']['fs'] = opt['fs']
## removing DC, low-pass filtering
flt = fu_filt(s,opt['flt'])
y = flt['y']
## pause detection for >=n pauses
t, e_ratio = pau_detector_sub(y,opt)
if len(t)>0:
## extending 1st and last pause to file boundaries
if opt['fbnd']==True:
t[0,0]=0
t[-1,-1]=len(y)-1
## merging pauses across too short chunks
## merging chunks across too small pauses
if (opt['min_pau_l']>0 or opt['min_chunk_l']>0):
t, e_ratio = pau_detector_merge(t,e_ratio,opt)
## too many pauses?
# -> subsequently remove the ones with highest e-ratio
if (opt['n']>0 and len(t)>opt['n']):
t, e_ratio = pau_detector_red(t,e_ratio,opt)
## speech chunks
tc = pau2chunk(t,len(y))
## pause-only -> chunk-only
if (opt['force_chunk']==True and len(tc)==0):
tc = cp.deepcopy(t)
t = np.asarray([])
e_ratio = np.asarray([])
## add onset
t = t+opt['ons']
tc = tc+opt['ons']
## return dict
## incl fields with indices to seconds (index+1=sampleIndex)
pau={'tpi':t, 'tci':tc, 'e_ratio': e_ratio}
pau['tp'] = myl.idx2sec(t,opt['fs'])
pau['tc'] = myl.idx2sec(tc,opt['fs'])
#print(pau)
return pau
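# Illustrative usage sketch for pau_detector() on synthetic audio with a
# silent stretch in the middle (fs, amplitudes and option values are
# assumptions; real speech would be read with wavread()).
def _demo_pau_detector():
    fs = 16000
    speech = 0.3 * np.random.randn(fs)
    s = np.concatenate((speech, np.zeros(fs), speech))
    pau = pau_detector(s, {'fs': fs, 'min_pau_l': 0.4, 'min_chunk_l': 0.2})
    # pau['tp']: [[on off] ...] of pauses (s), pau['tc']: speech chunks (s)
    return pau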
# merging pauses across too short chunks
# merging chunks across too small pauses
# IN:
# t [[on off]...] of pauses
# e [e_rat ...]
# OUT:
# t [[on off]...] merged
# e [e_rat ...] merged (simply mean of merged segments taken)
def pau_detector_merge(t,e,opt):
## min pause and chunk length in samples
mpl = myl.sec2smp(opt['min_pau_l'],opt['fs'])
mcl = myl.sec2smp(opt['min_chunk_l'],opt['fs'])
## merging chunks across short pauses
tm = np.asarray([])
em = np.asarray([])
for i in myl.idx_a(len(t)):
if ((t[i,1]-t[i,0] >= mpl) or
(opt['fbnd']==True and (i==0 or i==len(t)-1))):
tm = myl.push(tm,t[i,:])
em = myl.push(em,e[i])
# nothing done in previous step?
if len(tm)==0:
tm = cp.deepcopy(t)
em = cp.deepcopy(e)
if len(tm)==0:
return t, e
## merging pauses across short chunks
tn = np.asarray([tm[0,:]])
en = np.asarray([em[0]])
if (tn[0,0]<mcl): tn[0,0]=0
for i in np.arange(1,len(tm),1):
if (tm[i,0] - tn[-1,1] < mcl):
tn[-1,1] = tm[i,1]
en[-1] = np.mean([en[-1],em[i]])
else:
tn = myl.push(tn,tm[i,:])
en = myl.push(en,em[i])
#print("t:\n", t, "\ntm:\n", tm, "\ntn:\n", tn) #!v
return tn, en
# pause to chunk intervals
# IN:
# t [[on off]] of pause segments (indices in signal)
# l length of signal vector
# OUT:
# tc [[on off]] of speech chunks
def pau2chunk(t,l):
if len(t)==0:
return np.asarray([[0,l-1]])
if t[0,0]>0:
tc = np.asarray([[0,t[0,0]-1]])
else:
tc = np.asarray([])
for i in np.arange(0,len(t)-1,1):
if t[i,1] < t[i+1,0]-1:
tc = myl.push(tc,[t[i,1]+1,t[i+1,0]-1])
if t[-1,1]<l-1:
tc = myl.push(tc,[t[-1,1]+1,l-1])
return tc
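# Illustrative sketch for pau2chunk(): two pauses in a 12-sample signal
# (toy indices; assumes myl.push() row-appends as it is used elsewhere here).
# Expected speech chunks: [[0 2] [6 7] [10 11]].
def _demo_pau2chunk():
    t = np.array([[3, 5], [8, 9]])
    return pau2chunk(t, 12)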
# called by pau_detector
# IN:
# as for pau_detector
# OUT:
# t [on off]
# e_ratio
def pau_detector_sub(y,opt):
## settings
# reference window span
rl = math.floor(opt['l_ref']*opt['fs'])
# signal length
ls = len(y)
# min pause length
ml = opt['l']*opt['fs']
# global rmse and pause threshold
e_rel = cp.deepcopy(opt['e_rel'])
# global rmse
# as fallback in case reference window is likely to be pause
# almost-zeros excluded (cf percentile) since otherwise pauses
# show a too high influence, i.e. lower the reference too much
# so that too few pauses detected
#e_glob = myl.rmsd(y)
ya = abs(y)
qq = np.percentile(ya,[50])
e_glob = myl.rmsd(ya[ya>qq[0]])
t_glob = opt['e_rel']*e_glob
# stepsize
sts=max([1,math.floor(0.05*opt['fs'])])
# energy calculation in analysis and reference windows
wopt_en = {'win':ml,'rng':[0,ls]}
wopt_ref = {'win':rl,'rng':[0,ls]}
# loop until opt.n criterion is fulfilled
# increasing energy threshold up to 1
while e_rel < 1:
# pause [on off], pause index
t=np.asarray([])
j=0
# [e_y/e_rw] indices as in t
e_ratio=np.asarray([])
i_steps = np.arange(1,ls,sts)
for i in i_steps:
# window
yi = myl.windowing_idx(i,wopt_en)
e_y = myl.rmsd(y[yi])
# energy in reference window
e_r = myl.rmsd(y[myl.windowing_idx(i,wopt_ref)])
# take overall energy as reference if reference window is pause
if (e_r <= t_glob):
e_r = e_glob
# if rmse in window below threshold
if e_y <= e_r*e_rel:
yis = yi[0]
yie = yi[-1]
if len(t)-1==j:
# values belong to already detected pause
if len(t)>0 and yis<t[j,1]:
t[j,1]=yie
                    # possibly needed to throw away superfluous
# pauses with high e_ratio
e_ratio[j]=np.mean([e_ratio[j],e_y/e_r])
else:
t = myl.push(t,[yis, yie])
e_ratio = myl.push(e_ratio,e_y/e_r)
j=j+1
else:
t=myl.push(t,[yis, yie])
e_ratio = myl.push(e_ratio,e_y/e_r)
# (more than) enough pauses detected?
if len(t) >= opt['n']: break
e_rel = e_rel+0.1
if opt['margin']==0 or len(t)==0:
return t, e_ratio
# shorten pauses by margins
mar=int(opt['margin']*opt['fs'])
tm, erm = myl.ea(), myl.ea()
for i in myl.idx_a(len(t)):
# only slim non-init and -fin pauses
if i>0:
ts = t[i,0]+mar
else:
ts = t[i,0]
if i < len(t)-1:
te = t[i,1]-mar
else:
te = t[i,1]
# pause disappeared
if te <= ts:
# ... but needs to be kept
if opt['n']>0:
tm = myl.push(tm,[t[i,0],t[i,1]])
erm = myl.push(erm,e_ratio[i])
continue
# pause still there
tm = myl.push(tm,[ts,te])
erm = myl.push(erm,e_ratio[i])
return tm, erm
def pau_detector_red(t,e_ratio,opt):
# keep boundary pauses
if opt['fbnd']==True:
n=opt['n']-2
#bp = [t[0,],t[-1,]]
bp = np.concatenate((np.array([t[0,]]),np.array([t[-1,]])),axis=0)
ii = np.arange(1,len(t)-1,1)
t = t[ii,]
e_ratio=e_ratio[ii]
else:
n=opt['n']
bp=np.asarray([])
if n==0:
t=[]
# remove pause with highest e_ratio
while len(t)>n:
i = myl.find(e_ratio,'is','max')
j = myl.find(np.arange(1,len(e_ratio),1),'!=',i[0])
t = t[j,]
e_ratio = e_ratio[j]
# re-add boundary pauses if removed
if opt['fbnd']==True:
if len(t)==0:
t=np.concatenate((np.array([bp[0,]]),np.array([bp[1,]])),axis=0)
else:
t=np.concatenate((np.array([bp[0,]]),np.array([t]),np.array([bp[1,]])),axis=0)
return t, e_ratio
# spectral balance calculation according to Fant 2000
# IN:
# sig: signal (vowel segment)
#   fs: sample rate
# opt:
#     'win': length of central window in sec <len(sig)>; <=0 is same as len(sig)
#     'f', 'btype': optional filter spec as in fu_filt_freq() <-1, 'none'>,
#                   i.e. no low-pass filtering by default
# 'domain': <'freq'>|'time'; pre-emp in frequency (Fant) or time domain
# 'alpha': <0.95> for time domain only y[n] = x[n]-alpha*x[n-1]
# if alpha>0 it is interpreted as lower freq threshold for pre-emp
# OUT:
#  sb: spectral balance (splh - spl)
def splh_spl(sig,fs,opt_in={}):
opt = cp.deepcopy(opt_in)
opt = myl.opt_default(opt,{'win':len(sig),'f':-1,'btype':'none',
'domain':'freq','alpha':0.95})
#print(opt)
#myl.stopgo()
## cut out center window ##################################
ls = len(sig)
if opt['win'] <= 0:
opt['win'] = ls
if opt['win'] < ls:
wi = myl.windowing_idx(int(ls/2),
{'rng':[0, ls],
'win':int(opt['win']*fs)})
y = sig[wi]
else:
y = cp.deepcopy(sig)
if len(y)==0:
return np.nan
# reference sound pressure level
p_ref = pRef('spl')
## pre-emp in time domain ####################################
if opt['domain']=='time':
# low pass filtering
if opt['btype'] != 'none':
flt = fu_filt(y,{'fs':fs,'f':opt['f'],'ord':6,
'btype':opt['btype']})
y = flt['y']
yp = pre_emphasis(y,opt['alpha'],fs,False)
y_db = 20*np.log10(myl.rmsd(y)/p_ref)
yp_db = 20*np.log10(myl.rmsd(yp)/p_ref)
#print(yp_db - y_db)
return yp_db - y_db
## pre-emp in frequency domain ##############################
# according to Fant
# actual length of cut signal
n = len(y)
## hamming windowing
y *= np.hamming(n)
## spectrum
Y = np.fft.fft(y,n)
N = int(len(Y)/2)
## frequency components
XN = np.fft.fftfreq(n,d=1/fs)
X = XN[0:N]
# same as X = np.linspace(0, fs/2, N, endpoint=True)
## amplitudes
# sqrt(Y.real**2 + Y.imag**2)
# to be normalized:
# *2 since only half of transform is used
# /N since output needs to be normalized by number of samples
# (tested on sinus, cf
# http://www.cbcity.de/die-fft-mit-python-einfach-erklaert)
a = 2*np.abs(Y[:N])/N
## vowel-relevant upper frequency boundary
if opt['btype'] != 'none':
vi = fu_filt_freq(X,opt)
if len(vi)>0:
X = X[vi]
a = a[vi]
## Fant preemphasis filter (Fant et al 2000, p10f eq 20)
preemp = 10*np.log10((1+X**2/200**2)/(1+X**2/5000**2))
ap = 10*np.log10(a)+preemp
# retransform to absolute scale
ap = 10**(ap/10)
# corresponds to gain values in Fant 2000, p11
#for i in myl.idx(a):
# print(X[i],preemp[i])
#myl.stopgo()
## get sound pressure level of both spectra
# as 20*log10(P_eff/P_ref)
spl = 20*np.log10(myl.rmsd(a)/p_ref)
splh = 20*np.log10(myl.rmsd(ap)/p_ref)
## get energy level of both spectra
#spl = 20*np.log10(myl.mse(a)/p_ref)
#splh = 20*np.log10(myl.mse(ap)/p_ref)
## spectral balance
sb = splh-spl
#print(spl,splh,sb)
#myl.stopgo()
#fig = plt.figure()
#plt.plot(X,20*np.log10(a),'b')
#plt.plot(X,20*np.log10(preemp),'g')
#plt.plot(X,20*np.log10(ap),'r')
#plt.show()
return sb
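# Illustrative usage sketch for splh_spl() on a synthetic two-harmonic
# "vowel" (fs, frequencies and amplitudes are placeholder assumptions;
# in practice a vowel segment cut from a recording would be passed).
def _demo_splh_spl():
    fs = 16000
    t = np.arange(0, int(0.05 * fs)) / fs
    sig = np.sin(2 * np.pi * 200 * t) + 0.3 * np.sin(2 * np.pi * 2000 * t)
    return splh_spl(sig, fs, {'domain': 'freq'})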
# returns indices of freq in x fullfilling conditions in opt
# IN:
# X: freq array
# opt: 'btype' - 'none'|'low'|'high'|'band'|'stop'
# 'f': 1 freq for low|high, 2 freq for band|stop
# OUT:
# i: indices in X fulfilling condition
def fu_filt_freq(X,opt):
typ = opt['btype']
f = opt['f']
# all indices
if typ=='none':
return myl.idx_a(len(X))
# error handling
if re.search('(band|stop)',typ) and (not myl.listType(f)):
print('filter type requires frequency list. Done nothing.')
return myl.idx_a(len(X))
if re.search('(low|high)',typ) and myl.listType(f):
print('filter type requires only 1 frequency value. Done nothing.')
return myl.idx_a(len(X))
    if typ=='low':
        return np.nonzero(X<=f)[0]
    elif typ=='high':
        return np.nonzero(X>=f)[0]
    elif typ == 'band':
        i = set(np.nonzero(X>=f[0])[0])
        return np.sort(np.array(list(i.intersection(np.nonzero(X<=f[1])[0]))))
    elif typ == 'stop':
        i = set(np.nonzero(X<=f[0])[0])
        return np.sort(np.array(list(i.union(np.nonzero(X>=f[1])[0]))))
return myl.idx_a(len(X))
# returns reference levels for typ
# IN:
# typ
# 'spl': sound pressure level
# 'i': intensity level
# OUT:
# corresponding reference level
def pRef(typ):
if typ=='spl':
return 2*10**(-5)
return 10**(-12)
# syllable nucleus detection
# IN:
# s - mono signal
# opt['fs'] - sample frequency
# ['ons'] - onset in sec <0> (to be added to time output)
# ['flt']['f'] - filter options, boundary frequencies in Hz
# (2 values for btype 'band', else 1): <np.asarray([200,4000])>
# ['btype'] - <'band'>|'high'|'low'
# ['ord'] - butterworth order <5>
# ['fs'] - (internally copied)
# ['l'] - analysis window length
# ['l_ref'] - reference window length
# ['d_min'] - min distance between subsequent nuclei (in sec)
# ['e_min'] - min energy required for nucleus as a proportion to max energy <0.16>
# ['e_rel'] - min energy quotient analysisWindow/referenceWindow
#       ['e_val']      - quotient specifying how deep the energy valley between
#                    two nucleus candidates must be, measured relative to the
#                    lower-energy candidate. The lower the value, the deeper
#                    the required valley between two peaks. Meaningful range
#                    ]0, 1]. Recommended range: [0.9 1[
# ['center'] - boolean; subtract mean energy
# OUT:
# ncl['t'] - vector of syl ncl time stamps (in sec)
# ['ti'] - corresponding vector idx in s
# ['e_ratio'] - corresponding energy ratios (analysisWindow/referenceWindow)
# bnd['t'] - vector of syl boundary time stamps (in sec)
# ['ti'] - corresponding vector idx in s
# ['e_ratio'] - corresponding energy ratios (analysisWindow/referenceWindow)
def syl_ncl(s,opt={}):
## settings
if 'fs' not in opt:
sys.exit('syl_ncl: opt does not contain key fs.')
dflt = {'flt':{'f':np.asarray([200,4000]),'btype':'band','ord':5},
'e_rel':1.05,'l':0.08,'l_ref':0.15, 'd_min':0.12, 'e_min':0.1,
'ons':0, 'e_val': 1, 'center': False}
opt = myl.opt_default(opt,dflt)
opt['flt']['fs'] = opt['fs']
if syl_ncl_trouble(s,opt):
t = np.asarray([round(len(s)/2+opt['ons'])])
ncl = {'ti':t, 't':myl.idx2sec(t,opt['fs']), 'e_ratio':[0]}
bnd = cp.deepcopy(ncl)
return ncl, bnd
# reference window length
rws = math.floor(opt['l_ref']*opt['fs'])
# energy win length
ml = math.floor(opt['l']*opt['fs'])
# stepsize
sts = max([1,math.floor(0.03*opt['fs'])])
# minimum distance between subsequent nuclei
# (in indices)
#md = math.floor(opt['d_min']*opt['fs']/sts)
md = math.floor(opt['d_min']*opt['fs'])
# bandpass filtering
flt = fu_filt(s,opt['flt'])
y = flt['y']
# signal length
ls = len(y)
# minimum energy as proportion of maximum energy found
e_y = np.asarray([])
i_steps = np.arange(1,ls,sts)
for i in i_steps:
yi = np.arange(i,min([ls,i+ml-1]),1)
e_y = np.append(e_y,myl.rmsd(y[yi]))
if bool(opt['center']):
e_y -= np.mean(e_y)
e_min = opt['e_min']*max(e_y)
# output vector collecting nucleus sample indices
t = np.asarray([])
all_i = np.asarray([])
all_e = np.asarray([])
all_r = np.asarray([])
# energy calculation in analysis and reference windows
wopt_en = {'win':ml,'rng':[0,ls]}
wopt_ref = {'win':rws,'rng':[0,ls]}
for i in i_steps:
yi = myl.windowing_idx(i,wopt_en)
#yi = np.arange(yw[0],yw[1],1)
ys = y[yi]
e_y = myl.rmsd(ys)
#print(ys,'->',e_y)
ri = myl.windowing_idx(i,wopt_ref)
#ri = np.arange(rw[0],rw[1],1)
rs = y[ri]
e_rw = myl.rmsd(rs)
all_i = np.append(all_i,i)
all_e = np.append(all_e,e_y)
all_r = np.append(all_r,e_rw)
# local energy maxima
# (do not use min duration md for order option, since local
# maximum might be obscured already by energy increase
# towards neighboring peak further away than md, and not only by
# closer than md peaks)
idx = sis.argrelmax(all_e,order=1)
#plot_sylncl(all_e,idx) #!v
#print(opt["ons"]/opt["fs"] + np.array(idx)*sts/opt["fs"]) #!v
#myl.stopgo() #!v
### maxima related to syl ncl
## a) energy constraints
# timestamps (idx)
tx = np.asarray([])
# energy ratios
e_ratiox = np.asarray([])
# idx in all_i
tix = np.asarray([]).astype(int)
for i in idx[0]:
# valley between this and previous nucleus deep enough?
if len(tix)>0:
ie = all_e[tix[-1]:i]
if len(ie)<3:
continue
valley = np.min(ie)
nclmin = np.min([ie[0],all_e[i]])
if valley >= opt['e_val'] * nclmin:
# replace previous nucleus by current one
if all_e[i] > ie[0]: #!n
all_e[tix[-1]] = all_e[i] #!n
tx[-1] = all_i[i] #!n
tix[-1] = i #!n
e_ratiox[-1] = all_e[i]/all_r[i] #!n
#print("valley constraint -- tx:", all_i[i]/opt["fs"], "nclmin:", nclmin, "valley:", valley, "ie0:", ie[0], "all_e:", all_e[i], "--> skip!") #!v
continue
if ((all_e[i] >= all_r[i]*opt['e_rel']) and (all_e[i] > e_min)):
tx = np.append(tx,all_i[i])
tix = np.append(tix,i)
e_ratiox = np.append(e_ratiox, all_e[i]/all_r[i])
#else: #!v
# print("min_en constraint -- tx:", all_i[i]/opt["fs"], "all_e:", all_e[i], "all_r:", all_r[i], "e_min:", e_min, "--> skip!") #!v
#print(len(tx)) #!v
if len(tx)==0:
dflt = {'ti':myl.ea(),
't':myl.ea(),
'e_ratio':myl.ea()}
return dflt, dflt
#plot_sylncl(all_e,tix) #!v
## b) min duration constraints
# init by first found ncl
t = np.array([tx[0]])
e_ratio = np.array([e_ratiox[0]])
# idx in all_i
ti = np.array([tix[0]]).astype(int)
for i in range(1,len(tx)):
# ncl too close
if np.abs(tx[i]-t[-1]) < md:
# current ncl with higher energy: replace last stored one
if e_ratiox[i] > e_ratio[-1]:
t[-1] = tx[i]
ti[-1] = tix[i]
e_ratio[-1] = e_ratiox[i]
else:
t = np.append(t,tx[i])
ti = np.append(ti,tix[i])
e_ratio = np.append(e_ratio,e_ratiox[i])
#plot_sylncl(all_e,ti) #!v
### minima related to syl bnd
tb = np.asarray([])
e_ratio_b = np.asarray([])
if len(t)>1:
for i in range(len(ti)-1):
j = myl.idx_seg(ti[i],ti[i+1])
j_min = myl.find(all_e[j],'is','min')
if len(j_min)==0: j_min=[0]
# bnd idx
bj = j[0]+j_min[0]
tb = np.append(tb,all_i[bj])
e_ratio_b = np.append(e_ratio_b, all_e[bj]/all_r[bj])
# add onset
t = t+opt['ons']
tb = tb+opt['ons']
# output dict,
# incl idx to seconds
ncl = {'ti':t, 't':myl.idx2sec(t,opt['fs']), 'e_ratio':e_ratio}
bnd = {'ti':tb, 't':myl.idx2sec(tb,opt['fs']), 'e_ratio':e_ratio_b}
#print(ncl['t'], e_ratio)
return ncl, bnd
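# Illustrative usage sketch for syl_ncl() on an amplitude-modulated tone
# (4 "syllables" per second; fs, carrier and modulation rate are assumptions).
# Roughly four nuclei near the AM peaks would be expected.
def _demo_syl_ncl():
    fs = 16000
    t = np.arange(0, fs) / fs
    am = 0.5 * (1 + np.sin(2 * np.pi * 4 * t - np.pi / 2))
    s = am * np.sin(2 * np.pi * 300 * t)
    ncl, bnd = syl_ncl(s, {'fs': fs})
    # ncl['t']: nucleus time stamps (s), bnd['t']: boundary time stamps (s)
    return ncl, bnd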
def syl_ncl_trouble(s,opt):
if len(s)/opt['fs'] < 0.1:
return True
return False
# wrapper around Butter filter
# IN:
# 1-dim vector
# opt['fs'] - sample rate
# ['f'] - scalar (high/low) or 2-element vector (band) of boundary freqs
#      ['ord'] - butterworth order
# ['btype'] - band|low|high; all other values: signal returned as is
# OUT:
# flt['y'] - filtered signal
# ['b'] - coefs
# ['a']
def fu_filt(y,opt):
# do nothing
if not re.search('^(high|low|band)$',opt['btype']):
return {'y': y, 'b': myl.ea(), 'a': myl.ea()}
# check f<fs/2
if (opt['btype'] == 'low' and opt['f']>=opt['fs']/2):
opt['f']=opt['fs']/2-100
elif (opt['btype'] == 'band' and opt['f'][1]>=opt['fs']/2):
opt['f'][1]=opt['fs']/2-100
fn = opt['f']/(opt['fs']/2)
b, a = sis.butter(opt['ord'], fn, btype=opt['btype'])
yf = sis.filtfilt(b,a,y)
return {'y':yf,'b':b,'a':a}
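# Illustrative usage sketch for fu_filt(): band-pass filtering one second of
# noise to 200-4000 Hz (fs and the boundary frequencies are assumptions).
def _demo_fu_filt():
    fs = 16000
    y = np.random.randn(fs)
    flt = fu_filt(y, {'fs': fs, 'f': np.asarray([200, 4000]),
                      'ord': 5, 'btype': 'band'})
    return flt['y']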
##### discontinuity measurement #######################################
# measures delta and linear fit discontinuities between
# adjacent array elements in terms of:
# - delta
# - reset of regression lines
# - root mean squared deviation between overall regression line and
# -- preceding segment's regression line
# -- following segment's regression line
# -- both, preceding and following, regression lines
# - extrapolation rmsd between following regression line
# and following regression line, extrapolated by regression
# on preceding segment
# IN:
# x: nx2 array [[time val] ...]
# OR
# nx1 array [val ...]
# for the latter indices are taken as time stamps
# ts: nx1 array [time ...] of time stamps (or indices for size(x)=nx1)
# at which to calculate discontinuity; if empty, discontinuity is
# calculated at each point in time. If size(x)=nx1 ts MUST contain
# indices
# nx2 array [[t_off t_on] ...] to additionally account for pauses
# opt: dict
# .win: <'glob'>|'loc' calculate discontinuity over entire sequence
# or within window
# .l: <3> if win==loc, length of window in sec or idx
# (splitpoint - .l : splitpoint + .l)
# .do_plot: <0> plots orig contour and linear stylization
# .plot: <{}> dict with plotting options; cf. discont_seg()
# OUT:
# d dict
# (s1: pre-bnd segment [i-l,i[,
# s2: post-bnd segment [i,i+l]
# sc: joint segment [i-l,i+l])
# dlt: delta
# res: reset
# ry1: s1, rmsd between joint vs pre-bnd fit
# ry2: s2, rmsd between joint vs post-bnd fit
# ryc: sc, rmsd between joint vs pre+post-bnd fit
# ry2e: s2: rmsd between pre-bnd fit extrapolated to s2 and post-bnd fit
# rx1: s1, rmsd between joint fit and pre-boundary x-values
# rx2: s2, rmsd between joint fit and post-boundary x-values
# rxc: sc, rmsd between joint fit and pre+post-boundary x-values
# rr1: s1, ratio rmse(joint_fit)/rmse(pre-bnd_fit)
# rr2: s2, ratio rmse(joint_fit)/rmse(post-bnd_fit)
# rrc: sc, ratio rmse(joint_fit)/rmse(pre+post-bnd_fit)
# ra1: c1-rate s1
# ra2: c1-rate s2
# dlt_ra: ra2-ra1
# s1_c3: cubic fitting coefs of s1
# s1_c2
# s1_c1
# s1_c0
# s2_c3: cubic fitting coefs of s2
# s2_c2
# s2_c1
# s2_c0
# dlt_c3: s2_c3-s1_c3
# dlt_c2: s2_c2-s1_c2
# dlt_c1: s2_c1-s1_c1
# dlt_c0: s2_c0-s1_c0
# eucl_c: euclDist(s1_c*,s2_c*)
# corr_c: corr(s1_c*,s2_c*)
# v1: variance in s1
# v2: variance in s2
# vc: variance in sc
# vr: variance ratio (mean(v1,v2))/vc
# dlt_v: v2-v1
# m1: mean in s1
# m2: mean in s2
# dlt_m: m2-m1
# p: pause length (in sec or idx depending on numcol(x);
#      always 0 if ts is empty or 1-dim)
# i in each list refers to discontinuity between x[i-1] and x[i]
# dimension of each list: if len(ts)==0: n-1 array (first x-element skipped)
# else: mx6; m is number of ts-elements in range of x[:,0],
# resp. in index range of x[1:-1]
## REMARKS:
# for all variables but corr_c and vr higher values indicate higher discontinuity
## variables:
# x1: original f0 contour for s1
# x2: original f0 contour for s2
# xc: original f0 contour for sc
# y1: line fitted on segment a
# y2: line fitted on segment b
# yc: line fitted on segments a+b
# yc1: yc part for x1
# yc2: yc part for x2
# ye: x1/y1-fitted line for x2
# cu1: cubic fit coefs of time-nrmd s1
# cu2: cubic fit coefs of time-nrmd s2
# yu1: polyval(cu1)
# yu2: polyval(cu2); yu1 and yu2 are cut to same length
def discont(x,ts=[],opt={}):
# time: first column or indices
if np.ndim(x)==1:
t = np.arange(0,len(x))
x = np.asarray(x)
else:
t = x[:,0]
x = x[:,1]
# tsi: index pairs in x for which to derive discont values
#      [[infimum supremum]...] s1 right-aligned to infimum, s2 left-aligned to supremum
# for 1-dim ts both values are adjacent [[i-1, i]...]
# zp: zero pause True for 1-dim ts input, False for 2-dim
tsi, zp = discont_tsi(t,ts)
# opt init
opt = myl.opt_default(opt,{'win':'glob','l':3,'do_plot':False,
'plot': {}})
# output
d = discont_init()
# linear fits
# over time stamp pairs
for ii in tsi:
## delta
d['dlt'].append(x[ii[1]]-x[ii[0]])
## segments (x, y values of pre-, post, joint segments)
t1,t2,tc,x1,x2,xc,y1,y2,yc,yc1,yc2,ye,cu1,cu2,yu1,yu2 = discont_seg(t,x,ii,opt)
d = discont_feat(d,t1,t2,tc,x1,x2,xc,y1,y2,yc,yc1,yc2,ye,cu1,cu2,yu1,yu2,zp)
# to np.array
for x in d:
d[x] = np.asarray(d[x])
return d
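# Illustrative usage sketch for discont(): local discontinuity features around
# index 50 of a piecewise-linear contour with a +20 jump (all values are toy
# assumptions; real input would be an f0 contour with time stamps).
def _demo_discont():
    x = np.concatenate((np.linspace(100, 110, 50), np.linspace(130, 120, 50)))
    d = discont(x, ts=[50], opt={'win': 'loc', 'l': 20})
    # d['dlt'][0] reflects the +20 jump; d['res'] is the regression-line reset,
    # d['ry2e'] the extrapolation error of the pre-boundary fit.
    return d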
# init discont dict
def discont_init():
return {"dlt": [],
"res": [],
"ry1": [],
"ry2": [],
"ryc": [],
"ry2e": [],
"rx1": [],
"rx2": [],
"rxc": [],
"rr1": [],
"rr2": [],
"rrc": [],
"ra1": [],
"ra2": [],
"dlt_ra": [],
"s1_c3": [],
"s1_c2": [],
"s1_c1": [],
"s1_c0": [],
"s2_c3": [],
"s2_c2": [],
"s2_c1": [],
"s2_c0": [],
"dlt_c3": [],
"dlt_c2": [],
"dlt_c1": [],
"dlt_c0": [],
"eucl_c": [],
"corr_c": [],
"eucl_y": [],
"corr_y": [],
"v1": [],
"v2": [],
"vc": [],
"vr": [],
"dlt_v": [],
"m1": [],
"m2": [],
"dlt_m": [],
"p": []}
# pre/post-boundary and joint segments
def discont_seg(t,x,ii,opt):
# preceding, following segment indices
i1, i2 = discont_idx(t,ii,opt)
#print(ii,"\n-> ", i1,"\n-> ", i2) #!v
#myl.stopgo() #!v
t1, t2, x1, x2 = t[i1], t[i2], x[i1], x[i2]
tc = np.concatenate((t1,t2))
xc = np.concatenate((x1,x2))
# normalized time (only needed for reported polycoefs, not
    # for output lines)
tn1 = myl.nrm_vec(t1,{'mtd': 'minmax',
'rng': [-1, 1]})
tn2 = myl.nrm_vec(t2,{'mtd': 'minmax',
'rng': [-1, 1]})
# linear fit coefs
c1 = myPolyfit(t1,x1,1)
c2 = myPolyfit(t2,x2,1)
cc = myPolyfit(tc,xc,1)
# cubic fit coefs (for later shape comparison)
cu1 = myPolyfit(tn1,x1,3)
cu2 = myPolyfit(tn2,x2,3)
yu1 = np.polyval(cu1,tn1)
yu2 = np.polyval(cu2,tn2)
# cut to same length (from boundary)
ld = len(yu1)-len(yu2)
if ld>0:
yu1=yu1[ld:len(yu1)]
elif ld<0:
yu2=yu2[0:ld]
# robust treatment
while len(yu2)<len(yu1):
yu2 = np.append(yu2,yu2[-1])
while len(yu1)<len(yu2):
yu1 = np.append(yu1,yu1[-1])
# fit values
y1 = np.polyval(c1,t1)
y2 = np.polyval(c2,t2)
yc = np.polyval(cc,tc)
# distrib yc over t1 and t2
yc1, yc2 = yc[0:len(y1)], yc[len(y1):len(yc)]
# linear extrapolation
ye = np.polyval(c1,t2)
# legend_loc: 'upper left'
## plotting linear fits
# segment boundary
xb = []
xb.extend(yu1)
xb.extend(yu2)
xb.extend(ye)
xb.extend(x1)
xb.extend(x2)
xb = np.asarray(xb)
if opt['do_plot'] and len(xb)>0:
lw1, lw2 = 5,3
yb = [np.min(xb), np.max(xb)]
tb = [t1[-1], t1[-1]]
po = opt["plot"]
po = myl.opt_default(po,{"legend_loc": "best",
"fs_legend": 35,
"fs": (20,12),
"fs_title": 40,
"fs_ylab": 30,
"fs_xlab": 30,
"title": "",
"xlab": "time",
"ylab": ""})
po["ls"] = {"o": "--k", "b": "-k", "s1": "-g", "s2": "-g",
"sc": "-r", "se": "-c"}
po["lw"] = {"o": lw2, "b": lw2, "s1": lw1, "s2": lw1, "sc": lw1, "se": lw2}
po["legend_order"] = ["o", "b", "s1", "s2", "sc", "se"]
po["legend_lab"] = {"o": "orig", "b": "bnd", "s1": "fit s1", "s2": "fit s2",
"sc": "fit joint", "se": "pred s2"}
myl.myPlot({"o": tc, "b": tb, "s1": t1, "s2": t2, "sc": tc, "se": t2},
{"o": xc, "b": yb, "s1": y1, "s2": y2, "sc": yc, "se": ye},
po)
return t1,t2,tc,x1,x2,xc,y1,y2,yc,yc1,yc2,ye,cu1,cu2,yu1,yu2
## features
def discont_feat(d,t1,t2,tc,x1,x2,xc,y1,y2,yc,yc1,yc2,ye,cu1,cu2,yu1,yu2,zp):
## reset
d["res"].append(y2[0]-y1[-1])
## y-RMSD between regression lines: 1-pre, 2-post, c-all
d["ry1"].append(myl.rmsd(yc1,y1))
d["ry2"].append(myl.rmsd(yc2,y2))
d["ryc"].append(myl.rmsd(yc,np.concatenate((y1,y2))))
## extrapolation y-RMSD
d["ry2e"].append(myl.rmsd(y2,ye))
## xy-RMSD between regression lines and input values: 1-pre, 2-post, c-all
rx1 = myl.rmsd(yc1,x1)
rx2 = myl.rmsd(yc2,x2)
rxc = myl.rmsd(yc,xc)
d["rx1"].append(rx1)
d["rx2"].append(rx2)
d["rxc"].append(rxc)
## xy-RMSD ratios of joint fit divided by single fits RMSD
# (the higher, the more discontinuity)
d["rr1"].append(myl.robust_div(rx1,myl.rmsd(y1,x1)))
d["rr2"].append(myl.robust_div(rx2,myl.rmsd(y2,x2)))
d["rrc"].append(myl.robust_div(rxc,myl.rmsd(np.concatenate((y1,y2)),xc)))
## rates
d["ra1"].append(drate(t1,y1))
d["ra2"].append(drate(t2,y2))
d["dlt_ra"].append(d["ra2"][-1]-d["ra1"][-1])
## means
d["m1"].append(np.mean(x1))
d["m2"].append(np.mean(x2))
d["dlt_m"].append(d["m2"][-1]-d["m1"][-1])
## variances
d["v1"].append(np.var(x1))
d["v2"].append(np.var(x2))
d["vc"].append(np.var(xc))
d["vr"].append(np.mean([d["v1"][-1],d["v2"][-1]])/d["vc"][-1])
d["dlt_v"].append(d["v2"][-1]-d["v1"][-1])
## shapes
d["s1_c3"].append(cu1[0])
d["s1_c2"].append(cu1[1])
d["s1_c1"].append(cu1[2])
d["s1_c0"].append(cu1[3])
d["s2_c3"].append(cu2[0])
d["s2_c2"].append(cu2[1])
d["s2_c1"].append(cu2[2])
d["s2_c0"].append(cu2[3])
d["eucl_c"].append(myl.dist_eucl(cu1,cu2))
rr = np.corrcoef(cu1,cu2)
d["corr_c"].append(rr[0,1])
d["dlt_c3"].append(d["s2_c3"][-1]-d["s1_c3"][-1])
d["dlt_c2"].append(d["s2_c2"][-1]-d["s1_c2"][-1])
d["dlt_c1"].append(d["s2_c1"][-1]-d["s1_c1"][-1])
d["dlt_c0"].append(d["s2_c0"][-1]-d["s1_c0"][-1])
d["eucl_y"].append(myl.dist_eucl(yu1,yu2))
rry = np.corrcoef(yu1,yu2)
d["corr_y"].append(rry[0,1])
## pause
if zp:
d["p"].append(0)
else:
d["p"].append(t2[0]-t1[-1])
return d
# returns declination rate of y over time t
# IN:
# t: time vector
# y: vector of same length
# OUT:
# r: change in y over time t
def drate(t,y):
if len(t)==0 or len(y)==0:
return np.nan
    return (y[-1]-y[0])/(t[-1]-t[0])
# indices in t for which to derive discont values
# IN:
# t: all time stamps/indices
# ts: selected time stamps/indices, can be empty, 1-dim or 2-dim
# OUT:
# ii
# ==t-index pairs [[i-1, i]...] for i>=1, if ts empty
# ==index of [[infimum supremum]...] t-elements for ts stamps or intervals, else
# zp
#      zero pause; True for empty or 1-dim ts, False for 2-dim
def discont_tsi(t,ts):
ii = []
# return all index pairs [i-1, i]
if len(ts)==0:
for i in np.arange(1,len(t)):
ii = myl.push(ii,[i-1,i])
        return ii, True
# zero pause
if myl.of_list_type(ts[0]):
zp = False
else:
zp = True
# return selected index pairs
for x in ts:
# supremum and infimum
if myl.of_list_type(x):
xi, xs = x[0], x[1]
else:
xi, xs = x, x
if xi==xs:
op = '<'
else:
op = '<='
sup = myl.find(t,'>=',xs)
inf = myl.find(t,op,xi)
if len(sup)==0 or len(inf)==0 or sup[0]==0 or inf[-1]==0:
continue
ii.append([inf[-1],sup[0]])
return ii, zp
# preceding, following segment indices around t[i]
# defined by opt[win|l]
# IN:
# t: 1- or 2-dim time array [timeStamp ...] or [[t_off t_on] ...], the latter
# accounting for pauses
# ii: current idx pair in t
# opt: cf discont
# OUT:
# i1, i2: pre/post boundary index arrays
# REMARK:
# i is part of i2
def discont_idx(t,ii,opt):
lx = len(t)
i, j = ii[0], ii[1]
# glob: preceding, following segment from start/till end
if opt['win']=='glob':
return np.arange(0,ii[0]), np.arange(ii[1],lx)
i1 = myl.find_interval(t,[t[i]-opt['l'], t[i]])
i2 = myl.find_interval(t,[t[j], t[j]+opt['l']])
return i1, i2
#### discontinuity analysis: some bugs, use discont() instead
# measures delta and linear fit discontinuities between
# adjacent array elements in terms of:
# - delta
# - reset of regression lines
# - root mean squared deviation between overall regression line and
# -- preceding segment's regression line
# -- following segment's regression line
# IN:
# x: nx2 array [[time val] ...]
# OR
# nx1 array [val ...]
# for the latter indices are taken as time stamps
# OUT:
# d: (n-1)x6 array [[residuum delta reset rms_total rms_pre rms_post] ...]
# d[i,] refers to discontinuity between x[i-1,] and x[i,]
# Example:
# >> import numpy as np
# >> import discont as ds
# >> x = np.random.rand(20)
# >> d = ds.discont(x)
def discont_deprec(x):
do_plot=False
# time: first column or indices
lx = len(x)
if np.ndim(x)==1:
t = np.arange(0,lx)
x = np.asarray(x)
else:
t = x[:,0]
x = x[:,1]
# output
d = np.asarray([])
# overall linear regression
c = myPolyfit(t,x,1)
y = np.polyval(c,t)
if do_plot:
fig = plot_newfig()
plt.plot(t,x,":b",t,y,"-r")
plt.show()
# residuums
resid = x-y
# deltas
ds = np.diff(x)
# linear fits
for i in np.arange(1,lx):
# preceding, following segment
i1, i2 = np.arange(0,i), np.arange(i,lx)
t1, t2, x1, x2 = t[i1], t[i2], x[i1], x[i2]
# linear fit coefs
c1 = myPolyfit(t1,x1,1)
c2 = myPolyfit(t2,x2,1)
# fit values
y1 = np.polyval(c1,t1)
y2 = np.polyval(c2,t2)
# reset
res = y2[0] - y1[-1]
# RMSD: pre, post, all
r1 = myl.rmsd(y[i1],y1)
r2 = myl.rmsd(y[i2],y2)
r12 = myl.rmsd(y,np.concatenate((y1,y2)))
# append to output
d = myl.push(d,[resid[i],ds[i-1],res,r1,r2,r12])
return d
# robust wrapper around polyfit to
# capture too short inputs
# IN:
# x
# y
# o: order <1>
# OUT:
# c: coefs
def myPolyfit(x,y,o=1):
if len(x)==0:
return np.zeros(o+1)
if len(x)<=o:
return myl.push(np.zeros(o),np.mean(y))
return np.polyfit(x,y,o)
# plot extracted syllable nuclei (can be plotted before pruning, too)
# IN:
# y: energy contour
# idx: ncl indices (in y)
def plot_sylncl(y,idx):
x_dict = {"y": myl.idx(y)}
y_dict = {"y": y}
r = [0,0.15]
opt = {"ls": {"y": "-k"}}
# over locmax idxs
for i in myl.idx(idx):
z = "s{}".format(i)
x_dict[z] = [idx[i], idx[i]]
y_dict[z] = r
opt["ls"][z] = "-b"
myl.myPlot(x_dict,y_dict,opt)
# init new figure with onclick->next, keypress->exit
# OUT:
# figureHandle
def plot_newfig():
fig = plt.figure()
cid1 = fig.canvas.mpl_connect('button_press_event', onclick_next)
cid2 = fig.canvas.mpl_connect('key_press_event', onclick_exit)
return fig
# click on plot -> next one
def onclick_next(event):
plt.close()
# press key -> exit
def onclick_exit(event):
sys.exit()
| mit |
jplourenco/bokeh | examples/compat/mpl/polycollection.py | 34 | 1276 | from matplotlib.collections import PolyCollection
import matplotlib.pyplot as plt
import numpy as np
from bokeh import mpl
from bokeh.plotting import output_file, show
# Generate data. In this case, we'll make a bunch of center-points and generate
# vertices by subtracting random offsets from those center-points
numpoly, numverts = 100, 4
centers = 100 * (np.random.random((numpoly, 2)) - 0.5)
offsets = 10 * (np.random.random((numverts, numpoly, 2)) - 0.5)
verts = centers + offsets
verts = np.swapaxes(verts, 0, 1)
# In your case, "verts" might be something like:
# verts = zip(zip(lon1, lat1), zip(lon2, lat2), ...)
# If "data" in your case is a numpy array, there are cleaner ways to reorder
# things to suit.
facecolors = ['red', 'green', 'blue', 'cyan', 'yellow', 'magenta', 'black']
edgecolors = ['cyan', 'yellow', 'magenta', 'black', 'red', 'green', 'blue']
widths = [5, 10, 20, 10, 5]
# Make the collection and add it to the plot.
col = PolyCollection(verts, facecolor=facecolors, edgecolor=edgecolors,
linewidth=widths, linestyle='--', alpha=0.5)
ax = plt.axes()
ax.add_collection(col)
plt.xlim([-60, 60])
plt.ylim([-60, 60])
plt.title("MPL-PolyCollection support in Bokeh")
output_file("polycollection.html")
show(mpl.to_bokeh())
| bsd-3-clause |
webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/pandas/tests/test_expressions.py | 4 | 16414 | # -*- coding: utf-8 -*-
from __future__ import print_function
# pylint: disable-msg=W0612,E1101
import nose
import re
from numpy.random import randn
import operator
import numpy as np
from numpy.testing import assert_array_equal
from pandas.core.api import DataFrame, Panel
from pandas.computation import expressions as expr
from pandas import compat
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
assert_frame_equal, assert_panel_equal,
assert_panel4d_equal)
import pandas.util.testing as tm
import pandas.core.common as com
from numpy.testing.decorators import slow
if not expr._USE_NUMEXPR:
try:
import numexpr
except ImportError:
msg = "don't have"
else:
msg = "not using"
raise nose.SkipTest("{0} numexpr".format(msg))
_frame = DataFrame(randn(10000, 4), columns=list('ABCD'), dtype='float64')
_frame2 = DataFrame(randn(100, 4), columns = list('ABCD'), dtype='float64')
_mixed = DataFrame({ 'A' : _frame['A'].copy(), 'B' : _frame['B'].astype('float32'), 'C' : _frame['C'].astype('int64'), 'D' : _frame['D'].astype('int32') })
_mixed2 = DataFrame({ 'A' : _frame2['A'].copy(), 'B' : _frame2['B'].astype('float32'), 'C' : _frame2['C'].astype('int64'), 'D' : _frame2['D'].astype('int32') })
_integer = DataFrame(np.random.randint(1, 100, size=(10001, 4)), columns = list('ABCD'), dtype='int64')
_integer2 = DataFrame(np.random.randint(1, 100, size=(101, 4)),
columns=list('ABCD'), dtype='int64')
_frame_panel = Panel(dict(ItemA=_frame.copy(), ItemB=(_frame.copy() + 3), ItemC=_frame.copy(), ItemD=_frame.copy()))
_frame2_panel = Panel(dict(ItemA=_frame2.copy(), ItemB=(_frame2.copy() + 3),
ItemC=_frame2.copy(), ItemD=_frame2.copy()))
_integer_panel = Panel(dict(ItemA=_integer,
ItemB=(_integer + 34).astype('int64')))
_integer2_panel = Panel(dict(ItemA=_integer2,
ItemB=(_integer2 + 34).astype('int64')))
_mixed_panel = Panel(dict(ItemA=_mixed, ItemB=(_mixed + 3)))
_mixed2_panel = Panel(dict(ItemA=_mixed2, ItemB=(_mixed2 + 3)))
class TestExpressions(tm.TestCase):
_multiprocess_can_split_ = False
def setUp(self):
self.frame = _frame.copy()
self.frame2 = _frame2.copy()
self.mixed = _mixed.copy()
self.mixed2 = _mixed2.copy()
self.integer = _integer.copy()
self._MIN_ELEMENTS = expr._MIN_ELEMENTS
def tearDown(self):
expr._MIN_ELEMENTS = self._MIN_ELEMENTS
@nose.tools.nottest
def run_arithmetic_test(self, df, other, assert_func, check_dtype=False,
test_flex=True):
expr._MIN_ELEMENTS = 0
operations = ['add', 'sub', 'mul', 'mod', 'truediv', 'floordiv', 'pow']
if not compat.PY3:
operations.append('div')
for arith in operations:
operator_name = arith
if arith == 'div':
operator_name = 'truediv'
if test_flex:
op = lambda x, y: getattr(df, arith)(y)
op.__name__ = arith
else:
op = getattr(operator, operator_name)
expr.set_use_numexpr(False)
expected = op(df, other)
expr.set_use_numexpr(True)
result = op(df, other)
try:
if check_dtype:
if arith == 'truediv':
assert expected.dtype.kind == 'f'
assert_func(expected, result)
except Exception:
com.pprint_thing("Failed test with operator %r" % op.__name__)
raise
def test_integer_arithmetic(self):
self.run_arithmetic_test(self.integer, self.integer,
assert_frame_equal)
self.run_arithmetic_test(self.integer.icol(0), self.integer.icol(0),
assert_series_equal, check_dtype=True)
@nose.tools.nottest
def run_binary_test(self, df, other, assert_func,
test_flex=False, numexpr_ops=set(['gt', 'lt', 'ge',
'le', 'eq', 'ne'])):
"""
tests solely that the result is the same whether or not numexpr is
enabled. Need to test whether the function does the correct thing
elsewhere.
"""
expr._MIN_ELEMENTS = 0
expr.set_test_mode(True)
operations = ['gt', 'lt', 'ge', 'le', 'eq', 'ne']
for arith in operations:
if test_flex:
op = lambda x, y: getattr(df, arith)(y)
op.__name__ = arith
else:
op = getattr(operator, arith)
expr.set_use_numexpr(False)
expected = op(df, other)
expr.set_use_numexpr(True)
expr.get_test_result()
result = op(df, other)
used_numexpr = expr.get_test_result()
try:
if arith in numexpr_ops:
assert used_numexpr, "Did not use numexpr as expected."
else:
assert not used_numexpr, "Used numexpr unexpectedly."
assert_func(expected, result)
except Exception:
com.pprint_thing("Failed test with operation %r" % arith)
com.pprint_thing("test_flex was %r" % test_flex)
raise
def run_frame(self, df, other, binary_comp=None, run_binary=True,
**kwargs):
self.run_arithmetic_test(df, other, assert_frame_equal,
test_flex=False, **kwargs)
self.run_arithmetic_test(df, other, assert_frame_equal, test_flex=True,
**kwargs)
if run_binary:
if binary_comp is None:
expr.set_use_numexpr(False)
binary_comp = other + 1
expr.set_use_numexpr(True)
self.run_binary_test(df, binary_comp, assert_frame_equal,
test_flex=False, **kwargs)
self.run_binary_test(df, binary_comp, assert_frame_equal,
test_flex=True, **kwargs)
def run_series(self, ser, other, binary_comp=None, **kwargs):
self.run_arithmetic_test(ser, other, assert_series_equal,
test_flex=False, **kwargs)
self.run_arithmetic_test(ser, other, assert_almost_equal,
test_flex=True, **kwargs)
        # Series comparisons use vec_compare rather than numexpr, so the
        # binary test is skipped here:
# if binary_comp is None:
# binary_comp = other + 1
# self.run_binary_test(ser, binary_comp, assert_frame_equal, test_flex=False,
# **kwargs)
# self.run_binary_test(ser, binary_comp, assert_frame_equal, test_flex=True,
# **kwargs)
def run_panel(self, panel, other, binary_comp=None, run_binary=True,
assert_func=assert_panel_equal, **kwargs):
self.run_arithmetic_test(panel, other, assert_func, test_flex=False,
**kwargs)
self.run_arithmetic_test(panel, other, assert_func, test_flex=True,
**kwargs)
if run_binary:
if binary_comp is None:
binary_comp = other + 1
self.run_binary_test(panel, binary_comp, assert_func,
test_flex=False, **kwargs)
self.run_binary_test(panel, binary_comp, assert_func,
test_flex=True, **kwargs)
def test_integer_arithmetic_frame(self):
self.run_frame(self.integer, self.integer)
def test_integer_arithmetic_series(self):
self.run_series(self.integer.icol(0), self.integer.icol(0))
@slow
def test_integer_panel(self):
self.run_panel(_integer2_panel, np.random.randint(1, 100))
    def test_float_arithmetic_frame(self):
self.run_frame(self.frame2, self.frame2)
def test_float_arithmetic_series(self):
self.run_series(self.frame2.icol(0), self.frame2.icol(0))
@slow
def test_float_panel(self):
self.run_panel(_frame2_panel, np.random.randn() + 0.1, binary_comp=0.8)
@slow
def test_panel4d(self):
self.run_panel(tm.makePanel4D(), np.random.randn() + 0.5,
assert_func=assert_panel4d_equal, binary_comp=3)
def test_mixed_arithmetic_frame(self):
# TODO: FIGURE OUT HOW TO GET IT TO WORK...
# can't do arithmetic because comparison methods try to do *entire*
# frame instead of by-column
self.run_frame(self.mixed2, self.mixed2, run_binary=False)
def test_mixed_arithmetic_series(self):
for col in self.mixed2.columns:
self.run_series(self.mixed2[col], self.mixed2[col], binary_comp=4)
@slow
def test_mixed_panel(self):
self.run_panel(_mixed2_panel, np.random.randint(1, 100),
binary_comp=-2)
    def test_float_arithmetic(self):
self.run_arithmetic_test(self.frame, self.frame, assert_frame_equal)
self.run_arithmetic_test(self.frame.icol(0), self.frame.icol(0),
assert_series_equal, check_dtype=True)
def test_mixed_arithmetic(self):
self.run_arithmetic_test(self.mixed, self.mixed, assert_frame_equal)
for col in self.mixed.columns:
self.run_arithmetic_test(self.mixed[col], self.mixed[col],
assert_series_equal)
def test_integer_with_zeros(self):
self.integer *= np.random.randint(0, 2, size=np.shape(self.integer))
self.run_arithmetic_test(self.integer, self.integer, assert_frame_equal)
self.run_arithmetic_test(self.integer.icol(0), self.integer.icol(0),
assert_series_equal)
def test_invalid(self):
# no op
result = expr._can_use_numexpr(operator.add, None, self.frame, self.frame, 'evaluate')
self.assertFalse(result)
# mixed
result = expr._can_use_numexpr(operator.add, '+', self.mixed, self.frame, 'evaluate')
self.assertFalse(result)
# min elements
result = expr._can_use_numexpr(operator.add, '+', self.frame2, self.frame2, 'evaluate')
self.assertFalse(result)
# ok, we only check on first part of expression
result = expr._can_use_numexpr(operator.add, '+', self.frame, self.frame2, 'evaluate')
self.assertTrue(result)
def test_binary_ops(self):
def testit():
for f, f2 in [ (self.frame, self.frame2), (self.mixed, self.mixed2) ]:
for op, op_str in [('add','+'),('sub','-'),('mul','*'),('div','/'),('pow','**')]:
if op == 'div':
op = getattr(operator, 'truediv', None)
else:
op = getattr(operator, op, None)
if op is not None:
result = expr._can_use_numexpr(op, op_str, f, f, 'evaluate')
self.assertNotEqual(result, f._is_mixed_type)
result = expr.evaluate(op, op_str, f, f, use_numexpr=True)
expected = expr.evaluate(op, op_str, f, f, use_numexpr=False)
assert_array_equal(result,expected.values)
result = expr._can_use_numexpr(op, op_str, f2, f2, 'evaluate')
self.assertFalse(result)
expr.set_use_numexpr(False)
testit()
expr.set_use_numexpr(True)
expr.set_numexpr_threads(1)
testit()
expr.set_numexpr_threads()
testit()
def test_boolean_ops(self):
def testit():
for f, f2 in [ (self.frame, self.frame2), (self.mixed, self.mixed2) ]:
f11 = f
f12 = f + 1
f21 = f2
f22 = f2 + 1
for op, op_str in [('gt','>'),('lt','<'),('ge','>='),('le','<='),('eq','=='),('ne','!=')]:
op = getattr(operator,op)
result = expr._can_use_numexpr(op, op_str, f11, f12, 'evaluate')
self.assertNotEqual(result, f11._is_mixed_type)
result = expr.evaluate(op, op_str, f11, f12, use_numexpr=True)
expected = expr.evaluate(op, op_str, f11, f12, use_numexpr=False)
assert_array_equal(result,expected.values)
result = expr._can_use_numexpr(op, op_str, f21, f22, 'evaluate')
self.assertFalse(result)
expr.set_use_numexpr(False)
testit()
expr.set_use_numexpr(True)
expr.set_numexpr_threads(1)
testit()
expr.set_numexpr_threads()
testit()
def test_where(self):
def testit():
for f in [ self.frame, self.frame2, self.mixed, self.mixed2 ]:
for cond in [ True, False ]:
c = np.empty(f.shape,dtype=np.bool_)
c.fill(cond)
result = expr.where(c, f.values, f.values+1)
expected = np.where(c, f.values, f.values+1)
assert_array_equal(result,expected)
expr.set_use_numexpr(False)
testit()
expr.set_use_numexpr(True)
expr.set_numexpr_threads(1)
testit()
expr.set_numexpr_threads()
testit()
def test_bool_ops_raise_on_arithmetic(self):
df = DataFrame({'a': np.random.rand(10) > 0.5,
'b': np.random.rand(10) > 0.5})
names = 'div', 'truediv', 'floordiv', 'pow'
ops = '/', '/', '//', '**'
msg = 'operator %r not implemented for bool dtypes'
for op, name in zip(ops, names):
if not compat.PY3 or name != 'div':
f = getattr(operator, name)
err_msg = re.escape(msg % op)
with tm.assertRaisesRegexp(NotImplementedError, err_msg):
f(df, df)
with tm.assertRaisesRegexp(NotImplementedError, err_msg):
f(df.a, df.b)
with tm.assertRaisesRegexp(NotImplementedError, err_msg):
f(df.a, True)
with tm.assertRaisesRegexp(NotImplementedError, err_msg):
f(False, df.a)
with tm.assertRaisesRegexp(TypeError, err_msg):
f(False, df)
with tm.assertRaisesRegexp(TypeError, err_msg):
f(df, True)
def test_bool_ops_warn_on_arithmetic(self):
n = 10
df = DataFrame({'a': np.random.rand(n) > 0.5,
'b': np.random.rand(n) > 0.5})
names = 'add', 'mul', 'sub'
ops = '+', '*', '-'
subs = {'+': '|', '*': '&', '-': '^'}
sub_funcs = {'|': 'or_', '&': 'and_', '^': 'xor'}
for op, name in zip(ops, names):
f = getattr(operator, name)
fe = getattr(operator, sub_funcs[subs[op]])
with tm.use_numexpr(True, min_elements=5):
with tm.assert_produces_warning():
r = f(df, df)
e = fe(df, df)
tm.assert_frame_equal(r, e)
with tm.assert_produces_warning():
r = f(df.a, df.b)
e = fe(df.a, df.b)
tm.assert_series_equal(r, e)
with tm.assert_produces_warning():
r = f(df.a, True)
e = fe(df.a, True)
tm.assert_series_equal(r, e)
with tm.assert_produces_warning():
r = f(False, df.a)
e = fe(False, df.a)
tm.assert_series_equal(r, e)
with tm.assert_produces_warning():
r = f(False, df)
e = fe(False, df)
tm.assert_frame_equal(r, e)
with tm.assert_produces_warning():
r = f(df, True)
e = fe(df, True)
tm.assert_frame_equal(r, e)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-2.0 |
xyguo/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 355 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>,
# Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
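# Track the test/train error of each ensemble after every boosting iteration;
# staged_predict yields the ensemble's predictions as estimators are added.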
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
| bsd-3-clause |
wzbozon/scikit-learn | examples/model_selection/plot_roc.py | 96 | 4487 | """
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
##############################################################################
# Plot of a ROC curve for a specific class
plt.figure()
plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
##############################################################################
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area by interpolating every per-class
# curve onto a common grid of false positive rates (the per-class fpr/tpr arrays
# have different lengths, so they cannot be averaged element-wise).
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
tpr["macro"] = sum(np.interp(all_fpr, fpr[i], tpr[i])
                   for i in range(n_classes)) / n_classes
fpr["macro"] = all_fpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
linewidth=2)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
linewidth=2)
for i in range(n_classes):
plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
volodymyrss/3ML | threeML/plugins/SherpaLike.py | 2 | 7051 | import numpy as np
from sherpa.astro import datastack
from sherpa.models import TableModel
from threeML.plugin_prototype import PluginPrototype
import matplotlib.pyplot as plt
__instrument_name = "All OGIP compliant instruments"
class Likelihood2SherpaTableModel():
"""Creates from a 3ML Likelihhod model a table model that can be used in sherpa.
It should be used to convert a threeML.models.LikelihoodModel
into a sherpa.models.TableModel such that values are evaluated
at the boundaries of the energy bins for the pha data for which one wants to calculate
the likelihood.
Parameters
-----------
likelihoodModel : threeML.models.LikelihoodModel
model
"""
def __init__(self, likelihoodModel):
self.likelihoodModel = likelihoodModel
self.table_model = TableModel("table.source")
# fetch energies
self.e_lo = np.array(datastack.get_arf(1).energ_lo)
self.e_hi = np.array(datastack.get_arf(1).energ_hi)
# TODO figure out what to do if the binning is different across the datastack
self.table_model._TableModel__x = self.e_lo # according to Sherpa TableModel specs, TBV
# determine which sources are inside the ON region
self.onPtSrc = [] # list of point sources in the ON region
nPtsrc = self.likelihoodModel.getNumberOfPointSources()
for ipt in range(nPtsrc):
# TODO check if source is in the ON region?
self.onPtSrc.append(ipt)
self.onExtSrc = [] # list of extended sources in the ON region
nExtsrc = self.likelihoodModel.getNumberOfExtendedSources()
if nExtsrc > 0:
            raise NotImplementedError("Cannot support extended sources yet")
def update(self):
"""Update the model values.
"""
vals = np.zeros(len(self.table_model._TableModel__x))
for ipt in self.onPtSrc:
vals += [self.likelihoodModel.pointSources[ipt].spectralModel.photonFlux(bounds[0], bounds[1]) for bounds in
zip(self.e_lo, self.e_hi)]
# integrated fluxes over same energy bins as for dataset, according to Sherpa TableModel specs, TBV
self.table_model._TableModel__y = vals
class SherpaLike(PluginPrototype):
"""Generic plugin based on sherpa for data in OGIP format
Parameters
----------
name : str
dataset name
phalist : list of strings
pha file names
stat : str
statistics to be used
"""
def __init__(self, name, phalist, stat):
# load data and set statistics
self.name = name
self.ds = datastack.DataStack()
for phaname in phalist:
self.ds.load_pha(phaname)
# TODO add manual specs of bkg, arf, and rmf
datastack.ui.set_stat(stat)
# Effective area correction is disabled by default, i.e.,
# the nuisance parameter is fixed to 1
self.nuisanceParameters = {}
def set_model(self, likelihoodModel):
"""Set model for the source region
Parameters
----------
likelihoodModel : threeML.models.LikelihoodModel
sky model for the source region
"""
self.model = Likelihood2SherpaTableModel(likelihoodModel)
self.model.update() # to initialize values
self.model.ampl = 1.
self.ds.set_source(self.model.table_model)
def _updateModel(self):
"""Updates the sherpa table model"""
self.model.update()
self.ds.set_source(self.model.table_model)
def setEnergyRange(self, e_lo, e_hi):
"""Define an energy threshold for the fit
which is different from the full range in the pha files
Parameters
------------
e_lo : float
lower energy threshold in keV
e_hi : float
higher energy threshold in keV
"""
self.ds.notice(e_lo, e_hi)
def get_log_like(self):
"""Returns the current statistics value
Returns
-------------
statval : float
value of the statistics
"""
self._updateModel()
return -datastack.ui.calc_stat()
def get_name(self):
"""Return a name for this dataset set during the construction
Returns:
----------
name : str
name of the dataset
"""
return self.name
def get_nuisance_parameters(self):
"""Return a list of nuisance parameters.
Return an empty list if there are no nuisance parameters.
Not implemented yet.
"""
# TODO implement nuisance parameters
return self.nuisanceParameters.keys()
def inner_fit(self):
"""Inner fit. Just a hack to get it to work now.
Will be removed.
"""
# TODO remove once the inner fit requirement has been dropped
return self.get_log_like()
def display(self):
"""creates plots comparing data to model
"""
# datastack.ui.set_xlog()
# datastack.ui.set_ylog()
# self.ds.plot_data()
# self.ds.plot_model(overplot=True)
# TODO see if possible to show model subcomponents
f, axarr = plt.subplots(2, sharex=True)
f.subplots_adjust(hspace=0)
plt.setp([a.get_xticklabels() for a in f.axes[:-1]], visible=False)
energies = datastack.ui.get_data_plot(1).x
dlne = np.log(energies[1:]) - np.log(energies[:-1])
dlne = np.append(dlne[0], dlne) # TODO do this properly for arbitrary binning
de = np.power(10, np.log10(energies) + dlne) - np.power(10, np.log10(energies) - dlne)
# TODO figure out what to do if different binning within the ds
counts = np.zeros(len(energies))
model = np.zeros(len(energies))
bkg = np.zeros(len(energies))
for id in self.ds.ids:
counts += datastack.ui.get_data_plot(id).y * datastack.get_exposure(id) * de
model += datastack.ui.get_model_plot(id).y * datastack.get_exposure(id) * de
bkg += datastack.ui.get_bkg_plot(id).y * datastack.get_exposure(id) * de * datastack.ui.get_bkg_scale(id)
tot = model + bkg
axarr[0].errorbar(energies, counts, xerr=np.zeros(len(energies)), yerr=np.sqrt(counts), fmt='ko', capsize=0)
axarr[0].plot(energies, model, label='source')
axarr[0].plot(energies, bkg, label='background')
axarr[0].plot(energies, tot, label='total model')
leg = axarr[0].legend()
axarr[1].errorbar(energies[counts > 0], ((counts - tot) / tot)[counts > 0],
xerr=np.zeros(len(energies[counts > 0])), yerr=(np.sqrt(counts) / tot)[counts > 0], fmt='ko',
capsize=0)
axarr[1].plot(energies, np.zeros(len(energies)), color='k', linestyle='--')
axarr[0].set_xscale('log')
axarr[1].set_xscale('log')
axarr[0].set_yscale('log')
axarr[0].set_ylabel('counts')
axarr[1].set_ylabel('residuals (counts-model)/model')
axarr[1].set_xlabel("energy (keV)")
| bsd-3-clause |
cvjena/libmaxdiv | experiments/optimize_td_embedding.py | 1 | 13895 | """ Evaluates various methods for automatic optimization of time-delay embedding parameters. """
import sys
sys.path.append('..')
import numpy as np
import matplotlib.pylab as plt
from collections import Counter
import argparse
from sklearn.gaussian_process import GaussianProcess
from maxdiv import maxdiv, maxdiv_util, preproc, eval
import datasets
def find_best_k(func, method, td_lag):
# Find embedding dimension which maximizes AP
k_best, ap_best, auc_best = 0, 0.0, 0.0
regions_best = []
for k in range(3, 21):
detections = maxdiv.maxdiv(func['ts'], method = method, mode = 'I_OMEGA',
extint_min_len = 20, extint_max_len = 100, num_intervals = None,
td_dim = k, td_lag = td_lag)
cur_ap = eval.average_precision([func['gt']], [detections])
cur_auc = eval.auc(func['gt'], detections, func['ts'].shape[1])
if (k_best == 0) or (cur_ap > ap_best) or ((cur_ap == ap_best) and (cur_auc > auc_best)):
k_best, ap_best, auc_best, regions_best = k, cur_ap, cur_auc, detections
return regions_best, k_best
def rank_aggregation(func, method, td_lag):
# Collect scores for all intervals with various embedding dimensions
regions = {}
for k in range(3, 21):
detections = maxdiv.maxdiv(func['ts'], method = method, mode = 'I_OMEGA',
extint_min_len = 20, extint_max_len = 100, num_intervals = None, overlap_th = 1.0,
td_dim = k, td_lag = td_lag)
for a, b, score in detections:
if (a, b) not in regions:
regions[(a, b)] = np.zeros(18)
regions[(a, b)][k - 3] = score
# Sort detections by Approximate Kemeny Rank Aggregation
# (an interval is preferred over another one if the majority of rankers does so)
detections = sorted(regions.keys(), key = lambda intvl: KemenyCompare(regions, intvl), reverse = True)
# Assign inverse rank as detection score
for i, (a, b) in enumerate(detections):
detections[i] = (a, b, len(detections) - i)
return maxdiv.find_max_regions(detections), 0
class KemenyCompare:
def __init__(self, regions, intvl):
self.regions = regions
self.intvl = intvl
def cmp(self, other):
return (self.regions[self.intvl] > self.regions[other.intvl]).sum() - (self.regions[self.intvl] < self.regions[other.intvl]).sum()
def __lt__(self, other):
return self.cmp(other) < 0
def __gt__(self, other):
return self.cmp(other) > 0
def __eq__(self, other):
return self.cmp(other) == 0
def __le__(self, other):
return self.cmp(other) <= 0
def __ge__(self, other):
return self.cmp(other) >= 0
def __ne__(self, other):
return self.cmp(other) != 0
def td_from_mi(func, method, td_lag):
# Determine Time Lag with minimum Mutual Information
k = min(range(2, int(0.05 * func['ts'].shape[1])), key = lambda k: mutual_information(func['ts'], 2, k - 1)) // td_lag
# Detect regions
detections = maxdiv.maxdiv(func['ts'], method = method, mode = 'I_OMEGA',
extint_min_len = 20, extint_max_len = 100, num_intervals = None,
td_dim = k, td_lag = td_lag)
return detections, k
def td_from_relative_mi(func, method, td_lag, th = 0.05):
# Determine Time Lag based on "normalized" Mutual Information
rmi = np.array([mutual_information(func['ts'], 2, d) for d in range(1, int(0.05 * func['ts'].shape[1]))])
rmi /= rmi[0]
drmi = np.convolve(rmi, [-1, 0, 1], 'valid')
if np.any(drmi <= th):
k = (np.where(drmi <= th)[0][0] + 3) // td_lag
else:
k = (drmi.argmin() + 3) // td_lag
# Detect regions
detections = maxdiv.maxdiv(func['ts'], method = method, mode = 'I_OMEGA',
extint_min_len = 20, extint_max_len = 100, num_intervals = None,
td_dim = k, td_lag = td_lag)
return detections, k
def td_from_mi_gradient(func, method, td_lag, th = 0.15):
th *= func['ts'].shape[0]
# Determine Time Lag based on the steepness of decrease of mutual information
mi = np.array([mutual_information(func['ts'], 2, d) for d in range(1, int(0.05 * func['ts'].shape[1]))])
dmi = np.convolve(mi, [-1, 0, 1], 'valid')
if np.any(dmi <= th):
k = (np.where(dmi <= th)[0][0] + 3) // td_lag
else:
k = (dmi.argmin() + 3) // td_lag
# Detect regions
detections = maxdiv.maxdiv(func['ts'], method = method, mode = 'I_OMEGA',
extint_min_len = 20, extint_max_len = 100, num_intervals = None,
td_dim = k, td_lag = td_lag)
return detections, k
def td_from_relative_ce(func, method, td_lag, th = 0.005):
# Determine Time Lag based on "normalized" Mutual Information
rce = np.array([conditional_entropy(func['ts'], d, td_lag) for d in range(1, int(0.05 * func['ts'].shape[1] / td_lag))])
rce /= rce[0]
drce = np.convolve(rce, [-1, 0, 1], 'valid')
if np.any(drce <= th):
k = (np.where(drce <= th)[0][0] + 2)
else:
k = (drce.argmin() + 2)
# Detect regions
detections = maxdiv.maxdiv(func['ts'], method = method, mode = 'I_OMEGA',
extint_min_len = 20, extint_max_len = 100, num_intervals = None,
td_dim = k, td_lag = td_lag)
return detections, k
def td_from_ce_gradient(func, method, td_lag, th = 0.001):
# Determine Time Lag based on the steepness of decrease of conditional entropy
ce = np.array([conditional_entropy(func['ts'], d, td_lag) for d in range(1, int(0.05 * func['ts'].shape[1] / td_lag))])
dce = np.convolve(ce, [-1, 0, 1], 'valid')
if np.any(dce <= th):
k = (np.where(dce <= th)[0][0] + 2)
else:
k = (dce.argmin() + 2)
# Detect regions
detections = maxdiv.maxdiv(func['ts'], method = method, mode = 'I_OMEGA',
extint_min_len = 20, extint_max_len = 100, num_intervals = None,
td_dim = k, td_lag = td_lag)
return detections, k
def td_from_length_scale(func, method, td_lag, factor = 0.3):
# Determine Length Scale of Gaussian Process
ls = length_scale(func['ts'])
# Set Embedding Dimension
k = int(max(1, min(0.05 * func['ts'].shape[1], round(factor * ls / td_lag))))
# Detect regions
detections = maxdiv.maxdiv(func['ts'], method = method, mode = 'I_OMEGA',
extint_min_len = 20, extint_max_len = 100, num_intervals = None,
td_dim = k, td_lag = td_lag)
return detections, k
def td_from_false_neighbors(func, method, td_lag, Rtol = 1.0, Ntol = 0.001):
d, n = func['ts'].shape
Rtol2 = Rtol * Rtol
# Determine embedding dimension based on false nearest neighbors
dist = maxdiv_util.calc_distance_matrix(func['ts'])
cumdist = dist.copy()
fnn = []
max_k = int(0.05 * func['ts'].shape[1])
for k in range(1, max_k + 1):
cur_fnn = 0
for i in range(n - 1):
for j in range(i + 1, n):
id = max(0, i - k * td_lag)
jd = max(0, j - k * td_lag)
if dist[id, jd] / cumdist[i, j] > Rtol2:
cur_fnn += 1
cumdist[i, j] += dist[id, jd]
fnn.append(cur_fnn)
if (len(fnn) >= 3) and (abs(fnn[-3] - fnn[-1]) <= Ntol * abs(fnn[0] - fnn[2])):
k -= 2
break
# Detect regions
detections = maxdiv.maxdiv(func['ts'], method = method, mode = 'I_OMEGA',
extint_min_len = 20, extint_max_len = 100, num_intervals = None,
td_dim = k, td_lag = td_lag)
return detections, k
def mutual_information(ts, k, T = 1):
d, n = ts.shape
if (k < 2) or (T < 1):
# Entropy as a special case of MI
cov = np.cov(ts)
if d > 1:
return (d * (np.log(2 * np.pi) + 1) + np.linalg.slogdet(cov)[1]) / 2
else:
return (np.log(2 * np.pi) + 1 + np.log(cov)) / 2
# Time-Delay Embedding with the given embedding dimension and time lag
embed_func = np.vstack([ts[:, ((k - i - 1) * T):(n - i * T)] for i in range(k)])
# Compute parameters of the joint and the marginal distributions assuming a normal distribution
cov = np.cov(embed_func)
cov_indep = cov.copy()
cov_indep[:d, d:] = 0
cov_indep[d:, :d] = 0
# Compute KL divergence between p(x_t, x_(t-T), ..., x_(t - (k-1)*T)) and p(x_t)*p(x_(t-L), ..., x_(t - (k-1)*T))
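    # For jointly Gaussian variables this KL divergence has the closed form
    #   MI = 0.5 * (tr(C_indep^-1 C) + log|C_indep| - log|C| - k*d),
    # which is the expression returned below (embed_func.shape[0] == k*d).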
return (np.linalg.inv(cov_indep).dot(cov).trace() + np.linalg.slogdet(cov_indep)[1] - np.linalg.slogdet(cov)[1] - embed_func.shape[0]) / 2
def conditional_entropy(ts, k, T = 1):
d, n = ts.shape
if (k < 2) or (T < 1):
# Entropy as a special case
cov = np.cov(ts)
if d > 1:
return (d * (np.log(2 * np.pi) + 1) + np.linalg.slogdet(cov)[1]) / 2
else:
return (d * (np.log(2 * np.pi) + 1) + np.log(cov)) / 2
# Time-Delay Embedding with the given embedding dimension and time lag
embed_func = np.vstack([ts[:, ((k - i - 1) * T):(n - i * T)] for i in range(k)])
# Compute parameters of the joint and the conditioned distributions assuming a normal distribution
cov = np.cov(embed_func)
cond_cov = cov[:d, :d] - cov[:d, d:].dot(np.linalg.inv(cov[d:, d:]).dot(cov[d:, :d]))
# Compute the conditional entropy H(x_t | x_(t-T), ..., x_(t - (k-1)*T))
return (d * (np.log(2 * np.pi) + 1) + np.linalg.slogdet(cond_cov)[1]) / 2
def length_scale(ts):
X = np.linspace(0, 1, ts.shape[1], endpoint = True).reshape(ts.shape[1], 1)
GP = GaussianProcess(thetaL = 0.1, thetaU = 1000, nugget = 1e-8, normalize = False)
GP.fit(X, ts.T)
return np.sqrt(0.5 / GP.theta_.flat[0]) * ts.shape[1]
# Constants
optimizers = {
'best_k' : find_best_k,
'rank_aggregation' : rank_aggregation,
'mi' : td_from_mi,
'rmi' : td_from_relative_mi,
'dmi' : td_from_mi_gradient,
'ce' : td_from_ce_gradient,
'rce' : td_from_relative_ce,
'gp_ls' : td_from_length_scale,
'fnn' : td_from_false_neighbors
}
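# Each optimizer takes (func, method, td_lag) and returns a pair
# (detections, chosen_embedding_dimension); the --optimizer flag below selects one.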
# Parameters
parser = argparse.ArgumentParser(description = 'Evaluate various methods for automatic optimization of time-delay embedding parameters.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--method', help='MaxDiv method', choices = maxdiv.get_available_methods() + ['gaussian_ts'], default = 'gaussian_ts')
parser.add_argument('--optimizer', help='Optimization method for Time-Delay Embedding', choices = optimizers.keys(), default = 'best_k')
parser.add_argument('--plot', action='store_true', help='Plot histograms of embedding dimensions for each extreme type')
parser.add_argument('--datasets', help='datasets to be loaded', nargs='+', default=['synthetic'])
parser.add_argument('--subsets', help='subsets of the datasets to be tested', nargs='+',default=[])
parser.add_argument('--td_lag', help='Time-Lag for Time-Delay Embedding', default=1, type=int)
parser.add_argument('--dump', help='Dump detections for each time-series to the specified CSV file', default='')
args = parser.parse_args()
# Load data
data = datasets.loadDatasets(args.datasets)
ftypes = args.subsets if len(args.subsets) > 0 else data.keys()
# Find the best embedding dimension for every single time series
aucs = {}
aps = {}
all_ids = []
all_gt = []
all_regions = []
best_k = {}
for ftype in ftypes:
print('-- {} --'.format(ftype))
func_ids = []
ygts = []
regions = []
aucs[ftype] = []
best_k[ftype] = Counter()
for i, func in enumerate(data[ftype]):
func_ids.append('{}_{:03d}'.format(ftype, i))
ygts.append(func['gt'])
det, k_best = optimizers[args.optimizer](func, args.method, args.td_lag)
# Divide scores by maximum score since their range differs widely depending on the dimensionality
if args.method not in ('gaussian_cov_ts', 'gaussian_ts'):
for r in range(len(det) - 1, -1, -1):
det[r] = (det[r][0], det[r][1], det[r][2] / det[0][2])
regions.append(det)
aucs[ftype].append(eval.auc(func['gt'], det, func['ts'].shape[1]))
best_k[ftype][k_best] += 1
print ("Best k: {}".format(k_best))
aps[ftype] = eval.average_precision(ygts, regions)
print ("AP: {}".format(aps[ftype]))
if args.plot:
plt.bar(np.array(list(best_k[ftype].keys())) - 0.5, list(best_k[ftype].values()), 1)
plt.title(ftype)
plt.show()
all_ids += func_ids
all_regions += regions
all_gt += ygts
print('-- Best k --')
for ftype, counts in best_k.items():
print('{}: {} ({} - {})'.format(ftype, max(counts.keys(), key = lambda k: counts[k]), min(counts.keys()), max(counts.keys())))
print('-- Aggregated AUC --')
for ftype in aucs:
print ("{}: {} (+/- {})".format(ftype, np.mean(aucs[ftype]), np.std(aucs[ftype])))
print('-- Average Precision --')
for ftype in aps:
print ("{}: {}".format(ftype, aps[ftype]))
print ("OVERALL AP: {}".format(eval.average_precision(all_gt, all_regions)))
# Dump detections
if args.dump:
with open(args.dump, 'w') as dumpFile:
dumpFile.write('Func,Start,End,Score\n')
for id, regions in zip(all_ids, all_regions):
for a, b, score in regions:
dumpFile.write('{},{},{},{}\n'.format(id, a, b, score))
| lgpl-3.0 |
jpn--/larch | larch/prelearning.py | 1 | 11269 |
import logging
import numpy
import pandas
import os
from appdirs import user_cache_dir
import joblib
from typing import MutableMapping
from .general_precision import l4_float_dtype
from .log import logger_name
from .dataframes import DataFrames
def user_cache_file(filename, appname=None, appauthor=None, version=None, opinion=True):
d = user_cache_dir(appname=appname, appauthor=appauthor, version=version, opinion=opinion)
os.makedirs(d, exist_ok=True)
return os.path.join(d, filename)
class Prelearner():
"""
A prelearner for use with Larch.
A prelearner uses a machine learning classifier to make an initial
prediction of the result. This initial prediction is then added
as an input data column for Larch, effectively creating a chained
classifier.
Parameters
----------
    dataframes : larch.DataFrames
        The data used to train the prelearner, including the observed choices
        and (optionally) the case weights.
    ca_columns : list, optional
        Names or expressions of idca-format variables used as classifier inputs.
    co_columns : list, optional
        Names or expressions of idco-format variables joined onto the idca data
        as additional classifier inputs.
classifier : sklearn Classifier or Regressor
This is the class object for the selected classifier, not
an existing instance. This classifier or Regressor will be
instantiated and trained using the data above to generate
the prediction.
fit : dict or False, optional
A dictionary of arguments to pass to the `fit` method of the
classifier, or set to False to not fit the classifier
during the initialization of this object.
cache_file : str, optional
A cache file name to store the trained prelearner. If just a filename is given,
it will be stored in `appdirs.user_cache_file()`. If instead an absolute path or
a relative path beginning with '.' is given, that location will be used.
If the file exists, it will be loaded instead of re-training.
output_name : str, default 'prelearned_utility'
The name of the output column from this prelearner.
grid_cv_params : dict or List[dict], optional
If given, this is used as the `param_grid` argument
to initialize a :class:`sklearn.model_selection.GridSearchCV`
wrapped around the classifier, instead of using the
classifier directly.
grid_cv_kwds : dict, optional
If `grid_cv_params` is given, this dict gives other keyword
arguments given to :class:`sklearn.model_selection.GridSearchCV`.
**kwargs
Any other keyword arguments are passed through to the classifier's
constructor.
"""
def __init__(
self,
dataframes,
ca_columns=None,
co_columns=None,
classifier=None,
fit=True,
cache_file=None,
output_name='prelearned_utility',
appname='larch',
grid_cv_params=None,
grid_cv_kwds=None,
validation_dataframes=None,
**kwargs,
):
if classifier is None:
raise ValueError('must give a classifier')
if fit is True:
fit = {}
logger = logging.getLogger(logger_name)
self.input_ca_columns = ca_columns if ca_columns is not None else []
self.input_co_columns = co_columns
self.eval_set_names = fit.pop('eval_set_names', [])
if isinstance(fit, MutableMapping):
if 'validation_percent' in fit and validation_dataframes is None:
vpct = fit.pop('validation_percent')
dataframes, validation_dataframes = dataframes.split([100-vpct, vpct])
else:
vpct = 'preset'
if validation_dataframes is not None:
validation_X = self.filter_and_join_columns(
validation_dataframes.data_ca_as_ce(),
validation_dataframes.data_co,
)
validation_Y = validation_dataframes.array_ch_as_ce()
validation_W = validation_dataframes.array_wt_as_ce()
fit['eval_set'] = fit.get('eval_set', []) + [(validation_X, validation_Y)]
if validation_W is not None:
fit['sample_weight_eval_set'] = fit.get('sample_weight_eval_set', []) + [validation_W]
self.eval_set_names += [f'validation_{vpct}']
training_X = self.filter_and_join_columns(
dataframes.data_ca_as_ce(),
dataframes.data_co,
)
training_Y = dataframes.array_ch_as_ce()
training_W = dataframes.array_wt_as_ce()
self.output_column = output_name
if cache_file is not None:
if os.path.isabs(cache_file) or cache_file[:2] in ('./', '..', '.\\'):
cache_clf_file = cache_file
else:
cache_clf_file = user_cache_file(cache_file, appname=appname)
else:
cache_clf_file = None
if cache_clf_file is not None and os.path.exists(cache_clf_file):
logger.info(f'LOADING {cache_clf_file}...')
clf = joblib.load(cache_clf_file)
logger.info(f'COMPLETED LOADING {cache_clf_file}')
else:
if grid_cv_params is not None:
from sklearn.model_selection import GridSearchCV
clf = GridSearchCV(
classifier(**kwargs),
grid_cv_params,
**grid_cv_kwds,
)
else:
clf = classifier(**kwargs)
if fit is not False:
if 'train_as_eval' in fit:
fit.pop('train_as_eval')
fit['eval_set'] = [(training_X, training_Y),] + fit.get('eval_set',[])
if training_W is not None:
fit['sample_weight_eval_set'] = [training_W,]+fit.get('sample_weight_eval_set',[])
self.eval_set_names = ['training'] + self.eval_set_names
logger.info(f'FITTING {classifier}...')
if training_W is not None:
clf.fit(training_X, training_Y, sample_weight=training_W, **fit)
else:
clf.fit(training_X, training_Y, **fit)
logger.info(f'FITTED {classifier}')
if cache_clf_file is not None:
joblib.dump(clf, cache_clf_file)
logger.info(f'SAVED {cache_clf_file}')
self.clf = clf
self._predict_type = 'predict_proba col 1'
def filter_ca_columns(self, X):
# filter the columns of the input into the correct form for the prelearner.
try:
X1 = X[self.input_ca_columns]
except KeyError:
X1 = pandas.DataFrame(
X.eval(self.input_ca_columns).T.astype(float),
index=X.index,
columns=self.input_ca_columns,
)
return X1
def filter_and_join_columns(self, X_ca, X_co):
training_X = self.filter_ca_columns(X_ca)
if self.input_co_columns:
try:
X_co = X_co[self.input_co_columns]
except KeyError:
X_co = pandas.DataFrame(
X_co.eval(self.input_co_columns).T.astype(float),
index=X_co.index,
columns=self.input_co_columns,
)
training_X = training_X.join(X_co, on=training_X.index.levels[0].name, how='left').fillna(0)
return training_X
def apply(
self,
X,
dtype=None,
output_name=None,
**kwargs,
):
"""
Apply the prelearner to compute pseudo-utility.
Parameters
----------
X : pandas.DataFrame
dtype : dtype, default float
The dtype to use for the output column.
output_name : str, optional
The name of the output column from this
application of the prelearner.
**kwargs
Other keyword arguments are forwarded to the
`predict` or `predict_proba` method of the
`clf` member.
Returns
-------
pandas.DataFrame
"""
if dtype is None:
dtype = l4_float_dtype
if isinstance(X, DataFrames):
X_ca = X._data_ca_or_ce
X_co = X.data_co
else:
X_ca = X
X_co = None
X_in = self.filter_and_join_columns(
X_ca,
X_co,
)
if output_name is None:
output_name = self.output_column
if self._predict_type == 'predict_proba col 1':
X_ca.loc[:,output_name] = numpy.log(self.clf.predict_proba(X_in, **kwargs)[:, 1]).astype(dtype)
elif self._predict_type == 'predict':
X_ca.loc[:,output_name] = numpy.log(self.clf.predict(X_in, **kwargs)).astype(dtype)
else:
raise TypeError(self._predict_type)
return X
class RandomForestPrelearner(Prelearner):
def __init__(
self,
dataframes,
ca_columns=None,
co_columns=None,
cache_file=None,
fit=True,
output_name='prelearned_utility',
**kwargs,
):
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
default_kwargs = dict(
n_estimators=200,
warm_start=False,
max_features=None,
oob_score=True,
n_jobs=-1,
random_state=123,
)
default_kwargs.update(kwargs)
super().__init__(
dataframes=dataframes,
ca_columns=ca_columns,
co_columns=co_columns,
classifier=RandomForestClassifier,
fit=fit,
cache_file=cache_file,
output_name=output_name,
**default_kwargs,
)
class XGBoostHardPrelearner(Prelearner):
def __init__(
self,
dataframes,
ca_columns=None,
co_columns=None,
cache_file=None,
fit=True,
output_name='prelearned_utility',
**kwargs,
):
from xgboost import XGBClassifier
default_kwargs = dict(
max_depth=11,
learning_rate=0.01,
n_estimators=500,
silent=True,
objective='binary:logistic',
booster='gbtree',
n_jobs=-1,
max_delta_step=0,
subsample=1,
colsample_bytree=1,
colsample_bylevel=1,
reg_alpha=0,
reg_lambda=1,
scale_pos_weight=1,
base_score=0.5,
random_state=123,
)
default_kwargs.update(kwargs)
super().__init__(
dataframes=dataframes,
ca_columns=ca_columns,
co_columns=co_columns,
classifier=XGBClassifier,
fit=fit,
cache_file=cache_file,
output_name=output_name,
**default_kwargs,
)
class XGBoostSoftPrelearner(Prelearner):
def __init__(
self,
dataframes,
ca_columns=None,
co_columns=None,
cache_file=None,
fit=True,
output_name='prelearned_utility',
**kwargs,
):
from xgboost import XGBRegressor
default_kwargs = dict(
max_depth=11,
learning_rate=0.01,
n_estimators=500,
silent=True,
objective='reg:logistic',
booster='gbtree',
n_jobs=-1,
max_delta_step=0,
subsample=1,
colsample_bytree=1,
colsample_bylevel=1,
reg_alpha=0,
reg_lambda=1,
scale_pos_weight=1,
base_score=0.5,
random_state=123,
)
default_kwargs.update(kwargs)
super().__init__(
dataframes=dataframes,
ca_columns=ca_columns,
co_columns=co_columns,
classifier=XGBRegressor,
fit=fit,
cache_file=cache_file,
output_name=output_name,
**default_kwargs,
)
self._predict_type = 'predict'
class XGBoostPrelearner(Prelearner):
def __init__(
self,
dataframes,
ca_columns=None,
co_columns=None,
cache_file=None,
fit=True,
output_name='prelearned_utility',
**kwargs,
):
from xgboost import XGBRegressor, XGBClassifier
training_Y = dataframes.array_ch_as_ce()
use_soft = numpy.any((training_Y != 0) & (training_Y != 1.0))
default_kwargs = dict(
max_depth=11,
learning_rate=0.01,
n_estimators=500,
silent=True,
objective='reg:logistic' if use_soft else 'binary:logistic',
booster='gbtree',
n_jobs=-1,
max_delta_step=0,
subsample=1,
colsample_bytree=1,
colsample_bylevel=1,
reg_alpha=0,
reg_lambda=1,
scale_pos_weight=1,
base_score=0.5,
random_state=123,
)
default_kwargs.update(kwargs)
super().__init__(
dataframes=dataframes,
ca_columns=ca_columns,
co_columns=co_columns,
classifier=XGBRegressor if use_soft else XGBClassifier,
fit=fit,
cache_file=cache_file,
output_name=output_name,
**default_kwargs,
)
self._predict_type = 'predict' if use_soft else 'predict_proba col 1'
def evals_result(self):
j = [
pandas.DataFrame({mk:numpy.asarray(mv) for mk, mv in ev.items()})
for ek, ev in self.clf.evals_result_.items()
]
k = [
ek
for ek, ev in self.clf.evals_result_.items()
]
for i in range(len(self.eval_set_names)):
if len(k)>i:
k[i] = self.eval_set_names[i]
return pandas.concat(j, axis=1, keys=k, sort=False) | gpl-3.0 |
dinossimpson/pyspeckit | pyspeckit/spectrum/models/_formaldehyde.py | 3 | 13178 | """
Formaldehyde fitter, translated from Erik Rosolowsky's ammonia inversion transition TKIN fitter at
http://svn.ok.ubc.ca/svn/signals/nh3fit/
"""
import numpy as np
from pyspeckit.mpfit import mpfit
from .. import units
import matplotlib.cbook as mpcb
line_names = ['oneone','twotwo','threethree']
line_names = ['oneone_f10','oneone_f01','oneone_f22','oneone_f21','oneone_f12','oneone_f11']
# http://articles.adsabs.harvard.edu/abs/1971ApJ...169..429T has the most accurate freqs
freq_dict = {
'oneone': 4.82965996e9,
'twotwo': 14.48848e9,
'threethree': 28.97480e9,
}
relative_strength_theory={
'oneone_f10': 4,
'oneone_f01': 4,
'oneone_f22':15,
'oneone_f21': 5,
'oneone_f12': 5,
'oneone_f11': 3,
'twotwo_f11':1,
'twotwo_f12':1,
'twotwo_f21':1,
'twotwo_f32':1,
'twotwo_f33':1,
'twotwo_f22':1,
'twotwo_f23':1,
'threethree_f22':1,
'threethree_f44':1,
'threethree_f33':1,
}
hf_freq_dict={
'oneone_f10':4.82965996e9 - 18.53e3,
'oneone_f01':4.82965996e9 - 1.34e3,
'oneone_f22':4.82965996e9 - 0.35e3,
'oneone_f21':4.82965996e9 + 4.05e3,
'oneone_f12':4.82965996e9 + 6.48e3,
'oneone_f11':4.82965996e9 + 11.08e3,
'twotwo_f11':14.48846e9,
'twotwo_f12':14.48847e9,
'twotwo_f21':14.48848e9,
'twotwo_f32':14.48848e9,
'twotwo_f33':14.48848e9,
'twotwo_f22':14.48849e9,
'twotwo_f23':14.48849e9,
'threethree_f22':28.97478e9,
'threethree_f44':28.97480e9,
'threethree_f33':28.97481e9,
}
freq_dict.update(hf_freq_dict)
aval_dict = {
'oneone': 10**-8.44801, #64*!pi**4/(3*h*c**3)*nu11**3*mu0**2*(1/2.)
'twotwo': 10**-7.49373, #64*!pi**4/(3*h*c**3)*nu22**3*mu0**2*(2/3.)
'threethree': 10**-6.89179, #64*!pi**4/(3*h*c**3)*nu33**3*mu0**2*(3/4.)
}
hf_aval_dict={
'oneone_f10':10**-8.92509,
'oneone_f01':10**-8.44797,
'oneone_f22':10**-8.57294,
'oneone_f21':10**-9.05004,
'oneone_f12':10**-8.82819,
'oneone_f11':10**-9.05009,
'twotwo_f11':10**-7.61876,
'twotwo_f12':10**-8.09586,
'twotwo_f21':10**-8.31771,
'twotwo_f32':10**-8.44804,
'twotwo_f33':10**-7.54494,
'twotwo_f22':10**-7.65221,
'twotwo_f23':10**-8.30191,
'threethree_f22':10**-6.94294,
'threethree_f44':10**-6.91981,
'threethree_f33':10**-6.96736,
}
ortho_dict = {
'oneone': False,
'twotwo': False,
'threethree': False,
}
n_ortho = np.arange(0,28,3) # 0..3..27
n_para = np.array([x for x in range(28) if x % 3 != 0])
voff_lines_dict = {
'oneone': [(hf_freq_dict[f]-freq_dict['oneone'])/freq_dict['oneone']*units.speedoflight_ms for f in hf_freq_dict.keys() if "oneone" in f],
'twotwo': [(hf_freq_dict[f]-freq_dict['twotwo'])/freq_dict['twotwo']*units.speedoflight_ms for f in hf_freq_dict.keys() if "twotwo" in f],
'threethree': [(hf_freq_dict[f]-freq_dict['threethree'])/freq_dict['threethree']*units.speedoflight_ms for f in hf_freq_dict.keys() if "threethree" in f],
}
voff_lines_dict={
'oneone_f10': 18.53e3/4.82965996e9 * units.speedoflight_ms / 1000.0,
'oneone_f01': 1.34e3 /4.82965996e9 * units.speedoflight_ms / 1000.0,
'oneone_f22': 0.35e3 /4.82965996e9 * units.speedoflight_ms / 1000.0,
'oneone_f21': 4.05e3 /4.82965996e9 * units.speedoflight_ms / 1000.0,
'oneone_f12': 6.48e3 /4.82965996e9 * units.speedoflight_ms / 1000.0,
'oneone_f11': 11.08e3/4.82965996e9 * units.speedoflight_ms / 1000.0,
'twotwo_f11':14.48846e9,
'twotwo_f12':14.48847e9,
'twotwo_f21':14.48848e9,
'twotwo_f32':14.48848e9,
'twotwo_f33':14.48848e9,
'twotwo_f22':14.48849e9,
'twotwo_f23':14.48849e9,
'threethree_f22':28.97478e9,
'threethree_f44':28.97480e9,
'threethree_f33':28.97481e9,
}
class formaldehyde_model(object):
def __init__(self):
self.npeaks = 1
self.npars = 6
pass
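    # NOTE: this class is a partial translation of the ammonia fitter; the
    # n_formaldehyde/multinh3fit methods below still use the ammonia
    # parameterization (tkin, tex, Ntot, fortho) and refer to self.n_ammonia
    # and self.ammonia.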
def formaldehyde(self, xarr, xunits='GHz', amp=1.0, width=1.0,
xoff_v=0.0, line='oneone'):
"""
Generate a model Formaldehyde spectrum based on input temperatures, column, and
gaussian parameters
(not implemented) if tau11 is specified, Ntot is ignored
"""
# Convert X-units to frequency in GHz
if xunits in units.frequency_dict:
xarr = np.copy(xarr) * units.frequency_dict[xunits] / units.frequency_dict['GHz']
elif xunits in units.velocity_dict:
if line in freq_dict:
xarr = (freq_dict[line] - (np.copy(xarr) *
(units.velocity_dict[xunits] / units.velocity_dict['m/s'] / units.speedoflight_ms) *
freq_dict[line]) ) / units.frequency_dict['GHz']
else:
raise Exception("Xunits is velocity-type (%s) but line %s is not in the list." % (xunits,line))
else:
raise Exception("xunits not recognized: %s" % (xunits))
ckms = 2.99792458e5
ccms = ckms*1e5
g1 = 1
g2 = 1
h = 6.6260693e-27
kb = 1.3806505e-16
runspec = np.zeros(len(xarr))
for linename in line_names:
voff_lines = np.array(voff_lines_dict[linename])
lines = (1-voff_lines/ckms)*freq_dict[linename]
nuwidth = np.abs(width/ckms*lines)
nuoff = xoff_v/ckms*lines
# strength array
runspec += (1-relative_strength_theory[linename]*amp*\
np.exp(-(xarr+nuoff-freq_dict[linename])**2/(2*nuwidth**2)))
return runspec
def n_formaldehyde(self, pars=None, fittau=False, **kwargs):
"""
Returns a function that sums over N ammonia line profiles, where N is the length of
tkin,tex,Ntot,width,xoff_v,fortho *OR* N = len(pars) / 6
The background "height" is assumed to be zero (you must "baseline" your
spectrum before fitting)
pars - a list with len(pars) = 6n, assuming tkin,tex,Ntot,width,xoff_v,fortho repeated
"""
if len(pars) % 6 == 0:
            tkin = [pars[ii] for ii in range(0, len(pars), 6)]
            tex = [pars[ii] for ii in range(1, len(pars), 6)]
            Ntot = [pars[ii] for ii in range(2, len(pars), 6)]
            width = [pars[ii] for ii in range(3, len(pars), 6)]
            xoff_v = [pars[ii] for ii in range(4, len(pars), 6)]
            fortho = [pars[ii] for ii in range(5, len(pars), 6)]
elif not(len(tkin) == len(tex) == len(Ntot) == len(xoff_v) == len(width) == len(fortho)):
raise ValueError("Wrong array lengths!")
modelkwargs = kwargs.copy()
def L(x):
v = np.zeros(len(x))
for i in range(len(tkin)):
modelkwargs.update({'tkin':tkin[i], 'tex':tex[i],
'width':width[i], 'xoff_v':xoff_v[i],
'fortho':fortho[i]})
if fittau:
modelkwargs.update({'tau11':Ntot[i]})
else:
modelkwargs.update({'Ntot':Ntot[i]})
v += self.ammonia(x,**modelkwargs)
return v
return L
def multinh3fit(self, xax, data, npeaks=1, err=None, params=[20,20,1e10,1.0,0.0,0.5],
fixed=[False,False,False,False,False,False],
limitedmin=[True,True,True,True,False,True],
limitedmax=[False,False,False,False,False,True], minpars=[2.73,2.73,0,0,0,0],
maxpars=[0,0,0,0,0,1], quiet=True, shh=True, veryverbose=False, **kwargs):
"""
Fit multiple nh3 profiles
Inputs:
xax - x axis
data - y axis
npeaks - How many nh3 profiles to fit? Default 1 (this could supersede onedgaussfit)
err - error corresponding to data
These parameters need to have length = 6*npeaks. If npeaks > 1 and length = 6, they will
be replicated npeaks times, otherwise they will be reset to defaults:
params - Fit parameters: [amplitude, offset, Gfwhm, Lfwhm] * npeaks
If len(params) % 6 == 0, npeaks will be set to len(params) / 6
fixed - Is parameter fixed?
limitedmin/minpars - set lower limits on each parameter (default: width>0, Tex and Tkin > Tcmb)
limitedmax/maxpars - set upper limits on each parameter
quiet - should MPFIT output each iteration?
shh - output final parameters?
Returns:
Fit parameters
Model
Fit errors
chi2
"""
self.npars = 6
if len(params) != npeaks and (len(params) / self.npars) > npeaks:
npeaks = len(params) / self.npars
self.npeaks = npeaks
if isinstance(params,np.ndarray): params=params.tolist()
# make sure all various things are the right length; if they're not, fix them using the defaults
for parlist in (params,fixed,limitedmin,limitedmax,minpars,maxpars):
if len(parlist) != self.npars*self.npeaks:
# if you leave the defaults, or enter something that can be multiplied by 3 to get to the
# right number of gaussians, it will just replicate
if len(parlist) == self.npars:
parlist *= npeaks
elif parlist==params:
parlist[:] = [20,20,1e10,1.0,0.0,0.5] * npeaks
elif parlist==fixed:
parlist[:] = [False,False,False,False,False,False] * npeaks
elif parlist==limitedmax:
parlist[:] = [False,False,False,False,False,True] * npeaks
elif parlist==limitedmin:
parlist[:] = [True,True,True,True,False,True] * npeaks
elif parlist==minpars:
parlist[:] = [2.73,0,0,0,0,0] * npeaks
elif parlist==maxpars:
parlist[:] = [0,0,0,0,0,1] * npeaks
def mpfitfun(x,y,err):
if err is None:
def f(p,fjac=None): return [0,(y-self.n_ammonia(pars=p, **kwargs)(x))]
else:
def f(p,fjac=None): return [0,(y-self.n_ammonia(pars=p, **kwargs)(x))/err]
return f
parnames = {0:"TKIN",1:"TEX",2:"NTOT",3:"WIDTH",4:"XOFF_V",5:"FORTHO"}
parinfo = [ {'n':ii, 'value':params[ii],
'limits':[minpars[ii],maxpars[ii]],
'limited':[limitedmin[ii],limitedmax[ii]], 'fixed':fixed[ii],
'parname':parnames[ii%self.npars]+str(ii/self.npars),
'mpmaxstep':0,'error':ii}
            for ii in range(len(params)) ]
parinfo[0]['mpmaxstep'] = 1.0
parinfo[1]['mpmaxstep'] = 1.0
if veryverbose:
print "GUESSES: "
print "\n".join(["%s: %s" % (p['parname'],p['value']) for p in parinfo])
mp = mpfit(mpfitfun(xax,data,err),parinfo=parinfo,quiet=quiet)
mpp = mp.params
if mp.perror is not None: mpperr = mp.perror
else: mpperr = mpp*0
chi2 = mp.fnorm
if mp.status == 0:
raise Exception(mp.errmsg)
if not shh:
print "Fit message: ",mp.errmsg
print "Final fit values: "
for i,p in enumerate(mpp):
parinfo[i]['value'] = p
print parinfo[i]['parname'],p," +/- ",mpperr[i]
print "Chi2: ",mp.fnorm," Reduced Chi2: ",mp.fnorm/len(data)," DOF:",len(data)-len(mpp)
if mpp[1] > mpp[0]: mpp[1] = mpp[0] # force Tex>Tkin to Tex=Tkin (already done in n_ammonia)
self.mp = mp
self.mpp = mpp
self.mpperr = mpperr
self.model = self.n_ammonia(pars=mpp,**kwargs)(xax)
return mpp,self.n_ammonia(pars=mpp,**kwargs)(xax),mpperr,chi2
__call__ = multinh3fit
def moments(self, Xax, data, negamp=None, veryverbose=False, **kwargs):
"""
Returns a very simple and likely incorrect guess
"""
# TKIN, TEX, NTOT, width, center, ortho fraction
return [20,10, 1e15, 1.0, 0.0, 1.0]
def annotations(self):
label_list = [ (
"$T_K(%i)$=%6.4g $\\pm$ %6.4g" % (jj,self.mpp[0+jj*self.npars],self.mpperr[0+jj*self.npars]),
"$T_{ex}(%i)$=%6.4g $\\pm$ %6.4g" % (jj,self.mpp[1+jj*self.npars],self.mpperr[1+jj*self.npars]),
"$N$(%i)=%6.4g $\\pm$ %6.4g" % (jj,self.mpp[2+jj*self.npars],self.mpperr[2+jj*self.npars]),
"$\\sigma(%i)$=%6.4g $\\pm$ %6.4g" % (jj,self.mpp[3+jj*self.npars],self.mpperr[3+jj*self.npars]),
"$v(%i)$=%6.4g $\\pm$ %6.4g" % (jj,self.mpp[4+jj*self.npars],self.mpperr[4+jj*self.npars]),
"$F_o(%i)$=%6.4g $\\pm$ %6.4g" % (jj,self.mpp[5+jj*self.npars],self.mpperr[5+jj*self.npars])
) for jj in range(self.npeaks)]
labels = tuple(mpcb.flatten(label_list))
return labels
| mit |
jdwittenauer/ionyx | ionyx/contrib/prophet_regressor.py | 1 | 5457 | import pandas as pd
from sklearn.base import BaseEstimator
from sklearn.base import RegressorMixin
from fbprophet import Prophet
class ProphetRegressor(BaseEstimator, RegressorMixin):
"""
Scikit-learn wrapper for the Prophet forecaster.
Parameters
----------
growth: String 'linear' or 'logistic' to specify a linear or logistic
trend.
changepoints: List of dates at which to include potential changepoints. If
not specified, potential changepoints are selected automatically.
n_changepoints: Number of potential changepoints to include. Not used
if input `changepoints` is supplied. If `changepoints` is not supplied,
then n_changepoints potential changepoints are selected uniformly from
the first 80 percent of the history.
yearly_seasonality: Fit yearly seasonality.
Can be 'auto', True, False, or a number of Fourier terms to generate.
weekly_seasonality: Fit weekly seasonality.
Can be 'auto', True, False, or a number of Fourier terms to generate.
daily_seasonality: Fit daily seasonality.
Can be 'auto', True, False, or a number of Fourier terms to generate.
holidays: pd.DataFrame with columns holiday (string) and ds (date type)
and optionally columns lower_window and upper_window which specify a
range of days around the date to be included as holidays.
lower_window=-2 will include 2 days prior to the date as holidays. Also
optionally can have a column prior_scale specifying the prior scale for
that holiday.
seasonality_prior_scale: Parameter modulating the strength of the
seasonality model. Larger values allow the model to fit larger seasonal
fluctuations, smaller values dampen the seasonality. Can be specified
for individual seasonalities using add_seasonality.
holidays_prior_scale: Parameter modulating the strength of the holiday
components model, unless overridden in the holidays input.
changepoint_prior_scale: Parameter modulating the flexibility of the
automatic changepoint selection. Large values will allow many
changepoints, small values will allow few changepoints.
mcmc_samples: Integer, if greater than 0, will do full Bayesian inference
with the specified number of MCMC samples. If 0, will do MAP
estimation.
interval_width: Float, width of the uncertainty intervals provided
for the forecast. If mcmc_samples=0, this will be only the uncertainty
in the trend using the MAP estimate of the extrapolated generative
model. If mcmc.samples>0, this will be integrated over all model
parameters, which will include uncertainty in seasonality.
uncertainty_samples: Number of simulated draws used to estimate
uncertainty intervals.
Attributes
----------
model_ : object
Underlying Prophet model.
preds_ : object
Prediction DataFrame returned by the Prophet model after forecasting.
Contains various extra columns that may be useful.
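    Examples
    --------
    A minimal usage sketch; the column names below are hypothetical:
        reg = ProphetRegressor(n_changepoints=10)
        reg.fit(train['date'].values, train['sales'].values)
        predictions = reg.predict(test['date'].values)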
"""
def __init__(
self,
growth='linear',
changepoints=None,
n_changepoints=25,
yearly_seasonality='auto',
weekly_seasonality='auto',
daily_seasonality='auto',
holidays=None,
seasonality_prior_scale=10.0,
holidays_prior_scale=10.0,
changepoint_prior_scale=0.05,
mcmc_samples=0,
interval_width=0.80,
uncertainty_samples=1000):
self.growth = growth
self.changepoints = changepoints
self.n_changepoints = n_changepoints
self.yearly_seasonality = yearly_seasonality
self.weekly_seasonality = weekly_seasonality
self.daily_seasonality = daily_seasonality
self.holidays = holidays
self.seasonality_prior_scale = seasonality_prior_scale
self.holidays_prior_scale = holidays_prior_scale
self.changepoint_prior_scale = changepoint_prior_scale
self.mcmc_samples = mcmc_samples
self.interval_width = interval_width
self.uncertainty_samples = uncertainty_samples
self.model_ = None
self.preds_ = None
def fit(self, X, y):
"""
Fit the Prophet forecast model.
Parameters
----------
X : array, shape = (n_samples,)
Dates.
y : array, shape = (n_samples,)
Time series values.
Returns
-------
self : Returns an instance of self.
"""
if isinstance(X, pd.DataFrame):
X = X.values
if isinstance(y, pd.DataFrame):
y = y.values
data = pd.DataFrame(X, columns=['ds'])
data['y'] = y
self.model_ = Prophet(**self.get_params())
self.model_.fit(data)
return self
def predict(self, X):
"""
Predict using the Prophet forecast model.
Parameters
----------
X : array, shape = (n_samples,)
Dates to generate predictions.
Returns
-------
C : array, shape = (n_samples,)
Returns predicted values.
"""
if isinstance(X, pd.DataFrame):
X = X.values
data = pd.DataFrame(X, columns=['ds'])
self.preds_ = self.model_.predict(data)
return self.preds_['yhat'].values
| apache-2.0 |
nkmk/python-snippets | notebook/numpy_genfromtxt.py | 1 | 1553 | import numpy as np
with open('data/src/sample_nan.csv') as f:
print(f.read())
# 11,12,,14
# 21,,,24
# 31,32,33,34
# a = np.loadtxt('data/src/sample_nan.csv', delimiter=',')
# ValueError: could not convert string to float:
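# loadtxt cannot parse the empty fields above, while genfromtxt reads them as np.nan.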
a = np.genfromtxt('data/src/sample_nan.csv', delimiter=',')
print(a)
# [[11. 12. nan 14.]
# [21. nan nan 24.]
# [31. 32. 33. 34.]]
print(a[0, 2])
# nan
print(type(a[0, 2]))
# <class 'numpy.float64'>
with open('data/src/sample_pandas_normal.csv') as f:
print(f.read())
# name,age,state,point
# Alice,24,NY,64
# Bob,42,CA,92
# Charlie,18,CA,70
# Dave,68,TX,70
# Ellen,24,CA,88
# Frank,30,NY,57
a = np.loadtxt('data/src/sample_pandas_normal.csv', delimiter=',', skiprows=1,
dtype={'names': ('name', 'age', 'state', 'point'),
'formats': ('<U7', '<i8', '<U2', '<i8')})
print(type(a))
# <class 'numpy.ndarray'>
print(a)
# [('Alice', 24, 'NY', 64) ('Bob', 42, 'CA', 92) ('Charlie', 18, 'CA', 70)
# ('Dave', 68, 'TX', 70) ('Ellen', 24, 'CA', 88) ('Frank', 30, 'NY', 57)]
print(a.dtype)
# [('name', '<U7'), ('age', '<i8'), ('state', '<U2'), ('point', '<i8')]
a = np.genfromtxt('data/src/sample_pandas_normal.csv', delimiter=',',
names=True, dtype=None, encoding='utf-8')
print(type(a))
# <class 'numpy.ndarray'>
print(a)
# [('Alice', 24, 'NY', 64) ('Bob', 42, 'CA', 92) ('Charlie', 18, 'CA', 70)
# ('Dave', 68, 'TX', 70) ('Ellen', 24, 'CA', 88) ('Frank', 30, 'NY', 57)]
print(a.dtype)
# [('name', '<U7'), ('age', '<i8'), ('state', '<U2'), ('point', '<i8')]
| mit |
sankar-mukherjee/CoFee | laurent/cluster.py | 1 | 9983 |
# Compares different scikit-learn clustering techniques, with dimensionality reduction
# and plotting. dataprep.py must be run first; it is expected to provide the names used
# here without imports (data, get_clusterdata, bench_k_means, np, plt, preprocessing,
# metrics, KMeans, cycle, time).
X_train, y_train = get_clusterdata(data,REAL_POS_FEAT+REAL_ACO_FEAT,'simple',WORKING_DIR + 'cluster_classifier.png')
#full features
#X_train, y_train = get_clusterdata(data,REAL_POS_FEAT+REAL_ACO_FEAT,'baseFun0.65',WORKING_DIR+'cluster_classifier.png')
reduced_data = X_train
labels = y_train
######################################dimension reduction #################################
#################### PCA variance capture
from sklearn.decomposition import PCA
pca = PCA(n_components=3)
reduced_data = pca.fit(X_train).transform(X_train)
print('explained variance ratio (first three components): %s'
% str(pca.explained_variance_ratio_))
#################### ##################### LDA
from sklearn.lda import LDA
lda = LDA(n_components=3)
scale = lda.fit(X_train,y_train)
reduced_data = scale.transform(X_train)
####################################### ICA ####################################
from sklearn.decomposition import FastICA
# Compute ICA
ica = FastICA(n_components=3)
reduced_data = ica.fit_transform(X_train) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
#######################################################plot 3D pca with 3 components #################
import mpl_toolkits.mplot3d.axes3d as p3
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.plot3D(reduced_data[:, 0], reduced_data[:, 1], reduced_data[:, 2],'.')
#########################################clustering ################################
############ K means
le = preprocessing.LabelEncoder()
le.fit(y_train)
le.classes_
ori_label = le.transform(y_train)
kmeans = KMeans(init='k-means++',n_clusters=11, n_init=1)
kmeans.fit(X_train)
reduced_data = kmeans.predict(X_train)
pred_label = kmeans.labels_
bench_k_means(kmeans,name="k-means++",data=X_train)
pred = reduced_data - ori_label
np.count_nonzero(pred)
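# Note: k-means cluster ids are arbitrary and not aligned with the encoded true
# labels, so this difference is only a rough mismatch count, not an accuracy score.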
kmeans = KMeans(init='k-means++',n_init=10)
kmeans.fit(reduced_data)
bench_k_means(KMeans(init='k-means++', n_init=10, n_clusters= 2),
name="k-means++", data=reduced_data)
bench_k_means(KMeans(init='random', n_clusters=10, n_init=10),
name="random", data=reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02  # step between points in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = reduced_data[:, 0].min(), reduced_data[:, 0].max()
y_min, y_max = reduced_data[:, 1].min(), reduced_data[:, 1].max()
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
## plot
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
plt.show()
############################ Compute DBSCAN
from sklearn.cluster import DBSCAN
bench_k_means(DBSCAN(eps=0.01, min_samples=10), name="DBSCAN", data=reduced_data)
db = DBSCAN(eps=0.01, min_samples=10).fit(reduced_data)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
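# DBSCAN labels noise points as -1, hence the correction when counting clusters.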
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = reduced_data[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = reduced_data[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
################################### Compute Affinity Propagation###################################
from sklearn.cluster import AffinityPropagation
af = AffinityPropagation(preference=-50).fit(reduced_data)
cluster_centers_indices = af.cluster_centers_indices_
labels_true = af.labels_
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(reduced_data, labels, metric='sqeuclidean'))
# Plot result
plt.close('all')
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = reduced_data[cluster_centers_indices[k]]
plt.plot(reduced_data[class_members, 0], reduced_data[class_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in reduced_data[class_members]:
plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
################################# Compute clustering with MeanShift###############################
from sklearn.cluster import MeanShift, estimate_bandwidth
# The following bandwidth can be automatically detected using estimate_bandwidth.
bandwidth = estimate_bandwidth(reduced_data, quantile=0.1, n_samples=100)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(reduced_data)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
bench_k_means(ms, name="MeanShift", data=reduced_data)
# Plot result
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(reduced_data[my_members, 0], reduced_data[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
################################# AgglomerativeClustering #############
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters at the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(reduced_data, 30, mode='distance')
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(reduced_data)
elapsed_time = time.time() - t0
plt.scatter(reduced_data[:, 0], reduced_data[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
################################### Birch #######################################
# Compute clustering with Birch with and without the final clustering step
# and plot.
from sklearn.cluster import Birch
# Use all named colors that matplotlib provides by default. The name `colors`
# has been rebound to arrays/cycles above, so import the matplotlib module under an alias.
from matplotlib import colors as mcolors
colors_ = cycle(mcolors.cnames.keys())
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
birch_model.fit(reduced_data)
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(reduced_data[mask, 0], reduced_data[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
| apache-2.0 |
herilalaina/scikit-learn | examples/ensemble/plot_adaboost_multiclass.py | 38 | 4126 | """
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1]_ and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1]_ algorithms is compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_test_predict in zip(
        bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
    real_test_errors.append(
        1. - accuracy_score(real_test_predict, y_test))
    discrete_test_errors.append(
        1. - accuracy_score(discrete_test_predict, y_test))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
| bsd-3-clause |
kikocorreoso/scikit-extremes | skextremes/models/engineering.py | 1 | 31949 | """
This module contains algorithms found in the literature and used extensively in
some fields.
The following paragraphs have been adapted from
`Makkonen, 2006 <http://journals.ametsoc.org/doi/pdf/10.1175/JAM2349.1>`_
The return period of an event of a specific large magnitude is of fundamental
interest. All evaluations of the risks of extreme events require methods to
statistically estimate their return periods from the measured data. Such
methods are widely used in building codes and regulations concerning the design
of structures and community planning, as examples. Furthermore, it is crucial
for the safety and economically optimized engineering of future communities to
be able to estimate the changes in the frequency of various natural hazards
with climatic change, and analyzing trends in the weather extremes.
The return period :math:`R` (in years) of an event is related to the
probability :math:`P` of not exceeding this event in one year by
.. math:: R=\\frac{1}{1 - P}
A standard method to estimate :math:`R` from measured data is the
following. One first ranks the data, typically annual extremes or values over a
threshold, in increasing order of magnitude from the smallest :math:`m = 1` to
the largest :math:`m = N` and associates a cumulative probability :math:`P` to
each of the mth smallest values. Second, one fits a line to the
ranked values by some fitting procedure. Third, one interpolates or
extrapolates from the graph so that the return period of the extreme value of
interest is estimated.
Basically, this extreme value analysis method, introduced by Hazen (1914), can
be applied directly by using arithmetic paper. However, interpolation and
extrapolation can be made more easily when the points fall on a straight line,
which is rarely the case in an order-ranked plot of a physical variable on
arithmetic paper. Therefore, almost invariably, the analysis is made by
modifying the scale of the probability :math:`P`, and sometimes also that of
the random variable :math:`x`, in such a way that the plot against :math:`x` of
the anticipated cumulative distribution function :math:`P = F(x)` of the
variable appears as a straight line. Typically, the Gumbel probability paper
(Gumbel 1958) is used because in many cases the distribution of the extremes,
each selected from r events, asymptotically approaches the Gumbel distribution
when :math:`r` goes to infinity.
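As a quick illustration of the relation above (not part of the library API):
an annual non-exceedance probability of :math:`P = 0.98` corresponds to a
return period of :math:`R = 1/(1 - 0.98) = 50` years, and the reduced variate
used on Gumbel probability paper is :math:`y = -\\ln(-\\ln(P))`::

    import numpy as np

    P = 0.98                    # annual non-exceedance probability
    R = 1.0 / (1.0 - P)         # return period in years -> 50.0
    y = -np.log(-np.log(P))     # Gumbel reduced variate -> ~3.90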
"""
from scipy import integrate as _integrate
import numpy as _np
from scipy import stats as _st
import matplotlib.pyplot as _plt
_fact = _np.math.factorial
docstringbase = """
Calculate extreme values based on yearly maxima using {0} plotting
    positions and a least-squares fit.
    This methodology differs from the others in the module in how the
    probability plotting positions are located.
**Parameters**
data : array_like
Extreme values dataset.
preconditioning : int or float
You can choose to apply an exponent to the extreme data values before
performing the Gumbel curve fit. Preconditioning can often improve the
        convergence of the curve fit and therefore improve the estimated T-year
extreme wind speed. Default value is 1.
**Attributes**
results : dict
A dictionary containing different parameters of the fit.
c : float
Value of the 'shape' parameter. In the case of the Gumbel distribution
this value is always 0.
loc : float
Value of the 'localization' parameter.
scale : float
        Value of the 'scale' parameter.
distr : frozen ``scipy.stats.gumbel_r`` distribution
Frozen distribution of type ``scipy.stats.gumbel_r`` with ``c``,
``loc`` and ``scale`` parameters equal to ``self.c``, ``self.loc``
and ``self.scale``, respectively.
**Methods**
Methods to calculate the fit:
{1}
Methods to plot results:
self.plot_summary()
"""
class _GumbelBase:
def __init__(self, preconditioning = 1, ppp = None, **kwargs):
super().__init__(**kwargs)
self.preconditioning = preconditioning
        self.ppp = ppp
self.results = {}
def plot_summary(self):
"""
        Summary plot showing the Gumbel fit of the ranked data, the return
        values for return periods from 2 to 100 years, and the empirical and
        fitted probability density.
        **Returns**
        fig, ax1, ax2, ax3 : a 3-panel matplotlib figure (fit, return levels
        and pdf) together with its three axes.
"""
# data to be used
x = self.results['data']
extremes = self.results['Values for return period from 2 to 100 years']
Y = self.results['Y']
slope = self.results['slope']
offset = self.results['offset']
how = self.ppp
xmin = _np.min(x)
xmax = _np.max(x)
# figure settings
fig, (ax1, ax2, ax3) = _plt.subplots(1, 3, figsize=(15, 5))
fig.suptitle(how)
# Plot the fit
ax1.plot([xmin, xmax],
[(xmin - offset) / slope, (xmax - offset) / slope],
'-', color = '0.25', lw = 2, alpha = 0.5)
ax1.scatter(x, Y,
facecolor = (0.7,0.7,1), color = '0',
s= 50, linewidths = 1)
ax1.set_ylabel('$-ln(-ln(P))$')
ax1.set_xlabel('$Extreme\ values$')
# plot the return period
ax2.plot(_np.arange(2,101), extremes)
ax2.set_xlabel('$T (years)$')
ax2.set_ylabel('$Extreme\ values$')
# plot the distribution
_x = _np.linspace(self.distr.ppf(0.001), self.distr.ppf(0.999), 100)
ax3.hist(x,
density = True, alpha = 0.2)
ax3.plot(_x, self.distr.pdf(_x), label = 'Fitted', color = 'k')
desf = xmax * 0.1
ax3.set_xlim(xmin - desf, xmax + desf)
ax3.set_ylabel('$Probability$')
ax3.set_xlabel('$Extreme\ values$')
return fig, ax1, ax2, ax3
class Harris1996(_GumbelBase):
__doc__ = docstringbase.format('Harris1996', '_ppp_harris1996')
def __init__(self, data = None, ppp = "Harris1996", **kwargs):
super().__init__(**kwargs)
try:
self.data = data
self.N = len(self.data)
self.ppp = ppp
self._ppp_harris1996()
except:
raise Exception('You should provide some data.')
#ppp stands for probability plotting position
def _ppp_harris1996(self):
"""
Review of the traditional Gumbel extreme value method for analysing yearly
maximum windspeeds or similar data, with a view to improving the
process. An improved set of plotting positions based on the mean values of
the order statistics are derived, together with a means of obtaining the
standard deviation of each position. This enables a fitting procedure using
weighted least squares to be adopted, which gives results similar to the
traditional Lieblein BLUE process, but with the advantages that it does not
require tabulated coefficients, is available for any number of data up to at
least 50, and provides a quantitative measure of goodness of fit.
**References**
Harris RI, (1996), 'Gumbel re-visited -- a new look at extreme value
statistics applied to wind speeds', Journal of Wind Engineering and
Industrial Aerodynamics, 59, 1-22.
"""
data = _np.sort(self.data)[::-1]
data = data ** self.preconditioning
N = self.N
ymean = _np.empty(N)
ymean2 = _np.empty(N)
variance = _np.empty(N)
weight = _np.empty(N)
def integ_ymean(x, N, NU):
return -_np.log(-_np.log(x)) * (x** (N-NU)) * ((1-x)**(NU-1))
def integ_ymean2(x, N, NU):
return ((-_np.log(-_np.log(x)))**2) * (x** (N-NU)) * ((1-x)**(NU-1))
for NU in range(1, N+1):
# calculation of ymean
a = _fact(N)/(_fact(NU - 1) * _fact(N - NU))
b, err = _integrate.quad(integ_ymean, 0, 1, args = (N, NU))
ymean[NU-1] = a * b
# calculation of ymean2
b, err = _integrate.quad(integ_ymean2, 0, 1, args = (N, NU))
ymean2[NU-1] = a * b
# calculation of variance
variance[NU-1] = _np.sqrt((ymean2[NU-1] - ymean[NU-1]**2))
# calculation of weights
denominator = _np.sum(1/variance**2)
for NU in range(1, N+1):
weight[NU-1] = (1 / variance[NU-1]**2) / denominator
# calculation of alpha
# Numerator
sum1 = _np.sum(weight * ymean * (data))
sum2 = _np.sum(weight * ymean)
sum3 = _np.sum(weight * (data))
# Denominator
sum4 = _np.sum(weight * (data**2))
sum5 = sum3 ** 2
# alpha
alpha = (sum1 - sum2 * sum3) / (sum4 - sum5)
# calculation of characteristic product
pi_upper = alpha * sum3 - sum2
# calculation of the extreme values for the return periods between 2 and 100 years
return_periods = _np.arange(2, 100 + 1)
v_ext_tmp = [(-_np.log(-_np.log(1 - 1 / return_period)) + pi_upper) / alpha
for return_period in return_periods]
v_ext = [v ** (1 / self.preconditioning) for v in v_ext_tmp]
# Calculation of the residual std dev
deviation = _np.sum(weight * ((ymean - alpha * data + pi_upper)**2))
residual_stddev = _np.sqrt(deviation * N / (N - 2))
self.results = {}
self.results['Y'] = ymean
self.results['weights'] = weight
self.results['data'] = data
self.results['Values for return period from 2 to 100 years'] = v_ext
self.results['slope'] = 1. / alpha
self.results['offset'] = pi_upper / alpha
self.results['characteristic product'] = pi_upper
self.results['alpha'] = alpha
self.results['residual standard deviation'] = residual_stddev
self.c = 0
self.loc = self.results['offset']
self.scale = self.results['slope']
self.distr = _st.gumbel_r(loc = self.loc,
scale = self.scale)
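# Illustrative usage sketch (not executed; `annual_maxima` stands for an
# assumed 1-D array of yearly extreme values, e.g. annual maximum wind speeds):
#
#     from skextremes.models.engineering import Harris1996
#     model = Harris1996(annual_maxima)
#     model.results['Values for return period from 2 to 100 years']  # 99 values
#     fig, ax1, ax2, ax3 = model.plot_summary()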
class Lieblein(_GumbelBase):
__doc__ = docstringbase.format('Lieblein', '_ppp_lieblein')
def __init__(self, data = None, ppp = "Lieblein", **kwargs):
super().__init__(**kwargs)
try:
self.data = data
self.N = len(self.data)
self.ppp = ppp
self._ppp_lieblein()
except:
raise Exception('You should provide some data.')
#ppp stands for probability plotting position
def _ppp_lieblein(self):
"""
Lieblein-BLUE (Best Linear Unbiased Estimator) to obtain extreme values
using a Type I (Gumbel) extreme value distribution.
It approaches the calculation of extremes using a very classical
methodology provided by Julius Lieblein. It exists just to check how
several consultants made the calculation of wind speed extremes in the wind
energy industry.
It calculates extremes using an adjustment of Gumbel distribution using
least squares fit and considering several probability-plotting positions
used in the wild.
**References**
Lieblein J, (1974), 'Efficient methods of Extreme-Value Methodology',
NBSIR 74-602, National Bureau of Standards, U.S. Department of Commerce.
"""
# coefficients for sample below or equal to 16 elements
ai = {
'n = 02': [0.916373, 0.083627],
'n = 03': [0.656320, 0.255714, 0.087966],
'n = 04': [0.510998, 0.263943, 0.153680, 0.071380],
'n = 05': [0.418934, 0.246282, 0.167609, 0.108824,
0.058350],
'n = 06': [0.355450, 0.225488, 0.165620, 0.121054,
0.083522, 0.048867],
'n = 07': [0.309008, 0.206260, 0.158590, 0.123223,
0.093747, 0.067331, 0.041841],
'n = 08': [0.273535, 0.189428, 0.150200, 0.121174,
0.097142, 0.075904, 0.056132, 0.036485],
'n = 09': [0.245539, 0.174882, 0.141789, 0.117357,
0.097218, 0.079569, 0.063400, 0.047957,
0.032291],
'n = 10': [0.222867, 0.162308, 0.133845, 0.112868,
0.095636, 0.080618, 0.066988, 0.054193,
0.041748, 0.028929],
'n = 11': [0.204123, 0.151384, 0.126522, 0.108226,
0.093234, 0.080222, 0.068485, 0.057578,
0.047159, 0.036886, 0.026180],
'n = 12': [0.188361, 0.141833, 0.119838, 0.103673,
0.090455, 0.079018, 0.068747, 0.059266,
0.050303, 0.041628, 0.032984, 0.023894],
'n = 13': [0.174916, 0.133422, 0.113759, 0.099323,
0.087540, 0.077368, 0.068264, 0.059900,
0.052047, 0.044528, 0.037177, 0.029790,
0.021965],
'n = 14': [0.163309, 0.125966, 0.108230, 0.095223,
0.084619, 0.075484, 0.067331, 0.059866,
0.052891, 0.046260, 0.039847, 0.033526,
0.027131, 0.020317],
'n = 15': [0.153184, 0.119314, 0.103196, 0.091384,
0.081767, 0.073495, 0.066128, 0.059401,
0.053140, 0.047217, 0.041529, 0.035984,
0.030484, 0.024887, 0.018894],
'n = 16': [0.144271, 0.113346, 0.098600, 0.087801,
0.079021, 0.071476, 0.064771, 0.058660,
0.052989, 0.047646, 0.042539, 0.037597,
0.032748, 0.027911, 0.022969, 0.017653]
}
bi = {
'n = 02': [-0.721348, 0.721348],
'n = 03': [-0.630541, 0.255816, 0.374725],
'n = 04': [-0.558619, 0.085903, 0.223919, 0.248797],
'n = 05': [-0.503127, 0.006534, 0.130455, 0.181656,
0.184483],
'n = 06': [-0.459273, -0.035992, 0.073199, 0.126724,
0.149534, 0.145807],
'n = 07': [-0.423700, -0.060698, 0.036192, 0.087339,
0.114868, 0.125859, 0.120141],
'n = 08': [-0.394187, -0.075767, 0.011124, 0.058928,
0.087162, 0.102728, 0.108074, 0.101936],
'n = 09': [-0.369242, -0.085203, -0.006486, 0.037977,
0.065574, 0.082654, 0.091965, 0.094369,
0.088391],
'n = 10': [-0.347830, -0.091158, -0.019210, 0.022179,
0.048671, 0.066064, 0.077021, 0.082771,
0.083552, 0.077940],
'n = 11': [-0.329210, -0.094869, -0.028604, 0.010032,
0.035284, 0.052464, 0.064071, 0.071381,
0.074977, 0.074830, 0.069644],
'n = 12': [-0.312840, -0.097086, -0.035655, 0.000534,
0.024548, 0.041278, 0.053053, 0.061112,
0.066122, 0.068357, 0.067671, 0.062906],
'n = 13': [-0.298313, -0.098284, -0.041013, -0.006997,
0.015836, 0.032014, 0.043710, 0.052101,
0.057862, 0.061355, 0.062699, 0.061699,
0.057330],
'n = 14': [-0.285316, -0.098775, -0.045120, -0.013039,
0.008690, 0.024282, 0.035768, 0.044262,
0.050418, 0.054624, 0.057083, 0.057829,
0.056652, 0.052642],
'n = 15': [-0.273606, -0.098768, -0.048285, -0.017934,
0.002773, 0.017779, 0.028988, 0.037452,
0.043798, 0.048415, 0.051534, 0.053267,
0.053603, 0.052334, 0.048648],
'n = 16': [-0.262990, -0.098406, -0.050731, -0.021933,
-0.002167, 0.012270, 0.023168, 0.031528,
0.037939, 0.042787, 0.046308, 0.048646,
0.049860, 0.049912, 0.048602, 0.045207]
}
data = _np.sort(self.data)
data = data ** self.preconditioning
N = self.N
        # hyp and coeffs are used to calculate coefficients for samples with more than 16 elements
# Hypergeometric distribution function
def hyp(n,m,i,t):
bin1 = _fact(i)/(_fact(t) * _fact(i - t))
bin2 = _fact(n-i)/(_fact(m-t) * _fact((n-i) - (m-t)))
bin3 = _fact(n)/(_fact(m) * _fact(n - m))
return bin1 * bin2 / bin3
# Coefficients
def coeffs(n, m):
aip = []
bip = []
for i in range(n):
a = 0
b = 0
for t in range(m):
try:
a += ai['n = {:02}'.format(m)][t] * ((t + 1) / (i + 1)) * hyp(n, m, i + 1, t + 1)
b += bi['n = {:02}'.format(m)][t] * ((t + 1) / (i + 1)) * hyp(n, m, i + 1, t + 1)
except:
pass
aip.append(a)
bip.append(b)
return aip, bip
def distr_params():
if N <= 16:
mu = _np.sum(_np.array(ai['n = {:02}'.format(N)]) * data) #parameter u in the paper
sigma = _np.sum(_np.array(bi['n = {:02}'.format(N)]) * data) #parameter b in the paper
else:
aip, bip = coeffs(N, 16)
mu = _np.sum(_np.array(aip) * data)
sigma = _np.sum(_np.array(bip) * data)
return mu, sigma
mu, sigma = distr_params()
return_period = _np.arange(2, 100 + 1)
P = ((_np.arange(N) + 1)) / (N + 1)
Y = -_np.log(-_np.log(P))
vref = (- sigma * _np.log(-_np.log(1 - 1 / return_period)) + mu)**(1 / self.preconditioning)
self.results = {}
self.results['Y'] = Y
self.results['data'] = data
self.results['Values for return period from 2 to 100 years'] = vref
self.results['slope'] = sigma
self.results['offset'] = mu
self.c = 0
self.loc = self.results['offset']
self.scale = self.results['slope']
self.distr = _st.gumbel_r(loc = self.loc,
scale = self.scale)
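# Illustrative usage sketch (assumed data): the Lieblein BLUE fit, optionally
# preconditioning the annual maxima (raising them to a power) before the
# Gumbel fit:
#
#     model = Lieblein(annual_maxima, preconditioning=2)
#     model.results['Values for return period from 2 to 100 years']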
class PPPLiterature(_GumbelBase):
__doc__ = docstringbase.format('several', """_ppp_adamowski
_ppp_beard
_ppp_blom
_ppp_gringorten
_ppp_hazen
_ppp_hirsch
_ppp_iec56
_ppp_landwehr
_ppp_laplace
_ppp_mm
_ppp_tukey
_ppp_weibull""")
def __init__(self, data = None, ppp = "Weibull", **kwargs):
super().__init__(**kwargs)
try:
self.data = data
self.N = len(self.data)
self.ppp = ppp
self._calculate_values(how = self.ppp)
except:
raise Exception('You should provide some data.')
#ppp stands for probability plotting position
def _calculate_values(self, how = None):
data = _np.sort(self.data)
data = data ** self.preconditioning
N = self.N
if how == 'Adamowski':
# see De, M., 2000. A new unbiased plotting position formula for gumbel distribution.
# Stochastic Envir. Res. Risk Asses., 14: 1-7.
P = ((_np.arange(N) + 1) - 0.25) / (N + 0.5)
if how == 'Beard':
# see De, M., 2000. A new unbiased plotting position formula for gumbel distribution.
# Stochastic Envir. Res. Risk Asses., 14: 1-7.
P = ((_np.arange(N) + 1) - 0.31) / (N + 0.38)
if how == 'Blom':
# see Adeboye, O.B. and M.O. Alatise, 2007. Performance of probability distributions and plotting
# positions in estimating the flood of River Osun at Apoje Sub-basin, Nigeria. Agric. Eng. Int.: CIGR J., Vol. 9.
P = ((_np.arange(N) + 1) - 0.375) / (N + 0.25)
if how == 'Chegodayev':
# see De, M., 2000. A new unbiased plotting position formula for gumbel distribution.
# Stochastic Envir. Res. Risk Asses., 14: 1-7.
P = ((_np.arange(N) + 1) - 0.3) / (N + 0.4)
if how == 'Cunnane':
# see Cunnane, C., 1978. Unbiased plotting positions: A review. J. Hydrol., 37: 205-222.
P = ((_np.arange(N) + 1) - 0.4) / (N + 0.2)
if how == 'Gringorten':
# see Adeboye, O.B. and M.O. Alatise, 2007. Performance of probability distributions and plotting
# positions in estimating the flood of River Osun at Apoje Sub-basin, Nigeria. Agric. Eng. Int.: CIGR J., Vol. 9.
P = ((_np.arange(N) + 1) - 0.44) / (N + 0.12)
if how == 'Hazen':
# see Adeboye, O.B. and M.O. Alatise, 2007. Performance of probability distributions and plotting
# positions in estimating the flood of River Osun at Apoje Sub-basin, Nigeria. Agric. Eng. Int.: CIGR J., Vol. 9.
P = ((_np.arange(N) + 1) - 0.5) / (N)
if how == 'Hirsch':
# see Jay, R.L., O. Kalman and M. Jenkins, 1998. Integrated planning and management for Urban water
# supplies considering multi uncertainties. Technical Report,
# Department of Civil and Environmental Engineering, Universities of California.
P = ((_np.arange(N) + 1) + 0.5) / (N + 1)
if how == 'IEC56':
            # see Fothergill, J.C., 1990. Estimating the cumulative probability of failure data points to be
            # plotted on Weibull and other probability paper. Electr. Insulation Transact., 25: 489-492.
P = ((_np.arange(N) + 1) - 0.5) / (N + 0.25)
if how == 'Landwehr':
# see Makkonen, L., 2008. Problem in the extreme value analysis. Structural Safety, 30: 405-419.
P = ((_np.arange(N) + 1) - 0.35) / (N)
if how == 'Laplace':
# see Jay, R.L., O. Kalman and M. Jenkins, 1998. Integrated planning and management for Urban
# water supplies considering multi uncertainties. Technical Report,
# Department of Civil and Environmental Engineering, Universities of California.
P = ((_np.arange(N) + 1) + 1) / (N + 2)
if how == 'McClung and Mears':
# see Makkonen, L., 2008. Problem in the extreme value analysis. Structural Safety, 30: 405-419.
P = ((_np.arange(N) + 1) - 0.4) / (N)
if how == 'Tukey':
# see Makkonen, L., 2008. Problem in the extreme value analysis. Structural Safety, 30: 405-419.
P = ((_np.arange(N) + 1) - 1/3) / (N + 1/3)
if how == 'Weibull':
            # see Hyndman, R.J. and Y. Fan, 1996. Sample quantiles in statistical packages. Am. Stat., 50: 361-365.
P = ((_np.arange(N) + 1)) / (N + 1)
Y = -_np.log(-_np.log(P))
slope, offset = _np.polyfit(Y, data, 1)
R2 = _np.corrcoef(Y, data)[0, 1]**2
#fit = slope * Y + offset
return_period = _np.arange(2,101)
vref = (- slope * _np.log(-_np.log(1 - 1 / return_period)) + offset)**(1 / self.preconditioning)
self.results = {}
self.results['data'] = data
self.results['Y'] = Y
self.results['Values for return period from 2 to 100 years'] = vref
self.results['R2'] = R2
self.results['slope'] = slope
self.results['offset'] = offset
self.c = 0
self.loc = self.results['offset']
self.scale = self.results['slope']
self.distr = _st.gumbel_r(loc = self.loc,
scale = self.scale)
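    # Illustrative sketch (assumed data): select a plotting-position formula by
    # name and read off, e.g., the 50-year value (return periods run from 2 to
    # 100 years, so index 48 corresponds to T = 50 years):
    #
    #     model = PPPLiterature(annual_maxima, ppp='Gringorten')
    #     model.results['Values for return period from 2 to 100 years'][48]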
def _ppp_adamowski(self):
"""
Perform the calculations using the Adamowski method available for the
probability positions.
Probability positions are defined as:
.. math::
            P = \\frac{i - 0.25}{N + 0.5}, \\qquad i = 1, \\ldots, N
**References**
De, M., 2000. A new unbiased plotting position formula for gumbel
distribution. Stochastic Envir. Res. Risk Asses., 14: 1-7.
"""
self._calculate_values(how = "Adamowski")
def _ppp_beard(self):
"""
Perform the calculations using the Beard method available for the
probability positions.
Probability positions are defined as:
.. math::
            P = \\frac{i - 0.31}{N + 0.38}, \\qquad i = 1, \\ldots, N
**References**
De, M., 2000. A new unbiased plotting position formula for gumbel
distribution. Stochastic Envir. Res. Risk Asses., 14: 1-7.
"""
self._calculate_values(how = "Beard")
def _ppp_blom(self):
"""
Perform the calculations using the Blom method available for the
probability positions.
Probability positions are defined as:
.. math::
            P = \\frac{i - 0.375}{N + 0.25}, \\qquad i = 1, \\ldots, N
**References**
De, M., 2000. A new unbiased plotting position formula for gumbel
distribution. Stochastic Envir. Res. Risk Asses., 14: 1-7.
"""
self._calculate_values(how = "Blom")
def _ppp_chegodayev(self):
"""
Perform the calculations using the Chegodayev method available for the
probability positions.
Probability positions are defined as:
.. math::
            P = \\frac{i - 0.3}{N + 0.4}, \\qquad i = 1, \\ldots, N
**References**
De, M., 2000. A new unbiased plotting position formula for gumbel
distribution. Stochastic Envir. Res. Risk Asses., 14: 1-7.
"""
self._calculate_values(how = "Chegodayev")
def _ppp_cunnane(self):
"""
Perform the calculations using the Cunnane method available for the
probability positions.
Probability positions are defined as:
.. math::
            P = \\frac{i - 0.4}{N + 0.2}, \\qquad i = 1, \\ldots, N
**References**
Cunnane, C., 1978. Unbiased plotting positions: A review.
J. Hydrol., 37: 205-222.
"""
self._calculate_values(how = "Cunnane")
def _ppp_gringorten(self):
"""
Perform the calculations using the Gringorten method available for the
probability positions.
Probability positions are defined as:
.. math::
            P = \\frac{i - 0.44}{N + 0.12}, \\qquad i = 1, \\ldots, N
**References**
Adeboye, O.B. and M.O. Alatise, 2007. Performance of probability
distributions and plotting positions in estimating the flood of
River Osun at Apoje Sub-basin, Nigeria. Agric. Eng. Int.:
CIGR J., Vol. 9.
"""
self._calculate_values(how = "Gringorten")
def _ppp_hazen(self):
"""
Perform the calculations using the Hazen method available for the
probability positions.
Probability positions are defined as:
.. math::
            P = \\frac{i - 0.5}{N}, \\qquad i = 1, \\ldots, N
**References**
Adeboye, O.B. and M.O. Alatise, 2007. Performance of probability
distributions and plotting positions in estimating the flood of
River Osun at Apoje Sub-basin, Nigeria. Agric. Eng. Int.:
CIGR J., Vol. 9.
"""
self._calculate_values(how = "Hazen")
def _ppp_hirsch(self):
"""
Perform the calculations using the Hirsch method available for the
probability positions.
Probability positions are defined as:
.. math::
            P = \\frac{i + 0.5}{N + 1}, \\qquad i = 1, \\ldots, N
**References**
Jay, R.L., O. Kalman and M. Jenkins, 1998. Integrated planning and
management for Urban water supplies considering multi uncertainties.
Technical Report, Department of Civil and Environmental Engineering,
Universities of California.
"""
self._calculate_values(how = "Hirsch")
def _ppp_iec56(self):
"""
Perform the calculations using the IEC56 method available for the
probability positions.
Probability positions are defined as:
.. math::
            P = \\frac{i - 0.5}{N + 0.25}, \\qquad i = 1, \\ldots, N
**References**
        Fothergill, J.C., 1990. Estimating the cumulative probability of
        failure data points to be plotted on Weibull and other probability
paper. Electr. Insulation Transact., 25: 489-492.
"""
self._calculate_values(how = "IEC56")
def _ppp_landwehr(self):
"""
Perform the calculations using the Landwehr method available for the
probability positions.
Probability positions are defined as:
.. math::
            P = \\frac{i - 0.35}{N}, \\qquad i = 1, \\ldots, N
**References**
Makkonen, L., 2008. Problem in the extreme value analysis.
Structural Safety, 30: 405-419.
"""
self._calculate_values(how = "Landwehr")
def _ppp_laplace(self):
"""
Perform the calculations using the Laplace method available for the
probability positions.
Probability positions are defined as:
.. math::
            P = \\frac{i + 1}{N + 2}, \\qquad i = 1, \\ldots, N
**References**
Jay, R.L., O. Kalman and M. Jenkins, 1998. Integrated planning and
management for Urban water supplies considering multi uncertainties.
Technical Report, Department of Civil and Environmental Engineering,
Universities of California.
"""
self._calculate_values(how = "Laplace")
def _ppp_mm(self):
"""
Perform the calculations using the McClung and Mears method available
for the probability positions.
Probability positions are defined as:
.. math::
            P = \\frac{i - 0.4}{N}, \\qquad i = 1, \\ldots, N
**References**
Makkonen, L., 2008. Problem in the extreme value analysis.
Structural Safety, 30: 405-419.
"""
self._calculate_values(how = "McClung and Mears")
def _ppp_tukey(self):
"""
Perform the calculations using the Tukey method available for the
probability positions.
Probability positions are defined as:
.. math::
            P = \\frac{i - 1/3}{N + 1/3}, \\qquad i = 1, \\ldots, N
**References**
Makkonen, L., 2008. Problem in the extreme value analysis.
Structural Safety, 30: 405-419.
"""
self._calculate_values(how = "Tukey")
def _ppp_weibull(self):
"""
Perform the calculations using the Weibull method available for the
probability positions.
Probability positions are defined as:
.. math::
            P = \\frac{i}{N + 1}, \\qquad i = 1, \\ldots, N
**References**
        Hyndman, R.J. and Y. Fan, 1996. Sample quantiles in statistical
packages. Am. Stat., 50: 361-365.
"""
self._calculate_values(how = "Weibull") | mit |
robintw/scikit-image | skimage/feature/util.py | 37 | 4729 | import numpy as np
from ..util import img_as_float
from .._shared.utils import assert_nD
class FeatureDetector(object):
def __init__(self):
self.keypoints_ = np.array([])
def detect(self, image):
"""Detect keypoints in image.
Parameters
----------
image : 2D array
Input image.
"""
raise NotImplementedError()
class DescriptorExtractor(object):
def __init__(self):
self.descriptors_ = np.array([])
def extract(self, image, keypoints):
"""Extract feature descriptors in image for given keypoints.
Parameters
----------
image : 2D array
Input image.
keypoints : (N, 2) array
Keypoint locations as ``(row, col)``.
"""
raise NotImplementedError()
def plot_matches(ax, image1, image2, keypoints1, keypoints2, matches,
keypoints_color='k', matches_color=None, only_matches=False):
"""Plot matched features.
Parameters
----------
ax : matplotlib.axes.Axes
Matches and image are drawn in this ax.
image1 : (N, M [, 3]) array
First grayscale or color image.
image2 : (N, M [, 3]) array
Second grayscale or color image.
keypoints1 : (K1, 2) array
First keypoint coordinates as ``(row, col)``.
keypoints2 : (K2, 2) array
Second keypoint coordinates as ``(row, col)``.
matches : (Q, 2) array
Indices of corresponding matches in first and second set of
descriptors, where ``matches[:, 0]`` denote the indices in the first
and ``matches[:, 1]`` the indices in the second set of descriptors.
keypoints_color : matplotlib color, optional
Color for keypoint locations.
matches_color : matplotlib color, optional
Color for lines which connect keypoint matches. By default the
color is chosen randomly.
only_matches : bool, optional
Whether to only plot matches and not plot the keypoint locations.
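    Examples
    --------
    A minimal, illustrative sketch with synthetic keypoints (not a doctest)::

        import numpy as np
        import matplotlib.pyplot as plt
        from skimage.feature import plot_matches

        image1 = np.random.rand(50, 50)
        image2 = np.random.rand(50, 50)
        keypoints1 = np.array([[10., 10.], [20., 30.]])
        keypoints2 = np.array([[12., 11.], [22., 28.]])
        matches = np.array([[0, 0], [1, 1]])

        fig, ax = plt.subplots()
        plot_matches(ax, image1, image2, keypoints1, keypoints2, matches)
        plt.show()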
"""
image1 = img_as_float(image1)
image2 = img_as_float(image2)
new_shape1 = list(image1.shape)
new_shape2 = list(image2.shape)
if image1.shape[0] < image2.shape[0]:
new_shape1[0] = image2.shape[0]
elif image1.shape[0] > image2.shape[0]:
new_shape2[0] = image1.shape[0]
if image1.shape[1] < image2.shape[1]:
new_shape1[1] = image2.shape[1]
elif image1.shape[1] > image2.shape[1]:
new_shape2[1] = image1.shape[1]
if new_shape1 != image1.shape:
new_image1 = np.zeros(new_shape1, dtype=image1.dtype)
new_image1[:image1.shape[0], :image1.shape[1]] = image1
image1 = new_image1
if new_shape2 != image2.shape:
new_image2 = np.zeros(new_shape2, dtype=image2.dtype)
new_image2[:image2.shape[0], :image2.shape[1]] = image2
image2 = new_image2
image = np.concatenate([image1, image2], axis=1)
offset = image1.shape
if not only_matches:
ax.scatter(keypoints1[:, 1], keypoints1[:, 0],
facecolors='none', edgecolors=keypoints_color)
ax.scatter(keypoints2[:, 1] + offset[1], keypoints2[:, 0],
facecolors='none', edgecolors=keypoints_color)
ax.imshow(image, interpolation='nearest', cmap='gray')
ax.axis((0, 2 * offset[1], offset[0], 0))
for i in range(matches.shape[0]):
idx1 = matches[i, 0]
idx2 = matches[i, 1]
if matches_color is None:
color = np.random.rand(3, 1)
else:
color = matches_color
ax.plot((keypoints1[idx1, 1], keypoints2[idx2, 1] + offset[1]),
(keypoints1[idx1, 0], keypoints2[idx2, 0]),
'-', color=color)
def _prepare_grayscale_input_2D(image):
image = np.squeeze(image)
assert_nD(image, 2)
return img_as_float(image)
def _mask_border_keypoints(image_shape, keypoints, distance):
"""Mask coordinates that are within certain distance from the image border.
Parameters
----------
image_shape : (2, ) array_like
Shape of the image as ``(rows, cols)``.
keypoints : (N, 2) array
Keypoint coordinates as ``(rows, cols)``.
distance : int
Image border distance.
Returns
-------
mask : (N, ) bool array
Mask indicating if pixels are within the image (``True``) or in the
border region of the image (``False``).
"""
rows = image_shape[0]
cols = image_shape[1]
mask = (((distance - 1) < keypoints[:, 0])
& (keypoints[:, 0] < (rows - distance + 1))
& ((distance - 1) < keypoints[:, 1])
& (keypoints[:, 1] < (cols - distance + 1)))
return mask
| bsd-3-clause |
RDCEP/psims | pysims/translators/dssat46/out2psimsdaily.py | 1 | 8414 | #!/usr/bin/env python
import calendar
import netCDF4
import numpy as np
import os
import pandas as pd
import sys
import traceback
from cStringIO import StringIO
from datetime import datetime, timedelta
from .. import translator
# Return int with num days per year
def days_per_year(year):
if calendar.isleap(year):
return 366
return 365
# Return a list of date indexes to be included in a yearly netcdf (limit to 730)
def indexes(year, ref_year):
dates = []
ref_day = datetime(ref_year, 1, 1)
first_index = (datetime(year, 1, 1) - ref_day).days
last_index = first_index + 730
return range(first_index, last_index)
# Get index of matching date from list
def get_date_index(dates, dt):
if len(dates) == 0:
return None
first = dates[0]
index = (dt - first).days
if index >= 0 and index < len(dates) and dates[index] == dt:
return index
else:
return None
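# Illustrative example: with a date list spanning 1980 (a leap year),
# get_date_index returns the list position of a date, or None when the date
# falls outside the list:
#     dates = [datetime(1980, 1, 1) + timedelta(days=x) for x in range(366)]
#     get_date_index(dates, datetime(1980, 3, 1))   # -> 60
#     get_date_index(dates, datetime(1981, 3, 1))   # -> None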
# Parse daily DSSAT output and append to a dictionary of numpy arrays
def read_daily(filename, variables, data, scens, scen_years, runs, num_years, lat, lon, fill_value, ref_year, dates):
daily_file = open(filename, 'r')
is_data = False
run = -1
indexes = {}
for line in daily_file:
line = line.strip()
if not line: continue
if line.startswith('*'):
is_data = False
elif line.startswith('@'):
headers = []
run += 1
scen_index = int(run * np.double(scen_years) / (num_years))
line = line.lstrip('@')
is_data = True
start_year = ref_year + (run % num_years)
if is_data:
line = line.split()
if len(headers) == 0:
for i,l in enumerate(line):
line[i] = l.replace('%', 'P')
headers.extend(line)
for header in headers:
indexes[header] = headers.index(header)
else:
year = int(line[indexes['YEAR']])
doy = int(line[indexes['DOY']])
dt = datetime(year, 1, 1) + timedelta(days=doy - 1)
dt_position = get_date_index(dates, dt)
for v in variables:
if dt_position is not None and v in indexes:
val = line[indexes[v]]
data[start_year][v][dt_position, scen_index, 0, 0] = val
return data
# Return a list of variables to be used per dssat filename
def variables_by_file(df, variables):
result = {}
for index,row in df.iterrows():
if row.variable in variables:
try:
result[row.filename].append(row.variable)
except KeyError:
result[row.filename] = [row.variable]
for v in variables:
if v not in [x for z in result.values() for x in z]:
print "Warning: Cannot find variable %s, skipping" % v
return result
class Out2PsimsDaily(translator.Translator):
def run(self, latidx, lonidx):
try:
num_scenarios = self.config.get('scens', '1')
num_years = self.config.get('num_years', '1')
variables = self.config.get('variables', '')
units = self.config.get('var_units', '')
delta = self.config.get('delta', '30')
ref_year = self.config.get('ref_year', '1958')
daily_csv = pd.read_csv('%s%s%s' % (os.path.dirname(__file__), os.sep, 'daily_variables.csv'))
outputfile = self.config.get_dict(self.translator_type, 'outputfile', default = '../../outputs/daily_%04d_%04d.psims.nc' % (latidx, lonidx))
scen_years = self.config.get('scen_years', num_years)
start_date = datetime(ref_year, 1, 1)
end_date = datetime(ref_year + num_years - 1, 12, 31)
dates = [start_date + timedelta(days=x) for x in range(0, (end_date-start_date).days+1)]
runs = num_scenarios
num_scenarios = int(num_scenarios * np.double(scen_years) / num_years)
latidx = int(latidx)
lonidx = int(lonidx)
delta = delta.split(',')
latdelta = np.double(delta[0]) / 60. # convert from arcminutes to degrees
londelta = latdelta if len(delta) == 1 else np.double(delta[1]) / 60.
scens = np.arange(num_scenarios)
variables = self.config.get('daily_variables').split(',')
variable_files = variables_by_file(daily_csv, variables)
lat = 90. - latdelta * (latidx - 0.5)
lon = -180. + londelta * (lonidx - 0.5)
fill_value = netCDF4.default_fillvals['f4']
data = {}
# Populate data array
for filename,varlist in variable_files.iteritems():
for v in varlist:
for start_year in range(ref_year, ref_year+num_years):
try:
data[start_year][v] = np.empty(shape=(len(dates), len(scens), 1, 1), dtype=float)
data[start_year][v].fill(fill_value)
except KeyError:
data[start_year] = {}
data[start_year][v] = np.empty(shape=(len(dates), len(scens), 1, 1), dtype=float)
data[start_year][v].fill(fill_value)
data = read_daily(filename, varlist, data, scens, scen_years, runs, num_years, 0, 0, fill_value, ref_year, dates)
# Save to NetCDF
for year in data:
current_outputfile = outputfile.replace('psims.nc', '%04d.psims.nc' % year)
netcdf_output = netCDF4.Dataset(current_outputfile, 'w', format='NETCDF4', fill_value=fill_value, zlib=None)
scen_dim = netcdf_output.createDimension('scen', len(scens))
scen_var = netcdf_output.createVariable('scen', 'i4', ('scen'))
scen_var.units = "count"
scen_var.long_name = "scenario"
scen_var[:] = scens[:]
time_dim = netcdf_output.createDimension('time', None)
time_var = netcdf_output.createVariable('time', 'i4', ('time'))
time_var.units = "days since %04d-%02d-%02d 00:00:00" % (start_date.year, start_date.month, start_date.day)
time_var.calendar = 'gregorian'
lat_dim = netcdf_output.createDimension('lat', 1)
lat_var = netcdf_output.createVariable('lat', 'f8', ('lat'))
lat_var.units = "degrees_north"
lat_var.long_name = "longitude"
lat_var[:] = lat
lon_dim = netcdf_output.createDimension('lon', 1)
lon_var = netcdf_output.createVariable('lon', 'f8', ('lon'))
lon_var.units = "degrees_east"
lon_var.long_name = "longitude"
lon_var[:] = lon
first_idx = None
last_idx = None
times = []
for v in data[year]:
times = indexes(year, ref_year)
time_var[:] = times
first_idx = times[0]
last_idx = times[-1]
for key,val in data[year].iteritems():
var = netcdf_output.createVariable(key, 'f4', ('time', 'scen', 'lat', 'lon'), fill_value=fill_value)
var[:] = val[first_idx:last_idx, :, 0, 0]
units = daily_csv['units'][daily_csv["variable"] == key].iloc[0]
if units:
var.units = units
long_name = daily_csv['long_name'][daily_csv["variable"] == key].iloc[0]
if long_name:
var.long_name = long_name
times = []
netcdf_output.close()
return True
except:
print "[%s] (%s/%s): %s" % (os.path.basename(__file__), latidx, lonidx, traceback.format_exc())
return False
| agpl-3.0 |
vtsuperdarn/davitpy | davitpy/pydarn/sdio/sdDataTypes.py | 1 | 50094 | # Copyright (C) 2012 VT SuperDARN Lab
# Full license can be found in LICENSE.txt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
The classes needed for reading, writing, and storing processed radar data,
such as grid and map data
.. moduleauthor:: AJ, 20130108
***************
* sdDataTypes *
***************
Classes
---------
sdDataPtr
sdBaseData
gridData
mapData
"""
import logging
from davitpy.utils import twoWayDict
class sdDataPtr():
"""A class which contains a pipeline to a data source
Parameters
----------
sTime : datetime
start time of the request
hemi : str
        hemisphere of the data of interest
fileType : str
the type of file: 'grd', 'grdex', 'grid2', 'map', 'mapex'
or 'map2'
eTime : Optional[datetime]
end time of the request. If none, then a full day is
requested.
src : Optional[str]
source of files: local of sftp
fileName : Optional[str]
name of the file opened
noCache : Optional[bool]
true to not use cached files, regenerate tmp files
local_dirfmt : Optional[str]
format of the local directory structure. Default: rcParams'
'DAVIT_SD_LOCAL_DIRFORMAT' value.
local_fnamefmt : Optional[str]
format of the local filenames. Default: rcParams'
'DAVIT_SD_LOCAL_FNAMEFMT' value.
local_dict : Optional[dict]
dictionary of the hemisphere and file type. Default: use
the values given for hemi and fileType.
remote_dirfmt : Optional[str]
format of the remote directory structure. Default: rcParams'
'DAVIT_SD_REMOTE_DIRFORMAT' value.
remote_fnamefmt : Optional[str]
format of the remote filenames. Default: rcParams'
'DAVIT_SD_REMOTE_FNAMEFMT' value.
remote_dict : Optional[dict]
dictionary of the hemisphere and file type. Default: use
the values given for hemi and fileType.
remote_site : Optional[str]
remote sftp server to use. Default: use value given in
rcParams' 'DB' value or 'sd-data.ece.vt.edu' if not set in
rcParams.
username : Optional[str]
username to use for an sftp connection. Default: rcParams'
'DBREADUSER' value.
password : Optional[str]
password to use for an sftp connection. Default: rcParams'
'DBREADPASS' value.
port : Optional[str]
        port to use for an sftp connection. Default: rcParams'
'DB_PORT' value.
tmpdir : Optional[str]
directory to download and source files from locally. Default:
rcParams' 'DAVIT_TMPDIR' value.
Attributes
-----------
sTime : (datetime)
start time of the request
eTime : (datetime)
end time of the request
hemi : str
        hemisphere of the data of interest
fType : str
the file type, 'grd', 'map', 'grdex', 'mapex', 'grid2'
or 'map2'
recordIndex : (dict)
look up dictionary for file offsets for scan times
Private Attributes
--------------------
ptr : (file or mongodb query object)
the data pointer (different depending on mongodo or dmap)
fd : (int)
the file descriptor
fileName : (str)
name of the file opened
nocache : (bool)
do not use cached files, regenerate tmp files
src : (str)
local or sftp
Methods
--------
open
close
createIndex
Index the offsets for all records and scan boundaries
offsetSeek
        Seek file to requested byte offset, checking to make sure it is in the
record index
offsetTell
Current byte offset
rewind
rewind file back to the beginning
readRec
read record at current file offset
readScan
read scan associated with current record
readAll
read all records
Written by AJ 20130607
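    Example
    -------
    A minimal, illustrative sketch (assumes local or sftp access is configured
    for davitpy)::

        import datetime as dt
        from davitpy.pydarn.sdio.sdDataTypes import sdDataPtr

        ptr = sdDataPtr(dt.datetime(2011, 1, 1), hemi='north',
                        fileType='mapex', eTime=dt.datetime(2011, 1, 1, 2))
        myrec = ptr.readRec()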
"""
def __init__(self, sTime, hemi, fileType, eTime=None, src=None,
fileName=None, noCache=False, local_dirfmt=None,
local_fnamefmt=None, local_dict=None, remote_dirfmt=None,
remote_fnamefmt=None, remote_dict=None, remote_site=None,
username=None, password=None, port=None, tmpdir=None,
remove=False, try_file_types=True):
# from davitpy.pydarn.sdio import sdDataPtr
from davitpy.utils.timeUtils import datetimeToEpoch
import datetime as dt
import os
import glob
import string
from davitpy.pydarn.radar import network
import davitpy.pydarn.sdio.fetchUtils as futils
import davitpy
self.sTime = sTime
self.eTime = eTime
self.hemi = hemi
self.fType = fileType
self.dType = None
self.recordIndex = None
self.__filename = fileName
self.__nocache = noCache
self.__src = src
self.__fd = None
self.__ptr = None
# check inputs
assert isinstance(self.sTime, dt.datetime), \
logging.error('sTime must be datetime object')
assert hemi is not None, \
logging.error("hemi must not be None")
assert self.eTime is None or isinstance(self.eTime, dt.datetime), \
logging.error('eTime must be datetime object or None')
assert(fileType == 'grd' or fileType == 'grdex' or
fileType == 'map' or fileType == 'mapex' or
fileType == 'grid2' or fileType == 'map2'), \
logging.error("fileType must be one of: grd, grdex, grid2, "
"map, mapex, or map2")
assert fileName is None or isinstance(fileName, str), \
logging.error('fileName must be None or a string')
assert src is None or src == 'local' or src == 'sftp', \
logging.error('src must be one of: None, local, or sftp')
if self.eTime is None:
self.eTime = self.sTime + dt.timedelta(days=1)
filelist = []
arr = [fileType]
if try_file_types:
file_array = {'grd': ['grd', 'grdex', 'grid2'],
'map': ['map', 'mapex', 'map2']}
try:
file_key = fileType[0:3]
file_array[file_key].pop(file_array[file_key].index(fileType))
arr.extend(file_array[file_key])
except:
pass
# a temporary directory to store a temporary file
if tmpdir is None:
try:
tmpdir = davitpy.rcParams['DAVIT_TMPDIR']
except:
logging.warning("Unable to set temporary directory with "
"rcParams. Using extra default of /tmp/sd/")
tmpdir = '/tmp/sd/'
d = os.path.dirname(tmpdir)
if not os.path.exists(d):
os.makedirs(d)
cached = False
# First, check if a specific filename was given
if fileName is not None:
try:
if not os.path.isfile(fileName):
estr = 'problem reading [{:}]: file does '.format(fileName)
logging.error('{:s}not exist'.format(estr))
return None
epoch = int(datetimeToEpoch(dt.datetime.now()))
outname = "{:s}{:d}".format(tmpdir, epoch)
if(string.find(fileName, '.bz2') != -1):
outname = string.replace(fileName, '.bz2', '')
command = 'bunzip2 -c {:s} > {:s}'.format(fileName,
outname)
elif(string.find(fileName, '.gz') != -1):
outname = string.replace(fileName, '.gz', '')
command = 'gunzip -c {:s} > {:s}'.format(fileName, outname)
else:
command = 'cp {:s} {:s}'.format(fileName, outname)
logging.info('performing: {:s}'.format(command))
os.system(command)
filelist.append(outname)
except Exception, e:
logging.error(e)
logging.error('problem reading file [{:s}]'.format(fileName))
return None
# Next, check for a cached file
if fileName is not None and not noCache:
try:
if not cached:
command = "{:s}????????.??????.????????.????"\
.format(tmpdir)
command = "{:s}??.{:s}.{:s}".format(command, hemi,
fileType)
for f in glob.glob(command):
try:
ff = string.replace(f, tmpdir, '')
# check time span of file
t1 = dt.datetime(int(ff[0:4]), int(ff[4:6]),
int(ff[6:8]), int(ff[9:11]),
int(ff[11:13]), int(ff[13:15]))
t2 = dt.datetime(int(ff[16:20]), int(ff[20:22]),
int(ff[22:24]), int(ff[25:27]),
int(ff[27:29]), int(ff[29:31]))
# check if file covers our timespan
if t1 <= self.sTime and t2 >= self.eTime:
cached = True
filelist.append(f)
logging.info('Found cached file '
'{:s}'.format(f))
break
except Exception, e:
logging.warning(e)
except Exception, e:
logging.warning(e)
# Next, LOOK LOCALLY FOR FILES
if not cached and (src is None or src == 'local') and fileName is None:
try:
for ftype in arr:
estr = "\nLooking locally for {:s} files ".format(ftype)
estr = "{:s}with hemi: {:s}".format(estr, hemi)
logging.info(estr)
# If the following aren't already, in the near future
# they will be assigned by a configuration dictionary
# much like matplotlib's rcsetup.py (matplotlibrc)
if local_dirfmt is None:
try:
local_dirfmt = \
davitpy.rcParams['DAVIT_SD_LOCAL_DIRFORMAT']
except:
local_dirfmt = '/sd-data/{year}/{ftype}/{hemi}/'
estr = "Config entry DAVIT_SD_LOCAL_DIRFORMAT not "
estr = "{:s}set, using default: ".format(estr)
logging.info("{:s}{:s}".format(estr, local_dirfmt))
if local_dict is None:
local_dict = {'hemi': hemi, 'ftype': ftype}
if 'ftype' in local_dict.keys():
local_dict['ftype'] = ftype
if local_fnamefmt is None:
try:
local_fnamefmt = \
davitpy.rcParams['DAVIT_SD_LOCAL_FNAMEFMT']\
.split(',')
except:
local_fnamefmt = ['{date}.{hemi}.{ftype}']
estr = 'Environment variable DAVIT_SD_LOCAL_'
estr = '{:s}FNAMEFMT not set, using '.format(estr)
estr = '{:s}default: {:s}'.format(estr,
local_fnamefmt)
logging.info(estr)
outdir = tmpdir
# fetch the local files
temp = futils.fetch_local_files(self.sTime, self.eTime,
local_dirfmt, local_dict,
outdir, local_fnamefmt,
remove=remove)
# check to see if the files actually have data between
# stime and etime
valid = self.__validate_fetched(temp, self.sTime,
self.eTime)
filelist = [x[0] for x in zip(temp, valid) if x[1]]
invalid_files = [x[0] for x in zip(temp, valid)
if not x[1]]
if len(invalid_files) > 0:
for f in invalid_files:
estr = 'removing invalid file: {:s}'.format(f)
logging.info(estr)
os.system('rm {:s}'.format(f))
# If we have valid files then continue
if len(filelist) > 0:
estr = 'found {:s} data in local files'.format(ftype)
logging.info(estr)
self.fType = ftype
self.dType = 'dmap'
fileType = ftype
break
else:
estr = "couldn't find any local {:s}".format(ftype)
logging.info(estr)
except Exception, e:
logging.warning(e)
estr = "Unable to fetch any local data, attempting to fetch "
logging.warning("{:s}remote data".format(estr))
src = None
# Finally, check the sftp server if we have not yet found files
if((src is None or src == 'sftp') and self.__ptr is None and
len(filelist) == 0 and fileName is None):
for ftype in arr:
estr = 'Looking on the remote SFTP server for '
logging.info('{:s}{:s} files'.format(estr, ftype))
try:
# If the following aren't already, in the near future
# they will be assigned by a configuration dictionary
# much like matplotlib's rcsetup.py (matplotlibrc)
if remote_site is None:
try:
remote_site = davitpy.rcParams['DB']
except:
remote_site = 'sd-data.ece.vt.edu'
estr = 'Config entry DB not set, using default: '
logging.info("{:s}{:s}".format(estr, remote_site))
if username is None:
try:
username = davitpy.rcParams['DBREADUSER']
except:
username = 'sd_dbread'
estr = 'Config entry DBREADUSER not set, using '
estr = '{:s}default: {:s}'.format(estr, username)
logging.info(estr)
if password is None:
try:
password = davitpy.rcParams['DBREADPASS']
except:
password = '5d'
estr = 'Config entry DBREADPASS not set, using '
estr = '{:s}default: {:s}'.format(estr, password)
logging.info(estr)
if remote_dirfmt is None:
try:
remote_dirfmt = \
davitpy.rcParams['DAVIT_SD_REMOTE_DIRFORMAT']
except:
remote_dirfmt = 'data/{year}/{ftype}/{hemi}/'
estr = 'Config entry DAVIT_SD_REMOTE_DIRFORMAT not'
estr = '{:s} set, using default: '.format(estr)
estr = '{:s}{:s}'.format(estr, remote_dirfmt)
logging.info(estr)
if remote_dict is None:
remote_dict = {'ftype': ftype, 'hemi': hemi}
if 'ftype' in remote_dict.keys():
remote_dict['ftype'] = ftype
if remote_fnamefmt is None:
try:
remote_fnamefmt = \
davitpy.rcParams['DAVIT_SD_REMOTE_FNAMEFMT']\
.split(',')
except:
remote_fnamefmt = ['{date}.{hemi}.{ftype}']
estr = 'Config entry DAVIT_SD_REMOTE_FNAMEFMT not '
estr = '{:s}set, using default: '.format(estr)
estr = '{:s}{:s}'.format(estr, remote_fnamefmt)
logging.info(estr)
if port is None:
try:
port = davitpy.rcParams['DB_PORT']
except:
port = '22'
estr = 'Config entry DB_PORT not set, using '
                            estr = '{:s}default'.format(estr)
logging.info('{:s}: {:s}'.format(estr, port))
outdir = tmpdir
# Now fetch the files
temp = futils.fetch_remote_files(self.sTime, self.eTime,
'sftp', remote_site,
remote_dirfmt,
remote_dict, outdir,
remote_fnamefmt,
username=username,
password=password,
port=port, remove=remove)
# check to see if the files actually have data between
# stime and etime
valid = self.__validate_fetched(temp, self.sTime,
self.eTime)
filelist = [x[0] for x in zip(temp, valid) if x[1]]
invalid_files = [x[0] for x in zip(temp, valid)
if not x[1]]
if len(invalid_files) > 0:
for f in invalid_files:
estr = "removing invalid file: {:s}".format(f)
logging.info(estr)
os.system("rm {:s}".format(f))
# If we have valid files then continue
if len(filelist) > 0:
estr = 'found {:s} data on sftp server'.format(ftype)
logging.info(estr)
self.fType = ftype
self.dType = 'dmap'
fileType = ftype
break
else:
estr = "couldn't find {:s} data on sftp ".format(ftype)
logging.info("{:s}server".format(estr))
except Exception, e:
logging.warning(e)
logging.warning('problem reading from sftp server')
# check if we have found files
if len(filelist) != 0:
# concatenate the files into a single file
if not cached:
logging.info('Concatenating all the files in to one')
# choose a temp file name with time span info for cacheing
tmpname = '{:s}{:s}.{:s}'\
.format(tmpdir, self.sTime.strftime("%Y%m%d"),
self.sTime.strftime("%H%M%S"))
tmpname = '{:s}.{:s}.{:s}'\
.format(tmpname, self.eTime.strftime("%Y%m%d"),
self.eTime.strftime("%H%M%S"))
tmpname = '{:s}.{:s}.{:s}'.format(tmpname, hemi, fileType)
command = "cat {:s} > {:s}".format(string.join(filelist),
tmpname)
logging.info("performing: {:s}".format(command))
os.system(command)
for filename in filelist:
command = "rm {:s}".format(filename)
logging.info("performing: {:s}".format(command))
os.system(command)
else:
tmpname = filelist[0]
self.fType = fileType
self.dType = 'dmap'
self.__filename = tmpname
self.open()
if self.__ptr is not None:
if self.dType is None:
self.dType = 'dmap'
else:
logging.info('Sorry, we could not find any data for you :(')
def __repr__(self):
my_str = 'sdDataPtr\n'
for key, var in self.__dict__.iteritems():
my_str = "{:s}{:s} = {:s}\n".format(my_str, key, str(var))
return my_str
def __del__(self):
self.close()
def __iter__(self):
return self
def next(self):
beam = self.readRec()
if beam is None:
raise StopIteration
else:
return beam
def open(self):
"""open the associated dmap filename."""
import os
self.__fd = os.open(self.__filename, os.O_RDONLY)
self.__ptr = os.fdopen(self.__fd)
def createIndex(self):
import datetime as dt
import davitpy.pydarn.dmapio as dmapio
recordDict = {}
starting_offset = self.offsetTell()
# rewind back to start of file
self.rewind()
while 1:
# read the next record from the dmap file
offset = dmapio.getDmapOffset(self.__fd)
dfile = dmapio.readDmapRec(self.__fd)
if dfile is None:
# if we dont have valid data, clean up, get out
logging.info('reached end of data')
break
else:
try:
dtime = dt.datetime(dfile['start.year'],
dfile['start.month'],
dfile['start.day'],
dfile['start.hour'],
dfile['start.minute'],
int(dfile['start.second']))
dfile['time'] = (dtime -
dt.datetime(1970, 1, 1)).total_seconds()
except Exception, e:
logging.warning(e)
logging.warning('problem reading time from file')
break
dfile_utc = dt.datetime.utcfromtimestamp(dfile['time'])
if dfile_utc >= self.sTime and dfile_utc <= self.eTime:
rectime = dt.datetime.utcfromtimestamp(dfile['time'])
recordDict[rectime] = offset
# reset back to before building the index
self.recordIndex = recordDict
self.offsetSeek(starting_offset)
return recordDict
def offsetSeek(self, offset, force=False):
"""jump to dmap record at supplied byte offset.
Require offset to be in record index list unless forced.
"""
from davitpy.pydarn.dmapio import setDmapOffset, getDmapOffset
if force:
return setDmapOffset(self.__fd, offset)
else:
if self.recordIndex is None:
self.createIndex()
if offset in self.recordIndex.values():
return setDmapOffset(self.__fd, offset)
else:
return getDmapOffset(self.__fd)
def offsetTell(self):
"""jump to dmap record at supplied byte offset.
"""
from davitpy.pydarn.dmapio import getDmapOffset
return getDmapOffset(self.__fd)
def rewind(self):
"""jump to beginning of dmap file."""
from davitpy.pydarn.dmapio import setDmapOffset
return setDmapOffset(self.__fd, 0)
def readRec(self):
"""A function to read a single record of radar data from a radDataPtr
object
Returns
--------
mydata : (gridData, mapData, or NoneType)
An object filled with the specified type of data. Will return None
when there is no more data in the pointer to read.
"""
import davitpy.pydarn.dmapio as dmapio
import datetime as dt
# check input
if self.__ptr is None:
logging.error('the pointer does not point to any data')
return None
if self.__ptr.closed:
logging.error('the file pointer is closed')
return None
# do this until we reach the requested start time
# and have a parameter match
while 1:
offset = dmapio.getDmapOffset(self.__fd)
dfile = dmapio.readDmapRec(self.__fd)
# check for valid data
try:
dtime = dt.datetime(dfile['start.year'], dfile['start.month'],
dfile['start.day'], dfile['start.hour'],
dfile['start.minute'],
int(dfile['start.second']))
dfile['time'] = (dtime -
dt.datetime(1970, 1, 1)).total_seconds()
except Exception, e:
logging.warning(e)
logging.warning('problem reading time from file')
break
if(dfile is None or
dt.datetime.utcfromtimestamp(dfile['time']) > self.eTime):
# if we dont have valid data, clean up, get out
logging.info('reached end of data')
return None
# check that we're in the time window, and that we have a
# match for the desired params
if(dt.datetime.utcfromtimestamp(dfile['time']) >= self.sTime and
dt.datetime.utcfromtimestamp(dfile['time']) <= self.eTime):
# fill the beamdata object, checking the file type
if (self.fType == 'grd' or self.fType == 'grdex' or
self.fType == 'grid2'):
mydata = gridData(dataDict=dfile)
elif (self.fType == 'map' or self.fType == 'mapex' or
self.fType == 'map2'):
mydata = mapData(dataDict=dfile)
else:
logging.error('unrecognized file type')
return None
mydata.recordDict = dfile
mydata.fType = self.fType
mydata.fPtr = self
mydata.offset = offset
return mydata
def close(self):
"""close associated dmap file."""
import os
if self.__ptr is not None:
self.__ptr.close()
self.__fd = None
def __validate_fetched(self, filelist, stime, etime):
""" This function checks if the files in filelist contain data
for the start and end times (stime,etime) requested by a user.
Parameters
-----------
filelist : (list)
List of filenames to validate
stime : (datetime.datetime)
Starting time for files
etime : (datetime.datetime)
Ending time for files
Returns
--------
valid : (list of bool)
List of booleans corresponding to each filename. True if a file
contains data in the time range (stime,etime), False if not
"""
# This method will need some modification for it to work with
# file formats that are NOT DMAP (i.e. HDF5). Namely, the dmapio
# specific code will need to be modified (readDmapRec).
import datetime as dt
import numpy as np
from davitpy.pydarn.dmapio import readDmapRec
valid = []
for f in filelist:
logging.info('Checking file: {:s}'.format(f))
stimes = []
etimes = []
# Open the file and create a file pointer
self.__filename = f
self.open()
# Iterate through the file and grab the start and end times of
# each record
while 1:
# read the next record from the dmap file
dfile = readDmapRec(self.__fd)
if(dfile is None):
break
else:
temp = dt.datetime(int(dfile['start.year']),
int(dfile['start.month']),
int(dfile['start.day']),
int(dfile['start.hour']),
int(dfile['start.minute']),
int(dfile['start.second']))
stimes.append(temp)
temp = dt.datetime(int(dfile['end.year']),
int(dfile['end.month']),
int(dfile['end.day']),
int(dfile['end.hour']),
int(dfile['end.minute']),
int(dfile['end.second']))
etimes.append(temp)
# Close the file and clean up
self.close()
self.__ptr = None
inds = np.where((np.array(stimes) >= stime) &
(np.array(stimes) <= etime))
inde = np.where((np.array(etimes) >= stime) &
(np.array(etimes) <= etime))
if np.size(inds) > 0 or np.size(inde) > 0:
valid.append(True)
else:
valid.append(False) # ISSUE 217: FASTER TO NOT USE APPEND
return valid
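# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original davitpy module): because
# sdDataPtr implements __iter__/next() on top of readRec(), an already
# constructed pointer can be walked record by record.  'ptr' is a
# placeholder for any open sdDataPtr instance.
def _iterate_records_sketch(ptr):
    """Log the start time of every record readable from an open sdDataPtr."""
    for record in ptr:
        # each 'record' is a gridData or mapData object; iteration stops
        # automatically once readRec() returns None
        logging.info('read record starting at {:s}'.format(str(record.sTime)))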
class sdBaseData():
"""A base class for the processed SD data types. This allows for single
definition of common routines
Methods
---------
updateValsFromDict
converts a dict from a dmap file to baseData
Written by AJ 20130607
"""
def updateValsFromDict(self, adict):
"""A function to to fill an sdBaseData object with the data in a
dictionary that is returned from the reading of a dmap file
Parameters
------------
adict : (dict)
the dictionary containing the radar data
Returns
----------
Void
Notes
-------
In general, users will not need to use this.
Written by AJ 20121130
"""
import datetime as dt
syr = 1
smo = 1
sdy = 1
shr = 1
smt = 1
ssc = 1
eyr = 1
emo = 1
edy = 1
ehr = 1
emt = 1
esc = 1
for key, val in adict.iteritems():
if key == 'start.year':
syr = adict['start.year']
elif key == 'start.month':
smo = adict['start.month']
elif key == 'start.day':
sdy = adict['start.day']
elif key == 'start.hour':
shr = adict['start.hour']
elif key == 'start.minute':
smt = adict['start.minute']
elif key == 'start.second':
ssc = int(adict['start.second'])
elif key == 'end.year':
eyr = adict['end.year']
elif key == 'end.month':
emo = adict['end.month']
elif key == 'end.day':
edy = adict['end.day']
elif key == 'end.hour':
ehr = adict['end.hour']
elif key == 'end.minute':
emt = adict['end.minute']
elif key == 'end.second':
esc = int(adict['end.second'])
elif 'vector.' in key:
if isinstance(self, sdVector):
name = key.replace('vector.', '')
name = name.replace('.', '')
if hasattr(self, name):
setattr(self, name, val)
elif 'model.' in key:
if isinstance(self, sdModel):
name = key.replace('model.', '')
name = name.replace('.', '')
if hasattr(self, name):
setattr(self, name, val)
elif '+' in key:
name = key.replace('+', 'p')
if hasattr(self, name):
setattr(self, name, val)
else:
name = key.replace('.', '')
if hasattr(self, name):
setattr(self, name, val)
if isinstance(self, gridData) or isinstance(self, mapData):
self.sTime = dt.datetime(syr, smo, sdy, shr, smt, ssc)
self.eTime = dt.datetime(eyr, emo, edy, ehr, emt, esc)
def __repr__(self):
mystr = ''
for key, val in self.__dict__.iteritems():
mystr = "{:s}{:s} = {:s}\n".format(mystr, str(key), str(val))
return mystr
class gridData(sdBaseData):
""" a class to contain a record of gridded data, extends sdBaseData
Attributes
-----------
sTime : (datetime)
start time of the record
eTime : (datetime)
end time of the record
stid : (list)
a list of the station IDs in the record, by radar
nvec : (list)
a list of the number of vectors in the record, by radar
freq : (list)
a list of the transmit frequencies, in kHz, by radar
programid : (list)
a list of the program IDs, by radar
noisemean : (list)
a list of the mean noise level, by radar
noisesd : (list)
a list of the standard deviation of noise level, by radar
gsct : (list)
a list of flags indicating whether ground scatter was excluded from the
gridding, by radar
vmin : (list)
a list of minimum allowed Doppler velocity, by radar
vmax : (list)
a list of the maximum allowed Doppler velocity, by radar
pmin : (list)
a list of the minimum allowed power level, by radar
pmax : (list)
a list of the maximum allowed power level, by radar
wmin : (list)
a list of the minimum allowed spectral width, by radar
wmax : (list)
a list of the maximum allowed spectral width, by radar
vemin : (list)
a list of the minimum allowed velocity error, by radar
vemax : (list)
a list of the maximum allowed velocity error, by radar
vector : (sdVector)
an object containing all of the vector.* elements from the file
Written by AJ 20130607
"""
# initialize the struct
def __init__(self, dataDict=None):
self.sTime = None
self.eTime = None
self.stid = None
self.channel = None
self.nvec = None
self.freq = None
self.programid = None
self.noisemean = None
self.noisesd = None
self.gsct = None
self.vmin = None
self.vmax = None
self.pmin = None
self.pmax = None
self.wmin = None
self.wmax = None
self.vemin = None
self.vemax = None
self.vector = sdVector(dataDict=dataDict)
if dataDict is not None:
self.updateValsFromDict(dataDict)
# HERE
class mapData(sdBaseData):
""" a class to contain a record of map potential data, extends sdBaseData
Attributes
------------
sTime : (datetime)
start time of the record
eTime : (datetime)
end time of the record
dopinglevel : (int)
modelwt : (int)
errorwt : (int)
IMFflag : (int)
IMFdelay : (int)
IMFBx : (float)
the Bx component of the IMF
IMFBy : (float)
the By component of the IMF
IMFBz : (float)
the Bz component of the IMF
modelangle : (string)
modellevel : (string)
hemi : (int)
A flag to denote the hemisphere, with 1=north, 2=south?
fitorder : (int)
the order of the spherical harmonic fit
latmin : (float)
the minimum latitude in the spherical harmonic fit
chisqr : (double)
Chi squared value of the spherical harmonic fit
chisqrdat : (double)
Something pertaining to the Chi squared value
rmserr : (double)
the root mean square error of the spherical harmonic fit
lonshft : (double)
latshft : (double)
mltstart : (double)
Magnetic local time of the start
mltend : (double)
Magnetic local time of the end
mltav : (double)
Average magnetic local time (?)
potdrop : (double)
the cross polar cap potential, in Volts
potdroperr : (int)
the error in the cross polar cap potential, in Volts
potmax : (double)
Maximum of the cross polar cap potential ?
potmaxerr : (double)
Error of the previous value
potmin : (double)
Minimum of the cross polar cap potential ?
potminerr : (double)
Error of the previous value
grid : (gridData)
an object to hold all of the grid data in the record
N : (list)
Np1 : (list)
Np2 : (list)
Np3 : (list)
model : (sdModel)
an object to hold the model data in the record
Written by AJ 20130607
"""
# initialize the struct
def __init__(self, dataDict=None):
self.sTime = None
self.eTime = None
self.dopinglevel = None
self.modelwt = None
self.errorwt = None
self.IMFflag = None
self.IMFdelay = None
self.IMFBx = None
self.IMFBy = None
self.IMFBz = None
self.modelangle = None
self.modellevel = None
self.hemi = None
self.fitorder = None
self.latmin = None
self.chisqr = None
self.chisqrdat = None
self.rmserr = None
self.lonshft = None
self.latshft = None
self.mltstart = None
self.mltend = None
self.mltav = None
self.potdrop = None
self.potdroperr = None
self.potmax = None
self.potmaxerr = None
self.potmin = None
self.potminerr = None
self.grid = gridData(dataDict=dataDict)
self.N = None
self.Np1 = None
self.Np2 = None
self.Np3 = None
self.model = sdModel(dataDict=dataDict)
if(dataDict is not None):
self.updateValsFromDict(dataDict)
class sdVector(sdBaseData):
""" a class to contain vector records of gridded data, extends sdBaseData
Attributes
-----------
mlat : (list)
the magnetic latitude of the grid cells
mlon : (list)
the magnetic longitude of the grid cells
kvect : (list)
the kvectors of the vectors in the grid cells
stid : (int)
the station ID of the radar which made the measurement of the vector
in the grid cell
channel : (int)
the channel of the radar which made the measurement of the vector in
the grid cell
index : (int)
velmedian : (int)
the median velocity of the vector
velsd : (float)
the standard deviation of the velocity of the vector
pwrmedian : (float)
the median power of the vector
pwrsd : (float)
the standard deviation of the power of the vector
wdtmedian : (string)
the median spectral width of the vector
wdtsd : (string)
the standard deviation of the spectral width of the vector
Written by AJ 20130607
"""
# initialize the struct
def __init__(self, dataDict=None):
self.mlat = None
self.mlon = None
self.kvect = None
self.stid = None
self.channel = None
self.index = None
self.velmedian = None
self.velsd = None
self.pwrmedian = None
self.pwrsd = None
self.wdtmedian = None
self.wdtsd = None
if(dataDict is not None):
self.updateValsFromDict(dataDict)
class sdModel(sdBaseData):
""" a class to contain model records of map poential data, extends
sdBaseData
Attributes
-------------
mlat : (list)
Magnetic latitude
mlon : (list)
Magnetic longitude
kvect : (list)
Positional vector
velmedian : (list)
Median velocity at the specified location
boundarymlat : (int)
Bounding magnetic latitude
boundarymlon : (int)
Bounding magnetic longitude
Written by AJ 20130607
"""
# initialize the struct
def __init__(self, dataDict=None):
self.mlat = None
self.mlon = None
self.kvect = None
self.velmedian = None
self.boundarymlat = None
self.boundarymlon = None
if(dataDict is not None):
self.updateValsFromDict(dataDict)
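# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original davitpy module): how the
# sdBaseData key mapping behaves.  The dictionary below is a hypothetical,
# minimal dmap-style record; real grd/grid2 records carry many more keys.
def _grid_record_sketch():
    """Build a gridData object from a tiny hand-made dmap-style dict."""
    fake_record = {'start.year': 2012, 'start.month': 7, 'start.day': 10,
                   'start.hour': 0, 'start.minute': 2, 'start.second': 0.0,
                   'end.year': 2012, 'end.month': 7, 'end.day': 10,
                   'end.hour': 0, 'end.minute': 4, 'end.second': 0.0,
                   'stid': [33], 'nvec': [25],
                   'vector.mlat': [75.5], 'vector.mlon': [310.2],
                   'vector.vel.median': [250.0]}
    grid = gridData(dataDict=fake_record)
    # start.*/end.* become grid.sTime/grid.eTime; plain keys such as 'stid'
    # map straight onto attributes; the nested sdVector built inside
    # gridData.__init__ strips the 'vector.' prefix and the remaining dots,
    # so 'vector.vel.median' ends up on grid.vector.velmedian
    return grid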
# TESTING CODE
if __name__ == "__main__":
import os
import datetime as dt
import hashlib
import davitpy
try:
tmpdir = davitpy.rcParams['DAVIT_TMPDIR']
except:
tmpdir = '/tmp/sd/'
hemi = 'north'
channel = None
stime = dt.datetime(2012, 7, 10)
etime = dt.datetime(2012, 7, 11, 2)
expected_filename = "20120710.000000.20120711.020000.north.mapex"
expected_path = os.path.join(tmpdir, expected_filename)
expected_filesize = 33008910
expected_md5sum = "1656c94ca564c9a96821496397eed037"
print "Expected File:", expected_path
print "\nRunning sftp grab example for sdDataPtr."
print "Environment variables used:"
print " DB:", davitpy.rcParams['DB']
print " DB_PORT:", davitpy.rcParams['DB_PORT']
print " DBREADUSER:", davitpy.rcParams['DBREADUSER']
print " DBREADPASS:", davitpy.rcParams['DBREADPASS']
print " DAVIT_SD_REMOTE_DIRFORMAT:", \
davitpy.rcParams['DAVIT_SD_REMOTE_DIRFORMAT']
print " DAVIT_SD_REMOTE_FNAMEFMT:", \
davitpy.rcParams['DAVIT_SD_REMOTE_FNAMEFMT']
print " DAVIT_SD_REMOTE_TIMEINC:", \
davitpy.rcParams['DAVIT_SD_REMOTE_TIMEINC']
print " DAVIT_TMPDIR:", davitpy.rcParams['DAVIT_TMPDIR']
src = 'sftp'
if os.path.isfile(expected_path):
os.remove(expected_path)
vtptr = sdDataPtr(stime, hemi, eTime=etime, fileType='mapex', src=src,
noCache=True)
if os.path.isfile(expected_path):
statinfo = os.stat(expected_path)
print "Actual File Size: ", statinfo.st_size
print "Expected File Size:", expected_filesize
md5sum = hashlib.md5(open(expected_path).read()).hexdigest()
print "Actual Md5sum: ", md5sum
print "Expected Md5sum:", expected_md5sum
if expected_md5sum != md5sum:
print "Error: Cached dmap file has unexpected md5sum."
else:
print "Error: Failed to create expected cache file"
print "Let's read two records from the remote sftp server:"
try:
ptr = vtptr
mydata = ptr.readRec()
print mydata.recordDict['time']
mydata = ptr.readRec()
print mydata.recordDict['time']
print "Close pointer"
ptr.close()
print "reopen pointer"
ptr.open()
print "Should now be back at beginning:"
mydata = ptr.readRec()
print mydata.recordDict['time']
print "What is the current offset:"
print ptr.offsetTell()
print "Try to seek to offset 4, shouldn't work:"
print ptr.offsetSeek(4)
print "What is the current offset:"
print ptr.offsetTell()
except:
print "record read failed for some reason"
ptr.close()
del vtptr
print "\nRunning local grab example for sdDataPtr."
print "Environment variables used:"
print " DAVIT_SD_LOCAL_DIRFORMAT:", \
davitpy.rcParams['DAVIT_SD_LOCAL_DIRFORMAT']
print " DAVIT_SD_LOCAL_FNAMEFMT:", \
davitpy.rcParams['DAVIT_SD_LOCAL_FNAMEFMT']
print " DAVIT_SD_LOCAL_TIMEINC:", \
davitpy.rcParams['DAVIT_SD_LOCAL_TIMEINC']
print " DAVIT_TMPDIR:", davitpy.rcParams['DAVIT_TMPDIR']
src = 'local'
if os.path.isfile(expected_path):
os.remove(expected_path)
localptr = sdDataPtr(stime, hemi, eTime=etime, src=src, fileType='mapex',
noCache=True)
if os.path.isfile(expected_path):
statinfo = os.stat(expected_path)
print "Actual File Size: ", statinfo.st_size
print "Expected File Size:", expected_filesize
md5sum = hashlib.md5(open(expected_path).read()).hexdigest()
print "Actual Md5sum: ", md5sum
print "Expected Md5sum:", expected_md5sum
if expected_md5sum != md5sum:
print "Error: Cached dmap file has unexpected md5sum."
else:
print "Error: Failed to create expected cache file"
print "Let's read two records:"
try:
ptr = localptr
mydata = ptr.readRec()
print mydata.recordDict['time']
mydata = ptr.readRec()
print mydata.recordDict['time']
print "Close pointer"
ptr.close()
print "reopen pointer"
ptr.open()
print "Should now be back at beginning:"
mydata = ptr.readRec()
print mydata.recordDict['time']
except:
print "record read failed for some reason"
ptr.close()
del localptr
print ""
print "Now lets grab an RST4.1 and later map2 file type"
hemi = 'south'
channel = None
stime = dt.datetime(2017, 7, 10)
etime = dt.datetime(2017, 7, 11, 2)
expected_filename = "20170710.000000.20170711.020000.south.map2"
expected_path = os.path.join(tmpdir, expected_filename)
expected_filesize = 28284376
expected_md5sum = "de91b6bc239e0ff069732b1ecba5ecf1"
print "Expected File:", expected_path
print "\nRunning sftp grab example for sdDataPtr."
print "Environment variables used:"
print " DB: sd-data.ece.vt.edu"
print " DB_PORT: 22"
print " DBREADUSER: sd_dbread"
print " DBREADPASS: 5d"
print " DAVIT_SD_REMOTE_DIRFORMAT: " + \
"data/{year}/{ftype}/{hemi}/"
print " DAVIT_SD_REMOTE_FNAMEFMT: " + \
"{date}.{hemi}.{ftype}"
print " DAVIT_SD_REMOTE_TIMEINC: 24"
print " DAVIT_TMPDIR:", davitpy.rcParams['DAVIT_TMPDIR']
src = 'sftp'
if os.path.isfile(expected_path):
os.remove(expected_path)
vtptr = sdDataPtr(stime, hemi, eTime=etime, fileType='map2', src=src,
remote_dirfmt="data/{year}/{ftype}/{hemi}/",
remote_fnamefmt="{date}.{hemi}.{ftype}",
remote_site="sd-data.ece.vt.edu",
username="sd_dbread", password="5d", port="22",
noCache=True)
if os.path.isfile(expected_path):
statinfo = os.stat(expected_path)
print "Actual File Size: ", statinfo.st_size
print "Expected File Size:", expected_filesize
md5sum = hashlib.md5(open(expected_path).read()).hexdigest()
print "Actual Md5sum: ", md5sum
print "Expected Md5sum:", expected_md5sum
if expected_md5sum != md5sum:
print "Error: Cached dmap file has unexpected md5sum."
else:
print "Error: Failed to create expected cache file"
print ""
print "Now lets grab an RST4.1 and later grid2 file type"
hemi = 'north'
channel = None
stime = dt.datetime(2017, 7, 10)
etime = dt.datetime(2017, 7, 11, 2)
expected_filename = "20170710.000000.20170711.020000.north.grid2"
expected_path = os.path.join(tmpdir, expected_filename)
expected_filesize = 11931978
expected_md5sum = "c7f555249fc18244f61bb118cc71b2e1"
print "Expected File:", expected_path
print "\nRunning sftp grab example for sdDataPtr."
print "Environment variables used:"
print " DB: sd-data.ece.vt.edu"
print " DB_PORT: 22"
print " DBREADUSER: sd_dbread"
print " DBREADPASS: 5d"
print " DAVIT_SD_REMOTE_DIRFORMAT: " + \
"data/{year}/{ftype}/{hemi}/"
print " DAVIT_SD_REMOTE_FNAMEFMT: " + \
"{date}.{hemi}.{ftype}"
print " DAVIT_SD_REMOTE_TIMEINC: 24"
print " DAVIT_TMPDIR:", davitpy.rcParams['DAVIT_TMPDIR']
src = 'sftp'
if os.path.isfile(expected_path):
os.remove(expected_path)
vtptr = sdDataPtr(stime, hemi, eTime=etime, fileType='grid2', src=src,
remote_dirfmt="data/{year}/{ftype}/{hemi}/",
remote_fnamefmt="{date}.{hemi}.{ftype}",
remote_site="sd-data.ece.vt.edu",
username="sd_dbread", password="5d", port="22",
noCache=True)
if os.path.isfile(expected_path):
statinfo = os.stat(expected_path)
print "Actual File Size: ", statinfo.st_size
print "Expected File Size:", expected_filesize
md5sum = hashlib.md5(open(expected_path).read()).hexdigest()
print "Actual Md5sum: ", md5sum
print "Expected Md5sum:", expected_md5sum
if expected_md5sum != md5sum:
print "Error: Cached dmap file has unexpected md5sum."
else:
print "Error: Failed to create expected cache file"
| gpl-3.0 |
jdnz/qml-rg | Meeting 3/pacman_agent.py | 2 | 3946 | # Coding group: Alexandre,...
import gym
import numpy as np
from skimage.transform import resize
from skimage.color import rgb2gray
from collections import deque
from matplotlib import pyplot as plt
import tensorflow
from keras.layers import Convolution2D, Dense, Flatten, Input, merge
from keras.models import Sequential
#https://elitedatascience.com/keras-tutorial-deep-learning-in-python
# =============================
# ATARI Environment Wrapper
# I took the class from this link
# https://github.com/tflearn/tflearn/blob/master/examples/reinforcement_learning/atari_1step_qlearning.py
# =============================
class AtariEnvironment(object):
"""
Small wrapper for gym atari environments.
Responsible for preprocessing screens and holding on to a screen buffer
of size action_repeat from which environment state is constructed.
"""
def __init__(self, gym_env, action_repeat):
self.env = gym_env
self.action_repeat = action_repeat
# Agent available actions, such as LEFT, RIGHT, NOOP, etc...
#To find the meaning of the actions, you should type env.get_action_meanings()
self.gym_actions = range(gym_env.action_space.n)
# Screen buffer of size action_repeat to be able to build
# state arrays of size [1, action_repeat, 84, 84]
self.state_buffer = deque()
def get_initial_state(self):
"""
Resets the atari game, clears the state buffer.
"""
# Clear the state buffer
self.state_buffer = deque()
x_t = self.env.reset()
x_t = self.get_preprocessed_frame(x_t)
s_t = np.stack([x_t for i in range(self.action_repeat)], axis=0)
for i in range(self.action_repeat-1):
self.state_buffer.append(x_t)
return s_t
def get_preprocessed_frame(self, observation):
"""
0) Atari frames: 210 x 160
1) Get image grayscale
2) Rescale image 110 x 84
3) Crop center 84 x 84 (you can crop top/bottom according to the game)
"""
return resize(rgb2gray(observation), (110, 84))[13:110 - 13, :]
def step(self, action_index):
"""
Executes an action in the gym environment.
Builds current state (concatenation of action_repeat-1 previous
frames and current one). Pops oldest frame, adds current frame to
the state buffer. Returns current state.
"""
#x_t1 is the screen
#r_t is the reward
#terminal is boolean indicating if the game is finished or not
#info
x_t1, r_t, terminal, info = self.env.step(self.gym_actions[action_index])
x_t1 = self.get_preprocessed_frame(x_t1)
previous_frames = np.array(self.state_buffer)
s_t1 = np.empty((self.action_repeat, 84, 84))
s_t1[:self.action_repeat-1, :] = previous_frames
s_t1[self.action_repeat-1] = x_t1
# Pop the oldest frame, add the current frame to the queue
self.state_buffer.popleft()
self.state_buffer.append(x_t1)
return s_t1, r_t, terminal, info
env = gym.make('MsPacman-v0')
env.reset()
c=AtariEnvironment(env,2)
s_t=c.get_initial_state()
#for _ in range(1000):
# s_t1, r_t, terminal, info=c.step(env.action_space.sample())
#print(r_t)
# env.render()
# #env.step(env.action_space.sample())
# env.step(0)
s_t1, r_t, terminal, info=c.step(env.action_space.sample())
nactions=env.action_space.n
#model = Sequential()
#h = Convolution2D(16, 8, 8, subsample=(4, 4),border_mode='same', activation='relu')(Input((1,84,84)))
#h = Convolution2D(32, 4, 4, subsample=(2, 2),border_mode='same', activation='relu')(h)
#h = Flatten()(h)
#h = Dense(256, activation='relu')(h)
#V = Dense(nactions)(h)
model = Sequential()
model.add(Convolution2D(16, 8, 8, subsample=(4, 4),border_mode='same', activation='relu',input_shape=(1,84,84)))
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
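# ---------------------------------------------------------------------------
# Hypothetical sketch (not part of the original exercise code): one way to
# finish the Q-network started above and pick actions epsilon-greedily.
# The layer sizes follow the commented-out functional-API draft, the
# channels-first input shape (action_repeat=2 stacked 84x84 frames) matches
# the AtariEnvironment instance above, and the names q_model and
# epsilon_greedy_action are assumptions rather than the group's final design.
q_model = Sequential()
q_model.add(Convolution2D(16, 8, 8, subsample=(4, 4), border_mode='same',
                          activation='relu', input_shape=(2, 84, 84)))
q_model.add(Convolution2D(32, 4, 4, subsample=(2, 2), border_mode='same',
                          activation='relu'))
q_model.add(Flatten())
q_model.add(Dense(256, activation='relu'))
q_model.add(Dense(nactions))  # one Q-value per available action
q_model.compile(loss='mse', optimizer='adam')

def epsilon_greedy_action(state, epsilon=0.1):
    """Return a random action with probability epsilon, else argmax of Q."""
    if np.random.rand() < epsilon:
        return env.action_space.sample()
    q_values = q_model.predict(state[np.newaxis])  # shape (1, nactions)
    return int(np.argmax(q_values))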
| gpl-3.0 |
pvlib/pvlib-python | pvlib/tests/test_solarposition.py | 2 | 38305 | import calendar
import datetime
import warnings
import numpy as np
import pandas as pd
from .conftest import assert_frame_equal, assert_series_equal
from numpy.testing import assert_allclose
import pytest
from pvlib.location import Location
from pvlib import solarposition, spa
from .conftest import requires_ephem, requires_spa_c, requires_numba
# setup times and locations to be tested.
times = pd.date_range(start=datetime.datetime(2014, 6, 24),
end=datetime.datetime(2014, 6, 26), freq='15Min')
tus = Location(32.2, -111, 'US/Arizona', 700) # no DST issues possible
times_localized = times.tz_localize(tus.tz)
tol = 5
@pytest.fixture()
def expected_solpos_multi():
return pd.DataFrame({'elevation': [39.872046, 39.505196],
'apparent_zenith': [50.111622, 50.478260],
'azimuth': [194.340241, 194.311132],
'apparent_elevation': [39.888378, 39.521740]},
index=['2003-10-17T12:30:30Z', '2003-10-18T12:30:30Z'])
@pytest.fixture()
def expected_rise_set_spa():
# for Golden, CO, from NREL SPA website
times = pd.DatetimeIndex([datetime.datetime(2015, 1, 2),
datetime.datetime(2015, 8, 2),
]).tz_localize('MST')
sunrise = pd.DatetimeIndex([datetime.datetime(2015, 1, 2, 7, 21, 55),
datetime.datetime(2015, 8, 2, 5, 0, 27)
]).tz_localize('MST').tolist()
sunset = pd.DatetimeIndex([datetime.datetime(2015, 1, 2, 16, 47, 43),
datetime.datetime(2015, 8, 2, 19, 13, 58)
]).tz_localize('MST').tolist()
transit = pd.DatetimeIndex([datetime.datetime(2015, 1, 2, 12, 4, 45),
datetime.datetime(2015, 8, 2, 12, 6, 58)
]).tz_localize('MST').tolist()
return pd.DataFrame({'sunrise': sunrise,
'sunset': sunset,
'transit': transit},
index=times)
@pytest.fixture()
def expected_rise_set_ephem():
# for Golden, CO, from USNO websites
times = pd.DatetimeIndex([datetime.datetime(2015, 1, 1),
datetime.datetime(2015, 1, 2),
datetime.datetime(2015, 1, 3),
datetime.datetime(2015, 8, 2),
]).tz_localize('MST')
sunrise = pd.DatetimeIndex([datetime.datetime(2015, 1, 1, 7, 22, 0),
datetime.datetime(2015, 1, 2, 7, 22, 0),
datetime.datetime(2015, 1, 3, 7, 22, 0),
datetime.datetime(2015, 8, 2, 5, 0, 0)
]).tz_localize('MST').tolist()
sunset = pd.DatetimeIndex([datetime.datetime(2015, 1, 1, 16, 47, 0),
datetime.datetime(2015, 1, 2, 16, 48, 0),
datetime.datetime(2015, 1, 3, 16, 49, 0),
datetime.datetime(2015, 8, 2, 19, 13, 0)
]).tz_localize('MST').tolist()
transit = pd.DatetimeIndex([datetime.datetime(2015, 1, 1, 12, 4, 0),
datetime.datetime(2015, 1, 2, 12, 5, 0),
datetime.datetime(2015, 1, 3, 12, 5, 0),
datetime.datetime(2015, 8, 2, 12, 7, 0)
]).tz_localize('MST').tolist()
return pd.DataFrame({'sunrise': sunrise,
'sunset': sunset,
'transit': transit},
index=times)
# the physical tests are run at the same time as the NREL SPA test.
# pyephem reproduces the NREL result to 2 decimal places.
# this doesn't mean that one code is better than the other.
@requires_spa_c
def test_spa_c_physical(expected_solpos, golden_mst):
times = pd.date_range(datetime.datetime(2003, 10, 17, 12, 30, 30),
periods=1, freq='D', tz=golden_mst.tz)
ephem_data = solarposition.spa_c(times, golden_mst.latitude,
golden_mst.longitude,
pressure=82000,
temperature=11)
expected_solpos.index = times
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
@requires_spa_c
def test_spa_c_physical_dst(expected_solpos, golden):
times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.spa_c(times, golden.latitude,
golden.longitude,
pressure=82000,
temperature=11)
expected_solpos.index = times
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
def test_spa_python_numpy_physical(expected_solpos, golden_mst):
times = pd.date_range(datetime.datetime(2003, 10, 17, 12, 30, 30),
periods=1, freq='D', tz=golden_mst.tz)
ephem_data = solarposition.spa_python(times, golden_mst.latitude,
golden_mst.longitude,
pressure=82000,
temperature=11, delta_t=67,
atmos_refract=0.5667,
how='numpy')
expected_solpos.index = times
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
def test_spa_python_numpy_physical_dst(expected_solpos, golden):
times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.spa_python(times, golden.latitude,
golden.longitude,
pressure=82000,
temperature=11, delta_t=67,
atmos_refract=0.5667,
how='numpy')
expected_solpos.index = times
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
def test_sun_rise_set_transit_spa(expected_rise_set_spa, golden):
# solution from NREL SPA web calculator
south = Location(-35.0, 0.0, tz='UTC')
times = pd.DatetimeIndex([datetime.datetime(1996, 7, 5, 0),
datetime.datetime(2004, 12, 4, 0)]
).tz_localize('UTC')
sunrise = pd.DatetimeIndex([datetime.datetime(1996, 7, 5, 7, 8, 15),
datetime.datetime(2004, 12, 4, 4, 38, 57)]
).tz_localize('UTC').tolist()
sunset = pd.DatetimeIndex([datetime.datetime(1996, 7, 5, 17, 1, 4),
datetime.datetime(2004, 12, 4, 19, 2, 3)]
).tz_localize('UTC').tolist()
transit = pd.DatetimeIndex([datetime.datetime(1996, 7, 5, 12, 4, 36),
datetime.datetime(2004, 12, 4, 11, 50, 22)]
).tz_localize('UTC').tolist()
frame = pd.DataFrame({'sunrise': sunrise,
'sunset': sunset,
'transit': transit}, index=times)
result = solarposition.sun_rise_set_transit_spa(times, south.latitude,
south.longitude,
delta_t=65.0)
result_rounded = pd.DataFrame(index=result.index)
# need to iterate because to_datetime does not accept 2D data
# the rounding fails on pandas < 0.17
for col, data in result.iteritems():
result_rounded[col] = data.dt.round('1s')
assert_frame_equal(frame, result_rounded)
# test for Golden, CO compare to NREL SPA
result = solarposition.sun_rise_set_transit_spa(
expected_rise_set_spa.index, golden.latitude, golden.longitude,
delta_t=65.0)
# round to nearest minute
result_rounded = pd.DataFrame(index=result.index)
# need to iterate because to_datetime does not accept 2D data
for col, data in result.iteritems():
result_rounded[col] = data.dt.round('s').tz_convert('MST')
assert_frame_equal(expected_rise_set_spa, result_rounded)
@requires_ephem
def test_sun_rise_set_transit_ephem(expected_rise_set_ephem, golden):
# test for Golden, CO compare to USNO, using local midnight
result = solarposition.sun_rise_set_transit_ephem(
expected_rise_set_ephem.index, golden.latitude, golden.longitude,
next_or_previous='next', altitude=golden.altitude, pressure=0,
temperature=11, horizon='-0:34')
# round to nearest minute
result_rounded = pd.DataFrame(index=result.index)
for col, data in result.iteritems():
result_rounded[col] = data.dt.round('min').tz_convert('MST')
assert_frame_equal(expected_rise_set_ephem, result_rounded)
# test next sunrise/sunset with times
times = pd.DatetimeIndex([datetime.datetime(2015, 1, 2, 3, 0, 0),
datetime.datetime(2015, 1, 2, 10, 15, 0),
datetime.datetime(2015, 1, 2, 15, 3, 0),
datetime.datetime(2015, 1, 2, 21, 6, 7)
]).tz_localize('MST')
expected = pd.DataFrame(index=times,
columns=['sunrise', 'sunset'],
dtype='datetime64[ns]')
expected['sunrise'] = pd.Series(index=times, data=[
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'sunrise'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 3), 'sunrise'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 3), 'sunrise'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 3), 'sunrise']])
expected['sunset'] = pd.Series(index=times, data=[
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'sunset'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'sunset'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'sunset'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 3), 'sunset']])
expected['transit'] = pd.Series(index=times, data=[
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'transit'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'transit'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 3), 'transit'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 3), 'transit']])
result = solarposition.sun_rise_set_transit_ephem(times,
golden.latitude,
golden.longitude,
next_or_previous='next',
altitude=golden.altitude,
pressure=0,
temperature=11,
horizon='-0:34')
# round to nearest minute
result_rounded = pd.DataFrame(index=result.index)
for col, data in result.iteritems():
result_rounded[col] = data.dt.round('min').tz_convert('MST')
assert_frame_equal(expected, result_rounded)
# test previous sunrise/sunset with times
times = pd.DatetimeIndex([datetime.datetime(2015, 1, 2, 3, 0, 0),
datetime.datetime(2015, 1, 2, 10, 15, 0),
datetime.datetime(2015, 1, 3, 3, 0, 0),
datetime.datetime(2015, 1, 3, 13, 6, 7)
]).tz_localize('MST')
expected = pd.DataFrame(index=times,
columns=['sunrise', 'sunset'],
dtype='datetime64[ns]')
expected['sunrise'] = pd.Series(index=times, data=[
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 1), 'sunrise'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'sunrise'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'sunrise'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 3), 'sunrise']])
expected['sunset'] = pd.Series(index=times, data=[
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 1), 'sunset'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 1), 'sunset'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'sunset'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'sunset']])
expected['transit'] = pd.Series(index=times, data=[
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 1), 'transit'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 1), 'transit'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 2), 'transit'],
expected_rise_set_ephem.loc[datetime.datetime(2015, 1, 3), 'transit']])
result = solarposition.sun_rise_set_transit_ephem(
times,
golden.latitude, golden.longitude, next_or_previous='previous',
altitude=golden.altitude, pressure=0, temperature=11, horizon='-0:34')
# round to nearest minute
result_rounded = pd.DataFrame(index=result.index)
for col, data in result.iteritems():
result_rounded[col] = data.dt.round('min').tz_convert('MST')
assert_frame_equal(expected, result_rounded)
# test with different timezone
times = times.tz_convert('UTC')
expected = expected.tz_convert('UTC')  # reuse result from previous test
for col, data in expected.iteritems():
expected[col] = data.dt.tz_convert('UTC')
result = solarposition.sun_rise_set_transit_ephem(
times,
golden.latitude, golden.longitude, next_or_previous='previous',
altitude=golden.altitude, pressure=0, temperature=11, horizon='-0:34')
# round to nearest minute
result_rounded = pd.DataFrame(index=result.index)
for col, data in result.iteritems():
result_rounded[col] = data.dt.round('min').tz_convert(times.tz)
assert_frame_equal(expected, result_rounded)
@requires_ephem
def test_sun_rise_set_transit_ephem_error(expected_rise_set_ephem, golden):
with pytest.raises(ValueError):
solarposition.sun_rise_set_transit_ephem(expected_rise_set_ephem.index,
golden.latitude,
golden.longitude,
next_or_previous='other')
tz_naive = pd.DatetimeIndex([datetime.datetime(2015, 1, 2, 3, 0, 0)])
with pytest.raises(ValueError):
solarposition.sun_rise_set_transit_ephem(tz_naive,
golden.latitude,
golden.longitude,
next_or_previous='next')
@requires_ephem
def test_sun_rise_set_transit_ephem_horizon(golden):
times = pd.DatetimeIndex([datetime.datetime(2016, 1, 3, 0, 0, 0)
]).tz_localize('MST')
# center of sun disk
center = solarposition.sun_rise_set_transit_ephem(
times,
latitude=golden.latitude, longitude=golden.longitude)
edge = solarposition.sun_rise_set_transit_ephem(
times,
latitude=golden.latitude, longitude=golden.longitude, horizon='-0:34')
result_rounded = (edge['sunrise'] - center['sunrise']).dt.round('min')
sunrise_delta = datetime.datetime(2016, 1, 3, 7, 17, 11) - \
datetime.datetime(2016, 1, 3, 7, 21, 33)
expected = pd.Series(index=times,
data=sunrise_delta,
name='sunrise').dt.round('min')
assert_series_equal(expected, result_rounded)
@requires_ephem
def test_pyephem_physical(expected_solpos, golden_mst):
times = pd.date_range(datetime.datetime(2003, 10, 17, 12, 30, 30),
periods=1, freq='D', tz=golden_mst.tz)
ephem_data = solarposition.pyephem(times, golden_mst.latitude,
golden_mst.longitude, pressure=82000,
temperature=11)
expected_solpos.index = times
assert_frame_equal(expected_solpos.round(2),
ephem_data[expected_solpos.columns].round(2))
@requires_ephem
def test_pyephem_physical_dst(expected_solpos, golden):
times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.pyephem(times, golden.latitude,
golden.longitude, pressure=82000,
temperature=11)
expected_solpos.index = times
assert_frame_equal(expected_solpos.round(2),
ephem_data[expected_solpos.columns].round(2))
@requires_ephem
def test_calc_time():
import pytz
import math
# validation from USNO solar position calculator online
epoch = datetime.datetime(1970, 1, 1)
epoch_dt = pytz.utc.localize(epoch)
loc = tus
loc.pressure = 0
actual_time = pytz.timezone(loc.tz).localize(
datetime.datetime(2014, 10, 10, 8, 30))
lb = pytz.timezone(loc.tz).localize(datetime.datetime(2014, 10, 10, tol))
ub = pytz.timezone(loc.tz).localize(datetime.datetime(2014, 10, 10, 10))
alt = solarposition.calc_time(lb, ub, loc.latitude, loc.longitude,
'alt', math.radians(24.7))
az = solarposition.calc_time(lb, ub, loc.latitude, loc.longitude,
'az', math.radians(116.3))
actual_timestamp = (actual_time - epoch_dt).total_seconds()
assert_allclose((alt.replace(second=0, microsecond=0) -
epoch_dt).total_seconds(), actual_timestamp)
assert_allclose((az.replace(second=0, microsecond=0) -
epoch_dt).total_seconds(), actual_timestamp)
@requires_ephem
def test_earthsun_distance():
times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
periods=1, freq='D')
distance = solarposition.pyephem_earthsun_distance(times).values[0]
assert_allclose(1, distance, atol=0.1)
def test_ephemeris_physical(expected_solpos, golden_mst):
times = pd.date_range(datetime.datetime(2003, 10, 17, 12, 30, 30),
periods=1, freq='D', tz=golden_mst.tz)
ephem_data = solarposition.ephemeris(times, golden_mst.latitude,
golden_mst.longitude,
pressure=82000,
temperature=11)
expected_solpos.index = times
expected_solpos = np.round(expected_solpos, 2)
ephem_data = np.round(ephem_data, 2)
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
def test_ephemeris_physical_dst(expected_solpos, golden):
times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.ephemeris(times, golden.latitude,
golden.longitude, pressure=82000,
temperature=11)
expected_solpos.index = times
expected_solpos = np.round(expected_solpos, 2)
ephem_data = np.round(ephem_data, 2)
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
def test_ephemeris_physical_no_tz(expected_solpos, golden_mst):
times = pd.date_range(datetime.datetime(2003, 10, 17, 19, 30, 30),
periods=1, freq='D')
ephem_data = solarposition.ephemeris(times, golden_mst.latitude,
golden_mst.longitude,
pressure=82000,
temperature=11)
expected_solpos.index = times
expected_solpos = np.round(expected_solpos, 2)
ephem_data = np.round(ephem_data, 2)
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
def test_get_solarposition_error(golden):
times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
periods=1, freq='D', tz=golden.tz)
with pytest.raises(ValueError):
solarposition.get_solarposition(times, golden.latitude,
golden.longitude,
pressure=82000,
temperature=11,
method='error this')
@pytest.mark.parametrize("pressure, expected", [
(82000, 'expected_solpos'),
(90000, pd.DataFrame(
np.array([[39.88997, 50.11003, 194.34024, 39.87205, 14.64151,
50.12795]]),
columns=['apparent_elevation', 'apparent_zenith', 'azimuth',
'elevation', 'equation_of_time', 'zenith'],
index=['2003-10-17T12:30:30Z']))
])
def test_get_solarposition_pressure(
pressure, expected, golden, expected_solpos):
times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.get_solarposition(times, golden.latitude,
golden.longitude,
pressure=pressure,
temperature=11)
if isinstance(expected, str) and expected == 'expected_solpos':
expected = expected_solpos
this_expected = expected.copy()
this_expected.index = times
this_expected = np.round(this_expected, 5)
ephem_data = np.round(ephem_data, 5)
assert_frame_equal(this_expected, ephem_data[this_expected.columns])
@pytest.mark.parametrize("altitude, expected", [
(1830.14, 'expected_solpos'),
(2000, pd.DataFrame(
np.array([[39.88788, 50.11212, 194.34024, 39.87205, 14.64151,
50.12795]]),
columns=['apparent_elevation', 'apparent_zenith', 'azimuth',
'elevation', 'equation_of_time', 'zenith'],
index=['2003-10-17T12:30:30Z']))
])
def test_get_solarposition_altitude(
altitude, expected, golden, expected_solpos):
times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.get_solarposition(times, golden.latitude,
golden.longitude,
altitude=altitude,
temperature=11)
if isinstance(expected, str) and expected == 'expected_solpos':
expected = expected_solpos
this_expected = expected.copy()
this_expected.index = times
this_expected = np.round(this_expected, 5)
ephem_data = np.round(ephem_data, 5)
assert_frame_equal(this_expected, ephem_data[this_expected.columns])
@pytest.mark.parametrize("delta_t, method", [
(None, 'nrel_numpy'),
pytest.param(
None, 'nrel_numba',
marks=[pytest.mark.xfail(
reason='spa.calculate_deltat not implemented for numba yet')]),
(67.0, 'nrel_numba'),
(67.0, 'nrel_numpy'),
])
def test_get_solarposition_deltat(delta_t, method, expected_solpos_multi,
golden):
times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
periods=2, freq='D', tz=golden.tz)
with warnings.catch_warnings():
# don't warn on method reload or num threads
warnings.simplefilter("ignore")
ephem_data = solarposition.get_solarposition(times, golden.latitude,
golden.longitude,
pressure=82000,
delta_t=delta_t,
temperature=11,
method=method)
this_expected = expected_solpos_multi
this_expected.index = times
this_expected = np.round(this_expected, 5)
ephem_data = np.round(ephem_data, 5)
assert_frame_equal(this_expected, ephem_data[this_expected.columns])
def test_get_solarposition_no_kwargs(expected_solpos, golden):
times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.get_solarposition(times, golden.latitude,
golden.longitude)
expected_solpos.index = times
expected_solpos = np.round(expected_solpos, 2)
ephem_data = np.round(ephem_data, 2)
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
@requires_ephem
def test_get_solarposition_method_pyephem(expected_solpos, golden):
times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
periods=1, freq='D', tz=golden.tz)
ephem_data = solarposition.get_solarposition(times, golden.latitude,
golden.longitude,
method='pyephem')
expected_solpos.index = times
expected_solpos = np.round(expected_solpos, 2)
ephem_data = np.round(ephem_data, 2)
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
def test_nrel_earthsun_distance():
times = pd.DatetimeIndex([datetime.datetime(2015, 1, 2),
datetime.datetime(2015, 8, 2)]
).tz_localize('MST')
result = solarposition.nrel_earthsun_distance(times, delta_t=64.0)
expected = pd.Series(np.array([0.983289204601, 1.01486146446]),
index=times)
assert_series_equal(expected, result)
times = datetime.datetime(2015, 1, 2)
result = solarposition.nrel_earthsun_distance(times, delta_t=64.0)
expected = pd.Series(np.array([0.983289204601]),
index=pd.DatetimeIndex([times, ]))
assert_series_equal(expected, result)
def test_equation_of_time():
times = pd.date_range(start="1/1/2015 0:00", end="12/31/2015 23:00",
freq="H")
output = solarposition.spa_python(times, 37.8, -122.25, 100)
eot = output['equation_of_time']
eot_rng = eot.max() - eot.min() # range of values, around 30 minutes
eot_1 = solarposition.equation_of_time_spencer71(times.dayofyear)
eot_2 = solarposition.equation_of_time_pvcdrom(times.dayofyear)
assert np.allclose(eot_1 / eot_rng, eot / eot_rng, atol=0.3) # spencer
assert np.allclose(eot_2 / eot_rng, eot / eot_rng, atol=0.4) # pvcdrom
def test_declination():
times = pd.date_range(start="1/1/2015 0:00", end="12/31/2015 23:00",
freq="H")
atmos_refract = 0.5667
delta_t = spa.calculate_deltat(times.year, times.month)
unixtime = np.array([calendar.timegm(t.timetuple()) for t in times])
_, _, declination = spa.solar_position(unixtime, 37.8, -122.25, 100,
1013.25, 25, delta_t, atmos_refract,
sst=True)
declination = np.deg2rad(declination)
declination_rng = declination.max() - declination.min()
declination_1 = solarposition.declination_cooper69(times.dayofyear)
declination_2 = solarposition.declination_spencer71(times.dayofyear)
a, b = declination_1 / declination_rng, declination / declination_rng
assert np.allclose(a, b, atol=0.03) # cooper
a, b = declination_2 / declination_rng, declination / declination_rng
assert np.allclose(a, b, atol=0.02) # spencer
def test_analytical_zenith():
times = pd.date_range(start="1/1/2015 0:00", end="12/31/2015 23:00",
freq="H").tz_localize('Etc/GMT+8')
lat, lon = 37.8, -122.25
lat_rad = np.deg2rad(lat)
output = solarposition.spa_python(times, lat, lon, 100)
solar_zenith = np.deg2rad(output['zenith']) # spa
# spencer
eot = solarposition.equation_of_time_spencer71(times.dayofyear)
hour_angle = np.deg2rad(solarposition.hour_angle(times, lon, eot))
decl = solarposition.declination_spencer71(times.dayofyear)
zenith_1 = solarposition.solar_zenith_analytical(lat_rad, hour_angle, decl)
# pvcdrom and cooper
eot = solarposition.equation_of_time_pvcdrom(times.dayofyear)
hour_angle = np.deg2rad(solarposition.hour_angle(times, lon, eot))
decl = solarposition.declination_cooper69(times.dayofyear)
zenith_2 = solarposition.solar_zenith_analytical(lat_rad, hour_angle, decl)
assert np.allclose(zenith_1, solar_zenith, atol=0.015)
assert np.allclose(zenith_2, solar_zenith, atol=0.025)
def test_analytical_azimuth():
times = pd.date_range(start="1/1/2015 0:00", end="12/31/2015 23:00",
freq="H").tz_localize('Etc/GMT+8')
lat, lon = 37.8, -122.25
lat_rad = np.deg2rad(lat)
output = solarposition.spa_python(times, lat, lon, 100)
solar_azimuth = np.deg2rad(output['azimuth']) # spa
solar_zenith = np.deg2rad(output['zenith'])
# spencer
eot = solarposition.equation_of_time_spencer71(times.dayofyear)
hour_angle = np.deg2rad(solarposition.hour_angle(times, lon, eot))
decl = solarposition.declination_spencer71(times.dayofyear)
zenith = solarposition.solar_zenith_analytical(lat_rad, hour_angle, decl)
azimuth_1 = solarposition.solar_azimuth_analytical(lat_rad, hour_angle,
decl, zenith)
# pvcdrom and cooper
eot = solarposition.equation_of_time_pvcdrom(times.dayofyear)
hour_angle = np.deg2rad(solarposition.hour_angle(times, lon, eot))
decl = solarposition.declination_cooper69(times.dayofyear)
zenith = solarposition.solar_zenith_analytical(lat_rad, hour_angle, decl)
azimuth_2 = solarposition.solar_azimuth_analytical(lat_rad, hour_angle,
decl, zenith)
idx = np.where(solar_zenith < np.pi/2)
assert np.allclose(azimuth_1[idx], solar_azimuth.values[idx], atol=0.01)
assert np.allclose(azimuth_2[idx], solar_azimuth.values[idx], atol=0.017)
# test for NaN values at boundary conditions (PR #431)
test_angles = np.radians(np.array(
[[ 0., -180., -20.],
[ 0., 0., -5.],
[ 0., 0., 0.],
[ 0., 0., 15.],
[ 0., 180., 20.],
[ 30., 0., -20.],
[ 30., 0., -5.],
[ 30., 0., 0.],
[ 30., 180., 5.],
[ 30., 0., 10.],
[ -30., 0., -20.],
[ -30., 0., -15.],
[ -30., 0., 0.],
[ -30., -180., 5.],
[ -30., 180., 10.]]))
zeniths = solarposition.solar_zenith_analytical(*test_angles.T)
azimuths = solarposition.solar_azimuth_analytical(*test_angles.T,
zenith=zeniths)
assert not np.isnan(azimuths).any()
def test_hour_angle():
"""
Test conversion from hours to hour angles in degrees given the following
inputs from NREL SPA calculator at Golden, CO
date,times,eot,sunrise,sunset
1/2/2015,7:21:55,-3.935172,-70.699400,70.512721
1/2/2015,16:47:43,-4.117227,-70.699400,70.512721
1/2/2015,12:04:45,-4.026295,-70.699400,70.512721
"""
longitude = -105.1786 # degrees
times = pd.DatetimeIndex([
'2015-01-02 07:21:55.2132',
'2015-01-02 16:47:42.9828',
'2015-01-02 12:04:44.6340'
]).tz_localize('Etc/GMT+7')
eot = np.array([-3.935172, -4.117227, -4.026295])
hours = solarposition.hour_angle(times, longitude, eot)
expected = (-70.682338, 70.72118825000001, 0.000801250)
# FIXME: there are differences from expected NREL SPA calculator values
# sunrise: 4 seconds, sunset: 48 seconds, transit: 0.2 seconds
# but the differences may be due to other SPA input parameters
assert np.allclose(hours, expected)
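# A rough cross-check of the numbers above (not a pvlib API): the hour angle
# is 15 degrees per hour of apparent solar time away from solar noon, where
# apparent solar time corrects clock time by 4 minutes per degree of
# longitude offset from the zone meridian (-105 deg for Etc/GMT+7) plus the
# equation of time.  The helper name and the hard-coded meridian are
# assumptions made for this illustration only.
def _approx_hour_angle(hours_local, longitude, eot_minutes,
                       zone_meridian=-105.0):
    """Approximate hour angle in degrees from local clock hours."""
    solar_time = (hours_local
                  + (longitude - zone_meridian) * 4.0 / 60.0  # 4 min/degree
                  + eot_minutes / 60.0)
    return 15.0 * (solar_time - 12.0)

# e.g. _approx_hour_angle(12 + 4.0/60 + 44.634/3600, -105.1786, -4.026295)
# gives ~0.0008 degrees, matching the transit entry in `expected` above.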
def test_sun_rise_set_transit_geometric(expected_rise_set_spa, golden_mst):
"""Test geometric calculations for sunrise, sunset, and transit times"""
times = expected_rise_set_spa.index
latitude = golden_mst.latitude
longitude = golden_mst.longitude
eot = solarposition.equation_of_time_spencer71(times.dayofyear) # minutes
decl = solarposition.declination_spencer71(times.dayofyear) # radians
sr, ss, st = solarposition.sun_rise_set_transit_geometric(
times, latitude=latitude, longitude=longitude, declination=decl,
equation_of_time=eot)
# sunrise: 2015-01-02 07:26:39.763224487, 2015-08-02 05:04:35.688533801
# sunset: 2015-01-02 16:41:29.951096777, 2015-08-02 19:09:46.597355085
# transit: 2015-01-02 12:04:04.857160632, 2015-08-02 12:07:11.142944443
test_sunrise = solarposition._times_to_hours_after_local_midnight(sr)
test_sunset = solarposition._times_to_hours_after_local_midnight(ss)
test_transit = solarposition._times_to_hours_after_local_midnight(st)
# convert expected SPA sunrise, sunset, transit to local datetime indices
expected_sunrise = pd.DatetimeIndex(expected_rise_set_spa.sunrise.values,
tz='UTC').tz_convert(golden_mst.tz)
expected_sunset = pd.DatetimeIndex(expected_rise_set_spa.sunset.values,
tz='UTC').tz_convert(golden_mst.tz)
expected_transit = pd.DatetimeIndex(expected_rise_set_spa.transit.values,
tz='UTC').tz_convert(golden_mst.tz)
# convert expected times to hours since midnight as arrays of floats
expected_sunrise = solarposition._times_to_hours_after_local_midnight(
expected_sunrise)
expected_sunset = solarposition._times_to_hours_after_local_midnight(
expected_sunset)
expected_transit = solarposition._times_to_hours_after_local_midnight(
expected_transit)
# geometric time has about 4-6 minute error compared to SPA sunset/sunrise
expected_sunrise_error = np.array(
[0.07910089555555544, 0.06908014805555496]) # 4.8[min], 4.2[min]
expected_sunset_error = np.array(
[-0.1036246955555562, -0.06983406805555603]) # -6.2[min], -4.2[min]
expected_transit_error = np.array(
[-0.011150788888889096, 0.0036508177777765383]) # -40[sec], 13.3[sec]
assert np.allclose(test_sunrise, expected_sunrise,
atol=np.abs(expected_sunrise_error).max())
assert np.allclose(test_sunset, expected_sunset,
atol=np.abs(expected_sunset_error).max())
assert np.allclose(test_transit, expected_transit,
atol=np.abs(expected_transit_error).max())
# put numba tests at end of file to minimize reloading
@requires_numba
def test_spa_python_numba_physical(expected_solpos, golden_mst):
times = pd.date_range(datetime.datetime(2003, 10, 17, 12, 30, 30),
periods=1, freq='D', tz=golden_mst.tz)
with warnings.catch_warnings():
# don't warn on method reload or num threads
# ensure that numpy is the most recently used method so that
# we can use the warns filter below
warnings.simplefilter("ignore")
ephem_data = solarposition.spa_python(times, golden_mst.latitude,
golden_mst.longitude,
pressure=82000,
temperature=11, delta_t=67,
atmos_refract=0.5667,
how='numpy', numthreads=1)
with pytest.warns(UserWarning):
ephem_data = solarposition.spa_python(times, golden_mst.latitude,
golden_mst.longitude,
pressure=82000,
temperature=11, delta_t=67,
atmos_refract=0.5667,
how='numba', numthreads=1)
expected_solpos.index = times
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
@requires_numba
def test_spa_python_numba_physical_dst(expected_solpos, golden):
times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
periods=1, freq='D', tz=golden.tz)
with warnings.catch_warnings():
# don't warn on method reload or num threads
warnings.simplefilter("ignore")
ephem_data = solarposition.spa_python(times, golden.latitude,
golden.longitude, pressure=82000,
temperature=11, delta_t=67,
atmos_refract=0.5667,
how='numba', numthreads=1)
expected_solpos.index = times
assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
with pytest.warns(UserWarning):
# test that we get a warning when reloading to use numpy only
ephem_data = solarposition.spa_python(times, golden.latitude,
golden.longitude,
pressure=82000,
temperature=11, delta_t=67,
atmos_refract=0.5667,
how='numpy', numthreads=1)
| bsd-3-clause |
imaculate/scikit-learn | examples/gaussian_process/plot_gpr_noisy.py | 104 | 3778 | """
=============================================================
Gaussian process regression (GPR) with noise-level estimation
=============================================================
This example illustrates that GPR with a sum-kernel including a WhiteKernel can
estimate the noise level of data. An illustration of the
log-marginal-likelihood (LML) landscape shows that there exist two local
maxima of LML. The first corresponds to a model with a high noise level and a
large length scale, which explains all variations in the data by noise. The
second one has a smaller noise level and shorter length scale, which explains
most of the variation by the noise-free functional relationship. The second
model has a higher likelihood; however, depending on the initial value for the
hyperparameters, the gradient-based optimization might also converge to the
high-noise solution. It is thus important to repeat the optimization several
times for different initializations.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel
rng = np.random.RandomState(0)
X = rng.uniform(0, 5, 20)[:, np.newaxis]
y = 0.5 * np.sin(3 * X[:, 0]) + rng.normal(0, 0.5, X.shape[0])
# First run
plt.figure(0)
kernel = 1.0 * RBF(length_scale=100.0, length_scale_bounds=(1e-2, 1e3)) \
+ WhiteKernel(noise_level=1, noise_level_bounds=(1e-10, 1e+1))
gp = GaussianProcessRegressor(kernel=kernel,
alpha=0.0).fit(X, y)
X_ = np.linspace(0, 5, 100)
y_mean, y_cov = gp.predict(X_[:, np.newaxis], return_cov=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - np.sqrt(np.diag(y_cov)),
y_mean + np.sqrt(np.diag(y_cov)),
alpha=0.5, color='k')
plt.plot(X_, 0.5*np.sin(3*X_), 'r', lw=3, zorder=9)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
plt.title("Initial: %s\nOptimum: %s\nLog-Marginal-Likelihood: %s"
% (kernel, gp.kernel_,
gp.log_marginal_likelihood(gp.kernel_.theta)))
plt.tight_layout()
# Second run
plt.figure(1)
kernel = 1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-2, 1e3)) \
+ WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-10, 1e+1))
gp = GaussianProcessRegressor(kernel=kernel,
alpha=0.0).fit(X, y)
X_ = np.linspace(0, 5, 100)
y_mean, y_cov = gp.predict(X_[:, np.newaxis], return_cov=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - np.sqrt(np.diag(y_cov)),
y_mean + np.sqrt(np.diag(y_cov)),
alpha=0.5, color='k')
plt.plot(X_, 0.5*np.sin(3*X_), 'r', lw=3, zorder=9)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
plt.title("Initial: %s\nOptimum: %s\nLog-Marginal-Likelihood: %s"
% (kernel, gp.kernel_,
gp.log_marginal_likelihood(gp.kernel_.theta)))
plt.tight_layout()
# Plot LML landscape
plt.figure(2)
theta0 = np.logspace(-2, 3, 49)
theta1 = np.logspace(-2, 0, 50)
Theta0, Theta1 = np.meshgrid(theta0, theta1)
LML = [[gp.log_marginal_likelihood(np.log([0.36, Theta0[i, j], Theta1[i, j]]))
for i in range(Theta0.shape[0])] for j in range(Theta0.shape[1])]
LML = np.array(LML).T
vmin, vmax = (-LML).min(), (-LML).max()
vmax = 50
plt.contour(Theta0, Theta1, -LML,
levels=np.logspace(np.log10(vmin), np.log10(vmax), 50),
norm=LogNorm(vmin=vmin, vmax=vmax))
plt.colorbar()
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Length-scale")
plt.ylabel("Noise-level")
plt.title("Log-marginal-likelihood")
plt.tight_layout()
plt.show()
| bsd-3-clause |
alistairlow/tensorflow | tensorflow/contrib/training/python/training/feeding_queue_runner_test.py | 76 | 5052 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.estimator.inputs.queues.feeding_functions import _enqueue_data as enqueue_data
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
cgre-aachen/gempy | examples/examples/geometries/1_horizontal_stratigraphic.py | 1 | 1483 | """
Model 1 - Horizontal stratigraphic
==================================
"""
# %%
# This is the most simple model of horizontally stacked layers. We start
# by importing the necessary dependencies:
#
# %%
# Importing GemPy
import gempy as gp
import pandas as pd
pd.set_option('precision', 2)
# %%
# Creating the model by importing the input data and displaying it:
#
# %%
data_path = 'https://raw.githubusercontent.com/cgre-aachen/gempy_data/master/'
geo_data = gp.create_data('horizontal', extent=[0, 1000, 0, 1000, 0, 1000], resolution=[50, 50, 50],
path_o=data_path + "/data/input_data/jan_models/model1_orientations.csv",
path_i=data_path + "/data/input_data/jan_models/model1_surface_points.csv")
# %%
# Setting and ordering the units and series:
#
# %%
gp.map_stack_to_surfaces(geo_data, {"Strat_Series": ('rock2', 'rock1'), "Basement_Series": ('basement')})
# %%
gp.plot_2d(geo_data, direction=['y'])
# %%
# Calculating the model:
#
# %%
interp_data = gp.set_interpolator(geo_data, compile_theano=True,
theano_optimizer='fast_compile')
# %%
sol = gp.compute_model(geo_data)
# %%
# Displaying the result in x and y direction:
#
# %%
gp.plot_2d(geo_data, cell_number=[25],
direction=['x'], show_data=True)
# %%
# sphinx_gallery_thumbnail_number = 2
gp.plot_2d(geo_data, cell_number=[25],
direction=['y'], show_data=True)
gp.save_model(geo_data) | lgpl-3.0 |
joshloyal/scikit-learn | sklearn/manifold/setup.py | 43 | 1283 | import os
from os.path import join
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("manifold", parent_package, top_path)
libraries = []
if os.name == 'posix':
libraries.append('m')
config.add_extension("_utils",
sources=["_utils.pyx"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
cblas_libs, blas_info = get_blas_info()
eca = blas_info.pop('extra_compile_args', [])
eca.append("-O4")
config.add_extension("_barnes_hut_tsne",
libraries=cblas_libs,
sources=["_barnes_hut_tsne.pyx"],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=eca, **blas_info)
config.add_subpackage('tests')
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
nanditav/15712-TensorFlow | tensorflow/examples/learn/hdf5_classification.py | 17 | 2201 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, h5 format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import cross_validation
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import learn
import h5py # pylint: disable=g-bad-import-order
def main(unused_argv):
# Load dataset.
iris = learn.datasets.load_dataset('iris')
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
  # Note that we are saving and loading the iris data in h5 format as a
  # simple demonstration here.
h5f = h5py.File('/tmp/test_hdf5.h5', 'w')
h5f.create_dataset('X_train', data=x_train)
h5f.create_dataset('X_test', data=x_test)
h5f.create_dataset('y_train', data=y_train)
h5f.create_dataset('y_test', data=y_test)
h5f.close()
h5f = h5py.File('/tmp/test_hdf5.h5', 'r')
x_train = np.array(h5f['X_train'])
x_test = np.array(h5f['X_test'])
y_train = np.array(h5f['y_train'])
y_test = np.array(h5f['y_test'])
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = learn.infer_real_valued_columns_from_input(x_train)
classifier = learn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
score = metrics.accuracy_score(y_test, classifier.predict(x_test))
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
zxc2694/STM32F429_Quadrotor | program/pythonGUI/gui3_gyroscope.py | 3 | 2710 | ################################################################################
# File name: gui3_gyroscope.py
#
# Function: Display three data streams from the STM32F4 using Python (matplotlib).
# The three values are the roll, pitch, and yaw angles of the quadcopter attitude.
#
# Reference:http://electronut.in/plotting-real-time-data-from-arduino-using-python/
#
################################################################################
import sys, serial
import numpy as np
from time import sleep
from collections import deque
from matplotlib import pyplot as plt
# class that holds analog data for N samples
class AnalogData:
# constr
def __init__(self, maxLen):
self.v1 = deque([0.0]*maxLen)
self.v2 = deque([0.0]*maxLen)
self.v3 = deque([0.0]*maxLen)
self.maxLen = maxLen
# ring buffer
def addToBuf(self, buf, val):
if len(buf) < self.maxLen:
buf.append(val)
else:
buf.pop()
buf.appendleft(val)
#Add new data
def add(self, data):
assert(len(data) == 3)
self.addToBuf(self.v1, data[0])
self.addToBuf(self.v2, data[1])
self.addToBuf(self.v3, data[2])
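# Note for clarity: because each deque is pre-filled with maxLen zeros, add()
# always drops the oldest value on the right and pushes the newest reading
# onto the left, so the most recent sample always sits at index 0.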
# plot class
class AnalogPlot:
# constr
def __init__(self, analogData):
# set plot to animated
plt.ion()
plt.figure(figsize=(9,8))
self.v1line, = plt.plot(analogData.v1,label="Gyroscope_X",color="red")
self.v2line, = plt.plot(analogData.v2,label="Gyroscope_Y",color="orange")
self.v3line, = plt.plot(analogData.v3,label="Gyroscope_Z",color="green")
plt.xlabel("Time")
plt.ylabel("PWM range")
plt.title("Measure Gyroscope values")
plt.legend() #Show label figure.
plt.ylim([-600, 600]) # Vertical axis scale.
#TEST plt.ylim([-90, 90]) # Vertical axis scale.
plt.grid()
# update plot
def update(self, analogData):
self.v1line.set_ydata(analogData.v1)
self.v2line.set_ydata(analogData.v2)
self.v3line.set_ydata(analogData.v3)
plt.draw()
def main():
# expects 1 arg - serial port string
if(len(sys.argv) != 2):
print "Type:"
print "sudo chmod 777 /dev/ttyUSB0"
print "python gui3_gyroscope.py '/dev/ttyUSB0'"
exit(1)
#strPort = '/dev/tty.usbserial-A7006Yqh'
strPort = sys.argv[1];
# plot parameters
analogData = AnalogData(200) # Horizontal axis scale.
analogPlot = AnalogPlot(analogData)
print "plotting data..."
a = 1
# open serial port
ser = serial.Serial(strPort, 9600)
while True:
try:
line = ser.readline()
data = [float(val) for val in line.split()]
if (a < 10):
a = a + 1
else:
print data[0] , data[1] ,data[2]
if(len(data) == 3):
analogData.add(data)
analogPlot.update(analogData)
except KeyboardInterrupt:
print "exiting"
break
# close serial
ser.flush()
ser.close()
# call main
if __name__ == '__main__':
main()
| mit |
huggingface/pytorch-transformers | examples/flax/language-modeling/run_mlm_flax.py | 1 | 28127 | #!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for masked language modeling (BERT, ALBERT, RoBERTa...) with whole word masking on a
text file or a dataset.
Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
https://huggingface.co/models?filter=masked-lm
"""
import logging
import os
import sys
import time
from dataclasses import dataclass, field
# You can also adapt this script on your own masked language modeling task. Pointers for this are left as comments.
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import numpy as np
from datasets import load_dataset
from tqdm import tqdm
import jax
import jax.numpy as jnp
import optax
from flax import jax_utils
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForMaskedLM,
HfArgumentParser,
PreTrainedTokenizerBase,
TensorType,
TrainingArguments,
is_tensorboard_available,
set_seed,
)
# Cache the result
has_tensorboard = is_tensorboard_available()
if has_tensorboard:
try:
from flax.metrics.tensorboard import SummaryWriter
except ImportError as ie:
has_tensorboard = False
print(f"Unable to display metrics through TensorBoard because some package are not installed: {ie}")
else:
print(
"Unable to display metrics through TensorBoard because the package is not installed: "
"Please run pip install tensorboard to enable."
)
MODEL_CONFIG_CLASSES = list(FLAX_MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization."
"Don't set if you want to train a model from scratch."
},
)
model_type: Optional[str] = field(
default=None,
metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
dtype: Optional[str] = field(
default="float32",
metadata={
"help": "Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`."
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
train_ref_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
)
validation_ref_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
validation_split_percentage: Optional[int] = field(
default=5,
metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
},
)
max_seq_length: Optional[int] = field(
default=None,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated. Default to the max input length of the model."
},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
mlm_probability: float = field(
default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": "Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
},
)
line_by_line: bool = field(
default=False,
metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
)
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and self.validation_file is None:
raise ValueError("Need either a dataset name or a training/validation file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
# Adapted from transformers/data/data_collator.py
# Letting here for now, let's discuss where it should live
@dataclass
class FlaxDataCollatorForLanguageModeling:
"""
Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they
are not all of the same length.
Args:
tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):
The tokenizer used for encoding the data.
mlm (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to use masked language modeling. If set to :obj:`False`, the labels are the same as the
inputs with the padding tokens ignored (by setting them to -100). Otherwise, the labels are -100 for
non-masked tokens and the value to predict for the masked token.
mlm_probability (:obj:`float`, `optional`, defaults to 0.15):
The probability with which to (randomly) mask tokens in the input, when :obj:`mlm` is set to :obj:`True`.
.. note::
For best performance, this data collator should be used with a dataset having items that are dictionaries or
BatchEncoding, with the :obj:`"special_tokens_mask"` key, as returned by a
:class:`~transformers.PreTrainedTokenizer` or a :class:`~transformers.PreTrainedTokenizerFast` with the
argument :obj:`return_special_tokens_mask=True`.
"""
tokenizer: PreTrainedTokenizerBase
mlm: bool = True
mlm_probability: float = 0.15
def __post_init__(self):
if self.mlm and self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. "
"You should pass `mlm=False` to train on causal language modeling instead."
)
def __call__(self, examples: List[Dict[str, np.ndarray]], pad_to_multiple_of: int) -> Dict[str, np.ndarray]:
# Handle dict or lists with proper padding and conversion to tensor.
batch = self.tokenizer.pad(examples, pad_to_multiple_of=pad_to_multiple_of, return_tensors=TensorType.NUMPY)
# If special token mask has been preprocessed, pop it from the dict.
special_tokens_mask = batch.pop("special_tokens_mask", None)
if self.mlm:
batch["input_ids"], batch["labels"] = self.mask_tokens(
batch["input_ids"], special_tokens_mask=special_tokens_mask
)
else:
labels = batch["input_ids"].copy()
if self.tokenizer.pad_token_id is not None:
labels[labels == self.tokenizer.pad_token_id] = -100
batch["labels"] = labels
return batch
def mask_tokens(
self, inputs: np.ndarray, special_tokens_mask: Optional[np.ndarray]
) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
"""
labels = inputs.copy()
# We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
probability_matrix = np.full(labels.shape, self.mlm_probability)
special_tokens_mask = special_tokens_mask.astype("bool")
probability_matrix[special_tokens_mask] = 0.0
masked_indices = np.random.binomial(1, probability_matrix).astype("bool")
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = np.random.binomial(1, np.full(labels.shape, 0.8)).astype("bool") & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = np.random.binomial(1, np.full(labels.shape, 0.5)).astype("bool")
indices_random &= masked_indices & ~indices_replaced
random_words = np.random.randint(self.tokenizer.vocab_size, size=labels.shape, dtype="i4")
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
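# A minimal usage sketch of the collator above (defined here for illustration
# only and never called by this script). The `tokenizer` argument and the two
# example sentences are assumptions; any tokenizer with a mask token should
# behave similarly.
def _example_collator_usage(tokenizer):
    collator = FlaxDataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15)
    # Keep the special-tokens mask so [CLS]/[SEP]-style tokens are never masked.
    encoded = [
        tokenizer(text, return_special_tokens_mask=True)
        for text in ["a short example sentence", "masked language modeling"]
    ]
    batch = collator(encoded, pad_to_multiple_of=16)
    # batch["input_ids"] now contains masked tokens; batch["labels"] is -100
    # everywhere except at the masked positions.
    return batch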
def generate_batch_splits(samples_idx: jnp.ndarray, batch_size: int) -> jnp.ndarray:
num_samples = len(samples_idx)
samples_to_remove = num_samples % batch_size
if samples_to_remove != 0:
samples_idx = samples_idx[:-samples_to_remove]
sections_split = num_samples // batch_size
batch_idx = np.split(samples_idx, sections_split)
return batch_idx
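# Tiny illustrative helper (unused): with 10 sample indices and a batch size
# of 3, the single leftover index is dropped and three batches of three
# indices each are returned.
def _example_batch_splits():
    samples_idx = np.arange(10)
    return generate_batch_splits(samples_idx, batch_size=3)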
def write_metric(train_metrics, eval_metrics, train_time, step):
summary_writer.scalar("train_time", train_time, step)
train_metrics = get_metrics(train_metrics)
for key, vals in train_metrics.items():
tag = f"train_{key}"
for i, val in enumerate(vals):
summary_writer.scalar(tag, val, step - len(vals) + i + 1)
for metric_name, value in eval_metrics.items():
summary_writer.scalar(f"eval_{metric_name}", value, step)
if __name__ == "__main__":
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty."
"Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
level="NOTSET",
datefmt="[%X]",
)
# Log on each process the small summary:
logger = logging.getLogger(__name__)
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
if "validation" not in datasets.keys():
datasets["validation"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
)
datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.train_file.split(".")[-1]
if extension == "txt":
extension = "text"
datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
column_names = datasets["train"].column_names
else:
column_names = datasets["validation"].column_names
text_column_name = "text" if "text" in column_names else column_names[0]
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
if data_args.line_by_line:
# When using line_by_line, we just tokenize each nonempty line.
padding = "max_length" if data_args.pad_to_max_length else False
def tokenize_function(examples):
# Remove empty lines
examples = [line for line in examples if len(line) > 0 and not line.isspace()]
return tokenizer(
examples,
return_special_tokens_mask=True,
padding=padding,
truncation=True,
max_length=max_seq_length,
)
tokenized_datasets = datasets.map(
tokenize_function,
input_columns=[text_column_name],
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
)
else:
# Otherwise, we tokenize every text, then concatenate them together before splitting them in smaller parts.
# We use `return_special_tokens_mask=True` because DataCollatorForLanguageModeling (see below) is more
# efficient when it receives the `special_tokens_mask`.
def tokenize_function(examples):
return tokenizer(examples[text_column_name], return_special_tokens_mask=True)
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
)
# Main data processing function that will concatenate all texts from our dataset and generate chunks of
# max_seq_length.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
total_length = (total_length // max_seq_length) * max_seq_length
# Split by chunks of max_len.
result = {
k: [t[i : i + max_seq_length] for i in range(0, total_length, max_seq_length)]
for k, t in concatenated_examples.items()
}
return result
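    # Worked example: with max_seq_length = 4, tokenized texts with ids
    # [1, 2, 3] and [4, 5, 6, 7, 8] are concatenated to [1, 2, 3, 4, 5, 6, 7, 8]
    # and split into the chunks [1, 2, 3, 4] and [5, 6, 7, 8]; a trailing
    # remainder shorter than max_seq_length would be dropped.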
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a
# remainder for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value
# might be slower to preprocess.
#
# To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
tokenized_datasets = tokenized_datasets.map(
group_texts,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
# Enable tensorboard only on the master node
if has_tensorboard and jax.process_index() == 0:
summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir).joinpath("logs").as_posix())
# Data collator
# This one will take care of randomly masking the tokens.
data_collator = FlaxDataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)
# Initialize our training
rng = jax.random.PRNGKey(training_args.seed)
dropout_rngs = jax.random.split(rng, jax.local_device_count())
model = FlaxAutoModelForMaskedLM.from_config(config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype))
# Store some constant
num_epochs = int(training_args.num_train_epochs)
train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count()
eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count()
num_train_steps = len(tokenized_datasets["train"]) // train_batch_size * num_epochs
# Create learning rate schedule
warmup_fn = optax.linear_schedule(
init_value=0.0, end_value=training_args.learning_rate, transition_steps=training_args.warmup_steps
)
decay_fn = optax.linear_schedule(
init_value=training_args.learning_rate,
end_value=0,
transition_steps=num_train_steps - training_args.warmup_steps,
)
linear_decay_lr_schedule_fn = optax.join_schedules(
schedules=[warmup_fn, decay_fn], boundaries=[training_args.warmup_steps]
)
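    # For example, with learning_rate=1e-4 and warmup_steps=1000 this schedule
    # returns 0.0 at step 0, ramps linearly to 1e-4 at step 1000, and then
    # decays linearly back to 0 by num_train_steps.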
# create adam optimizer
adamw = optax.adamw(
learning_rate=linear_decay_lr_schedule_fn,
b1=training_args.adam_beta1,
b2=training_args.adam_beta2,
eps=1e-8,
weight_decay=training_args.weight_decay,
)
# Setup train state
state = train_state.TrainState.create(apply_fn=model.__call__, params=model.params, tx=adamw)
# Define gradient update step fn
def train_step(state, batch, dropout_rng):
dropout_rng, new_dropout_rng = jax.random.split(dropout_rng)
def loss_fn(params):
labels = batch.pop("labels")
logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0]
# compute loss, ignore padded input tokens
label_mask = jnp.where(labels > 0, 1.0, 0.0)
loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) * label_mask
# take average
loss = loss.sum() / label_mask.sum()
return loss
grad_fn = jax.value_and_grad(loss_fn)
loss, grad = grad_fn(state.params)
grad = jax.lax.pmean(grad, "batch")
new_state = state.apply_gradients(grads=grad)
metrics = jax.lax.pmean(
{"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}, axis_name="batch"
)
return new_state, metrics, new_dropout_rng
# Create parallel version of the train step
p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,))
# Define eval fn
def eval_step(params, batch):
labels = batch.pop("labels")
logits = model(**batch, params=params, train=False)[0]
# compute loss, ignore padded input tokens
label_mask = jnp.where(labels > 0, 1.0, 0.0)
loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) * label_mask
# compute accuracy
accuracy = jnp.equal(jnp.argmax(logits, axis=-1), labels) * label_mask
# summarize metrics
metrics = {"loss": loss.sum(), "accuracy": accuracy.sum(), "normalizer": label_mask.sum()}
metrics = jax.lax.psum(metrics, axis_name="batch")
return metrics
p_eval_step = jax.pmap(eval_step, "batch", donate_argnums=(0,))
# Replicate the train state on each device
state = jax_utils.replicate(state)
train_metrics = []
train_time = 0
epochs = tqdm(range(num_epochs), desc=f"Epoch ... (1/{num_epochs})", position=0)
for epoch in epochs:
# ======================== Training ================================
train_start = time.time()
# Create sampling rng
rng, input_rng = jax.random.split(rng)
# Generate an epoch by shuffling sampling indices from the train dataset
num_train_samples = len(tokenized_datasets["train"])
train_samples_idx = jax.random.permutation(input_rng, jnp.arange(num_train_samples))
train_batch_idx = generate_batch_splits(train_samples_idx, train_batch_size)
# Gather the indexes for creating the batch and do a training step
for i, batch_idx in enumerate(tqdm(train_batch_idx, desc="Training...", position=1)):
samples = [tokenized_datasets["train"][int(idx)] for idx in batch_idx]
model_inputs = data_collator(samples, pad_to_multiple_of=16)
# Model forward
model_inputs = shard(model_inputs.data)
state, train_metric, dropout_rngs = p_train_step(state, model_inputs, dropout_rngs)
train_metrics.append(train_metric)
train_time += time.time() - train_start
epochs.write(
f"Epoch... ({epoch + 1}/{num_epochs} | Loss: {train_metric['loss']}, Learning Rate: {train_metric['learning_rate']})"
)
# ======================== Evaluating ==============================
num_eval_samples = len(tokenized_datasets["validation"])
eval_samples_idx = jnp.arange(num_eval_samples)
eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size)
eval_metrics = []
for i, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=2)):
samples = [tokenized_datasets["validation"][int(idx)] for idx in batch_idx]
model_inputs = data_collator(samples, pad_to_multiple_of=16)
# Model forward
model_inputs = shard(model_inputs.data)
metrics = p_eval_step(state.params, model_inputs)
eval_metrics.append(metrics)
# normalize eval metrics
eval_metrics = get_metrics(eval_metrics)
eval_metrics = jax.tree_map(jnp.sum, eval_metrics)
eval_normalizer = eval_metrics.pop("normalizer")
eval_metrics = jax.tree_map(lambda x: x / eval_normalizer, eval_metrics)
# Update progress bar
epochs.desc = (
f"Epoch... ({epoch + 1}/{num_epochs} | Loss: {eval_metrics['loss']}, Acc: {eval_metrics['accuracy']})"
)
# Save metrics
if has_tensorboard and jax.process_index() == 0:
cur_step = epoch * (len(tokenized_datasets["train"]) // train_batch_size)
write_metric(train_metrics, eval_metrics, train_time, cur_step)
# save last checkpoint
if jax.process_index() == 0:
params = jax.device_get(jax.tree_map(lambda x: x[0], state.params))
model.save_pretrained(training_args.output_dir, params=params)
| apache-2.0 |
madjelan/scikit-learn | sklearn/kernel_ridge.py | 155 | 6545 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
dual_coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
| bsd-3-clause |
fengzhyuan/scikit-learn | sklearn/ensemble/forest.py | 176 | 62555 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Brian Holt <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..utils.validation import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score fuction."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
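# Illustrative example: with n_samples=5, a bootstrap draw of [0, 0, 3, 4, 4]
# never selects samples 1 and 2, so the unsampled (out-of-bag) indices here
# would be [1, 2]; these are the rows each tree is scored on when
# oob_score=True.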
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
def _parallel_helper(obj, methodname, *args, **kwargs):
"""Private helper to workaround Python 2 pickle limitations"""
return getattr(obj, methodname)(*args, **kwargs)
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity, unlike
            # [:, np.newaxis] which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start:
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False)
tree.set_params(random_state=random_state.randint(MAX_INT))
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
            valid_presets = ('auto', 'balanced', 'balanced_subsample', 'subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.class_weight == "subsample":
warn("class_weight='subsample' is deprecated and will be removed in 0.18."
" It was replaced by class_weight='balanced_subsample' "
"using the balanced strategy.", DeprecationWarning)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight not in ['subsample', 'balanced_subsample'] or
not self.bootstrap):
if self.class_weight == 'subsample':
class_weight = 'auto'
elif self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
with warnings.catch_warnings():
if class_weight == "auto":
warnings.simplefilter('ignore', DeprecationWarning)
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
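        # The forest probability is the unweighted mean of the per-tree
        # probabilities; the first tree's output array doubles as the
        # accumulator before the final division by the number of estimators.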
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample are computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
            The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
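        # The forest prediction is the unweighted mean of the per-tree
        # predictions.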
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
    classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
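    Examples
    --------
    A minimal usage sketch (toy data chosen purely for illustration; the
    fitted estimator's repr is elided and only the output shape is asserted):
    >>> from sklearn.ensemble import RandomForestClassifier
    >>> X = [[0, 0], [1, 1], [0, 1], [1, 0]]
    >>> y = [0, 1, 0, 1]
    >>> clf = RandomForestClassifier(n_estimators=10, random_state=0)
    >>> clf.fit(X, y)  # doctest: +ELLIPSIS
    RandomForestClassifier(...)
    >>> clf.predict([[0, 0]]).shape
    (1,)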
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
    A random forest is a meta estimator that fits a number of decision tree
    regressors on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
        Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
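    Examples
    --------
    A minimal usage sketch (toy data chosen purely for illustration; the
    fitted estimator's repr is elided and only the output shape is asserted):
    >>> from sklearn.ensemble import RandomForestRegressor
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0.0, 0.0, 1.0, 1.0]
    >>> regr = RandomForestRegressor(n_estimators=10, random_state=0)
    >>> regr.fit(X, y)  # doctest: +ELLIPSIS
    RandomForestRegressor(...)
    >>> regr.predict([[1.5]]).shape
    (1,)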
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
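    Examples
    --------
    A minimal usage sketch (toy data chosen purely for illustration; the
    fitted estimator's repr is elided and only the output shape is asserted):
    >>> from sklearn.ensemble import ExtraTreesClassifier
    >>> X = [[0, 0], [1, 1], [0, 1], [1, 0]]
    >>> y = [0, 1, 0, 1]
    >>> clf = ExtraTreesClassifier(n_estimators=10, random_state=0)
    >>> clf.fit(X, y)  # doctest: +ELLIPSIS
    ExtraTreesClassifier(...)
    >>> clf.predict([[0, 0]]).shape
    (1,)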
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
Note: this parameter is tree-specific.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
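    Examples
    --------
    A minimal usage sketch (toy data chosen purely for illustration; the
    fitted estimator's repr is elided and only the output shape is asserted):
    >>> from sklearn.ensemble import ExtraTreesRegressor
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0.0, 0.0, 1.0, 1.0]
    >>> regr = ExtraTreesRegressor(n_estimators=10, random_state=0)
    >>> regr.fit(X, y)  # doctest: +ELLIPSIS
    ExtraTreesRegressor(...)
    >>> regr.predict([[1.5]]).shape
    (1,)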
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : int
Number of trees in the forest.
max_depth : int
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
sparse_output : bool, optional (default=True)
        Whether to return a sparse CSR matrix (the default) or a dense array
        compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
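    Examples
    --------
    A minimal usage sketch (toy data chosen purely for illustration; only the
    number of transformed rows is asserted, since the number of output
    columns depends on how the random trees grow):
    >>> from sklearn.ensemble import RandomTreesEmbedding
    >>> X = [[0, 0], [1, 1], [0, 1], [1, 0]]
    >>> hasher = RandomTreesEmbedding(n_estimators=3, random_state=0)
    >>> X_transformed = hasher.fit_transform(X)
    >>> X_transformed.shape[0]
    4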
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
        # ensure_2d=False because there are actually unit tests checking we fail
# for 1d.
X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
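        # A random regression target is generated only to satisfy the fit API;
        # the embedding uses just the leaf index each sample ends up in.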
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
| bsd-3-clause |
clemkoa/scikit-learn | sklearn/neighbors/regression.py | 8 | 10967 | """Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
    associated with the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
          In this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances but
different labels, the results will depend on the ordering of the
training data.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
        y : array of float, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
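            # Weighted average of the neighbors' targets, with weights given
            # by _get_weights (inverse distance or a user-supplied callable),
            # normalised per query point.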
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float64)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
    associated with the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
          In this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
        y : array of float, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
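            # Each query point can have a different number of neighbors within
            # the radius, so the weighted average is computed row by row with
            # np.average.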
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
pythonvietnam/scikit-learn | sklearn/preprocessing/tests/test_data.py | 71 | 38516 | import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils.testing import assert_almost_equal, clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.utils.validation import DataConversionWarning
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
@ignore_warnings
def test_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X = np.ones(5)
assert_array_equal(scale(X, with_mean=False), X)
def test_standard_scaler_numerical_stability():
"""Test numerical stability of scaling"""
    # np.log(1e-5) is used because its floating point representation
# was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
# with 2 more samples, the std computation run into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
    assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
@ignore_warnings
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# Constant feature.
X = np.zeros(5)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [np.nan, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [np.inf, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_robust_scaler_2d_arrays():
"""Test robust scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
"""Check RobustScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produced by np.percentile
    # using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
"""Check MaxAbsScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# sparse data
X_csr = sparse.csr_matrix(X)
X_trans = scaler.fit_transform(X_csr)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans.A, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv.A)
def test_maxabs_scaler_large_negative_value():
"""Check MaxAbsScaler on toy data with a large negative value"""
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l1', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        row_sums = np.abs(X_norm).sum(axis=1)
        for i in range(3):
            assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='max', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        row_maxs = X_norm.max(axis=1)
        for i in range(3):
            assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]])
)
# Raise error if handle_unknown is neither ignore or error.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
| bsd-3-clause |
nontas/menpo3d | menpo3d/visualize/viewmayavi.py | 1 | 21761 | import numpy as np
from menpo.visualize import Renderer
# The colour map used for all lines and markers
GLOBAL_CMAP = 'jet'
def _parse_marker_size(marker_size, points):
if marker_size is None:
from menpo.shape import PointCloud
pc = PointCloud(points, copy=False)
# This is the way that mayavi automatically computes the scale factor in
# case the user passes scale_factor = 'auto'. We use it for both the
# marker_size as well as the numbers_size.
xyz_min, xyz_max = pc.bounds()
x_min, y_min, z_min = xyz_min
x_max, y_max, z_max = xyz_max
distance = np.sqrt(((x_max - x_min) ** 2 +
(y_max - y_min) ** 2 +
(z_max - z_min) ** 2) /
(4 * pc.n_points ** 0.33))
if distance == 0:
marker_size = 1
else:
marker_size = 0.1 * distance
return marker_size
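# Illustrative usage sketch (added commentary, not part of the original
# module). With ``marker_size=None`` the size is derived from the point
# cloud's bounding box; an explicit value is returned unchanged. ``pts`` is
# an assumed (n, 3) array used only for this example.
#
# >>> import numpy as np
# >>> pts = np.random.RandomState(0).randn(100, 3)
# >>> auto = _parse_marker_size(None, pts)   # ~0.1 * bounds-derived distance
# >>> _parse_marker_size(0.5, pts)           # explicit sizes pass through -> 0.5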
def _parse_colour(colour):
from matplotlib.colors import ColorConverter
return ColorConverter().to_rgb(colour)
def _check_colours_list(render_flag, colours_list, n_objects, error_str):
from menpo.visualize.viewmatplotlib import sample_colours_from_colourmap
if render_flag:
if colours_list is None:
# sample colours from jet colour map
colours_list = sample_colours_from_colourmap(n_objects, GLOBAL_CMAP)
if isinstance(colours_list, list):
if len(colours_list) == 1:
colours_list[0] = _parse_colour(colours_list[0])
colours_list *= n_objects
elif len(colours_list) != n_objects:
raise ValueError(error_str)
else:
colours_list = [_parse_colour(colours_list)] * n_objects
else:
colours_list = [None] * n_objects
return colours_list
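# Hedged example (added for clarity): the accepted forms of ``colours_list``
# when ``render_flag`` is True; the results noted in comments follow from the
# branches above.
#
# >>> _check_colours_list(True, None, 3, 'err')   # 3 colours sampled from 'jet'
# >>> _check_colours_list(True, 'r', 3, 'err')    # [(1.0, 0.0, 0.0)] * 3
# >>> _check_colours_list(True, ['r', 'g', 'b'], 3, 'err')   # one colour per object
# A list whose length is neither 1 nor n_objects raises ValueError('err').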
def _set_numbering(figure, centers, render_numbering=True, numbers_size=None,
numbers_colour='k'):
import mayavi.mlab as mlab
numbers_colour = _parse_colour(numbers_colour)
numbers_size = _parse_marker_size(numbers_size, centers)
if render_numbering:
for k, p in enumerate(centers):
mlab.text3d(p[0], p[1], p[2], str(k), figure=figure,
scale=numbers_size, orient_to_camera=True,
color=numbers_colour, line_width=2)
class MayaviRenderer(Renderer):
"""
Abstract class for performing visualizations using Mayavi.
Parameters
----------
figure_id : str or `None`
A figure name or `None`. `None` assumes we maintain the Mayavi
state machine and use `mlab.gcf()`.
new_figure : bool
If `True`, creates a new figure to render on.
"""
def __init__(self, figure_id, new_figure):
try:
import mayavi.mlab as mlab
except ImportError:
raise ImportError("mayavi is required for viewing 3D objects "
"(consider 'conda/pip install mayavi')")
super(MayaviRenderer, self).__init__(figure_id, new_figure)
self._supported_ext = ['png', 'jpg', 'bmp', 'tiff', # 2D
'ps', 'eps', 'pdf', # 2D
'rib', 'oogl', 'iv', 'vrml', 'obj'] # 3D
n_ext = len(self._supported_ext)
func_list = [lambda obj, fp, **kwargs: mlab.savefig(fp.name, **obj)] * n_ext
self._extensions_map = dict(zip(['.' + s for s in self._supported_ext],
func_list))
# To store actors for clearing
self._actors = []
def get_figure(self):
r"""
Gets the figure specified by the combination of `self.figure_id` and
`self.new_figure`. If `self.figure_id == None` then `mlab.gcf()`
is used. `self.figure_id` is also set to the correct id of the figure
if a new figure is created.
Returns
-------
figure : Mayavi figure object
The figure we will be rendering on.
"""
import mayavi.mlab as mlab
if self.new_figure or self.figure_id is not None:
self.figure = mlab.figure(self.figure_id, bgcolor=(1, 1, 1))
# and reset the view to z forward, y up.
self.figure.scene.camera.view_up = np.array([0, 1, 0])
else:
self.figure = mlab.gcf()
self.figure_id = self.figure.name
return self.figure
def save_figure(self, filename, format='png', size=None,
magnification='auto', overwrite=False):
r"""
Method for saving the figure of the current `figure_id` to file.
Parameters
----------
filename : `str` or `file`-like object
The string path or file-like object to save the figure at/into.
format : `str`
The format to use. This must match the file path if the file path is
a `str`.
size : `tuple` of `int` or ``None``, optional
The size of the image created (unless magnification is set,
in which case it is the size of the window used for rendering). If
``None``, then the figure size is used.
magnification : `double` or ``'auto'``, optional
The magnification is the scaling between the pixels on the screen,
and the pixels in the file saved. If you do not specify it, it will
be calculated so that the file is saved with the specified size.
If you specify a magnification, Mayavi will use the given size as a
screen size, and the file size will be ``magnification * size``.
If ``'auto'``, then the magnification will be set automatically.
overwrite : `bool`, optional
If ``True``, the file will be overwritten if it already exists.
"""
from menpo.io.output.base import _export
savefig_args = {'size': size, 'figure': self.figure,
'magnification': magnification}
# Use the export code so that we have a consistent interface
_export(savefig_args, filename, self._extensions_map, format,
overwrite=overwrite)
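    # Usage sketch (added commentary; ``r`` is an assumed, already-populated
    # renderer instance). Saving goes through menpo's export machinery, so
    # both the 2D raster/vector and the listed 3D formats are accepted:
    #
    # >>> r.save_figure('scene.png', format='png', size=(800, 600))
    # >>> r.save_figure('scene.obj', format='obj')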
@property
def width(self):
r"""
The width of the scene in pixels.
:type: `int`
"""
return self.figure.scene.get_size()[0]
@property
def height(self):
r"""
The height of the scene in pixels.
:type: `int`
"""
return self.figure.scene.get_size()[1]
@property
def modelview_matrix(self):
r"""
Retrieves the modelview matrix for this scene.
:type: ``(4, 4)`` `ndarray`
"""
camera = self.figure.scene.camera
return camera.view_transform_matrix.to_array().astype(np.float32)
@property
def projection_matrix(self):
r"""
Retrieves the projection matrix for this scene.
:type: ``(4, 4)`` `ndarray`
"""
scene = self.figure.scene
camera = scene.camera
scene_size = tuple(scene.get_size())
aspect_ratio = float(scene_size[0]) / float(scene_size[1])
p = camera.get_projection_transform_matrix(
aspect_ratio, -1, 1).to_array().astype(np.float32)
return p
@property
def renderer_settings(self):
r"""
Returns all the information required to construct an identical
renderer to this one.
Returns
-------
settings : `dict`
The dictionary with the following keys:
* ``'width'`` (`int`) : The width of the scene.
* ``'height'`` (`int`) : The height of the scene.
* ``'model_matrix'`` (`ndarray`) : The model array (identity).
* ``'view_matrix'`` (`ndarray`) : The view array.
* ``'projection_matrix'`` (`ndarray`) : The projection array.
"""
return {'width': self.width,
'height': self.height,
'model_matrix': np.eye(4, dtype=np.float32),
'view_matrix': self.modelview_matrix,
'projection_matrix': self.projection_matrix}
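    # Illustrative note (added, not in the original source): consumers of this
    # dictionary typically compose a model-view-projection matrix from it,
    # e.g. assuming ``renderer`` is a MayaviRenderer with an open figure:
    #
    # >>> s = renderer.renderer_settings
    # >>> mvp = s['projection_matrix'].dot(s['view_matrix']).dot(s['model_matrix'])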
def clear_figure(self):
r"""
Method for clearing the current figure.
"""
from mayavi import mlab
mlab.clf(figure=self.figure)
if len(self._actors) > 0:
self.figure.scene.remove_actors(self._actors)
def force_draw(self):
r"""
Method for forcing the current figure to render. This is useful for
the widgets animation.
"""
from pyface.api import GUI
_gui = GUI()
orig_val = _gui.busy
_gui.set_busy(busy=True)
_gui.set_busy(busy=orig_val)
_gui.process_events()
class MayaviVectorViewer3d(MayaviRenderer):
def __init__(self, figure_id, new_figure, points, vectors):
super(MayaviVectorViewer3d, self).__init__(figure_id, new_figure)
self.points = points
self.vectors = vectors
def render(self, colour='r', line_width=2, marker_style='2darrow',
marker_resolution=8, marker_size=None, step=None, alpha=1.0):
from mayavi import mlab
marker_size = _parse_marker_size(marker_size, self.points)
colour = _parse_colour(colour)
mlab.quiver3d(self.points[:, 0], self.points[:, 1], self.points[:, 2],
self.vectors[:, 0], self.vectors[:, 1], self.vectors[:, 2],
figure=self.figure, color=colour, mask_points=step,
line_width=line_width, mode=marker_style,
resolution=marker_resolution, opacity=alpha,
scale_factor=marker_size)
return self
class MayaviPointGraphViewer3d(MayaviRenderer):
def __init__(self, figure_id, new_figure, points, edges):
super(MayaviPointGraphViewer3d, self).__init__(figure_id, new_figure)
self.points = points
self.edges = edges
def render(self, render_lines=True, line_colour='r', line_width=2,
render_markers=True, marker_style='sphere', marker_size=None,
marker_colour='r', marker_resolution=8, step=None, alpha=1.0,
render_numbering=False, numbers_colour='k', numbers_size=None):
from mayavi import mlab
# Render the lines if requested
if render_lines:
line_colour = _parse_colour(line_colour)
# TODO: Make step work for lines as well
# Create the points
if step is None:
step = 1
src = mlab.pipeline.scalar_scatter(self.points[:, 0],
self.points[:, 1],
self.points[:, 2])
# Connect them
src.mlab_source.dataset.lines = self.edges
# The stripper filter cleans up connected lines
lines = mlab.pipeline.stripper(src)
# Finally, display the set of lines
mlab.pipeline.surface(lines, figure=self.figure, opacity=alpha,
line_width=line_width, color=line_colour)
# Render the markers if requested
if render_markers:
marker_size = _parse_marker_size(marker_size, self.points)
marker_colour = _parse_colour(marker_colour)
mlab.points3d(self.points[:, 0], self.points[:, 1],
self.points[:, 2], figure=self.figure,
scale_factor=marker_size, mode=marker_style,
color=marker_colour, opacity=alpha,
resolution=marker_resolution, mask_points=step)
# set numbering
_set_numbering(self.figure, self.points, numbers_size=numbers_size,
render_numbering=render_numbering,
numbers_colour=numbers_colour)
return self
class MayaviTriMeshViewer3d(MayaviRenderer):
def __init__(self, figure_id, new_figure, points, trilist):
super(MayaviTriMeshViewer3d, self).__init__(figure_id, new_figure)
self.points = points
self.trilist = trilist
def _render_mesh(self, mesh_type, line_width, colour, marker_size,
marker_resolution, marker_style, step, alpha):
import mayavi.mlab as mlab
marker_size = _parse_marker_size(marker_size, self.points)
colour = _parse_colour(colour)
mlab.triangular_mesh(self.points[:, 0], self.points[:, 1],
self.points[:, 2], self.trilist,
figure=self.figure, line_width=line_width,
representation=mesh_type, color=colour,
scale_factor=marker_size, mask_points=step,
resolution=marker_resolution, mode=marker_style,
opacity=alpha, tube_radius=None)
def render(self, mesh_type='wireframe', line_width=2, colour='r',
marker_style='sphere', marker_size=None, marker_resolution=8,
normals=None, normals_colour='k', normals_line_width=2,
normals_marker_style='2darrow', normals_marker_size=None,
normals_marker_resolution=8, step=None, alpha=1.0):
if normals is not None:
MayaviVectorViewer3d(self.figure_id, False,
self.points, normals).render(
colour=normals_colour, line_width=normals_line_width, step=step,
marker_style=normals_marker_style,
marker_resolution=normals_marker_resolution,
marker_size=normals_marker_size, alpha=alpha)
self._render_mesh(mesh_type, line_width, colour, marker_size,
marker_resolution, marker_style, step, alpha)
return self
class MayaviTexturedTriMeshViewer3d(MayaviRenderer):
def __init__(self, figure_id, new_figure, points, trilist, texture,
tcoords_per_point):
super(MayaviTexturedTriMeshViewer3d, self).__init__(figure_id,
new_figure)
self.points = points
self.trilist = trilist
self.texture = texture
self.tcoords_per_point = tcoords_per_point
self._actors = []
def _render_mesh(self, mesh_type='surface', ambient_light=0.0,
specular_light=0.0, alpha=1.0):
from tvtk.api import tvtk
pd = tvtk.PolyData()
pd.points = self.points
pd.polys = self.trilist
pd.point_data.t_coords = self.tcoords_per_point
mapper = tvtk.PolyDataMapper()
mapper.set_input_data(pd)
p = tvtk.Property(representation=mesh_type, opacity=alpha,
ambient=ambient_light, specular=specular_light)
actor = tvtk.Actor(mapper=mapper, property=p)
# Get the pixels from our image class which are [0, 1] and scale
# back to valid pixels. Then convert to tvtk ImageData.
texture = self.texture.pixels_with_channels_at_back(out_dtype=np.uint8)
if self.texture.n_channels == 1:
texture = np.stack([texture] * 3, axis=-1)
image_data = np.flipud(texture).ravel()
image_data = image_data.reshape([-1, 3])
image = tvtk.ImageData()
image.point_data.scalars = image_data
image.dimensions = self.texture.width, self.texture.height, 1
texture = tvtk.Texture()
texture.set_input_data(image)
actor.texture = texture
self.figure.scene.add_actors(actor)
self._actors.append(actor)
def render(self, mesh_type='surface', ambient_light=0.0, specular_light=0.0,
normals=None, normals_colour='k', normals_line_width=2,
normals_marker_style='2darrow', normals_marker_resolution=8,
normals_marker_size=None, step=None, alpha=1.0):
if normals is not None:
MayaviVectorViewer3d(self.figure_id, False,
self.points, normals).render(
colour=normals_colour, line_width=normals_line_width, step=step,
marker_style=normals_marker_style,
marker_resolution=normals_marker_resolution,
marker_size=normals_marker_size, alpha=alpha)
self._render_mesh(mesh_type=mesh_type, ambient_light=ambient_light,
specular_light=specular_light, alpha=alpha)
return self
class MayaviColouredTriMeshViewer3d(MayaviRenderer):
def __init__(self, figure_id, new_figure, points, trilist,
colour_per_point):
super(MayaviColouredTriMeshViewer3d, self).__init__(figure_id,
new_figure)
self.points = points
self.trilist = trilist
self.colour_per_point = colour_per_point
self._actors = []
def _render_mesh(self, mesh_type='surface', ambient_light=0.0,
specular_light=0.0, alpha=1.0):
from tvtk.api import tvtk
pd = tvtk.PolyData()
pd.points = self.points
pd.polys = self.trilist
pd.point_data.scalars = (self.colour_per_point * 255.).astype(np.uint8)
mapper = tvtk.PolyDataMapper()
mapper.set_input_data(pd)
p = tvtk.Property(representation=mesh_type, opacity=alpha,
ambient=ambient_light, specular=specular_light)
actor = tvtk.Actor(mapper=mapper, property=p)
self.figure.scene.add_actors(actor)
self._actors.append(actor)
def render(self, mesh_type='surface', ambient_light=0.0, specular_light=0.0,
normals=None, normals_colour='k', normals_line_width=2,
normals_marker_style='2darrow', normals_marker_resolution=8,
normals_marker_size=None, step=None, alpha=1.0):
if normals is not None:
MayaviVectorViewer3d(self.figure_id, False,
self.points, normals).render(
colour=normals_colour, line_width=normals_line_width, step=step,
marker_style=normals_marker_style,
marker_resolution=normals_marker_resolution,
marker_size=normals_marker_size, alpha=alpha)
self._render_mesh(mesh_type=mesh_type, ambient_light=ambient_light,
specular_light=specular_light, alpha=alpha)
return self
class MayaviSurfaceViewer3d(MayaviRenderer):
def __init__(self, figure_id, new_figure, values, mask=None):
super(MayaviSurfaceViewer3d, self).__init__(figure_id, new_figure)
if mask is not None:
values[~mask] = np.nan
self.values = values
def render(self, colour=(1, 0, 0), line_width=2, step=None,
marker_style='2darrow', marker_resolution=8, marker_size=0.05,
               alpha=1.0, **kwargs):
from mayavi import mlab
warp_scale = kwargs.get('warp_scale', 'auto')
mlab.surf(self.values, warp_scale=warp_scale)
return self
class MayaviLandmarkViewer3d(MayaviRenderer):
def __init__(self, figure_id, new_figure, group, landmark_group):
super(MayaviLandmarkViewer3d, self).__init__(figure_id, new_figure)
self.group = group
self.landmark_group = landmark_group
def render(self, render_lines=True, line_colour='r', line_width=2,
render_markers=True, marker_style='sphere', marker_size=None,
marker_colour='r', marker_resolution=8, step=None, alpha=1.0,
render_numbering=False, numbers_colour='k', numbers_size=None):
# Regarding the labels colours, we may get passed either no colours (in
# which case we generate random colours) or a single colour to colour
# all the labels with
# TODO: All marker and line options could be defined as lists...
n_labels = self.landmark_group.n_labels
line_colour = _check_colours_list(
render_lines, line_colour, n_labels,
'Must pass a list of line colours with length n_labels or a single '
'line colour for all labels.')
marker_colour = _check_colours_list(
render_markers, marker_colour, n_labels,
'Must pass a list of marker colours with length n_labels or a '
'single marker face colour for all labels.')
marker_size = _parse_marker_size(marker_size, self.landmark_group.points)
numbers_size = _parse_marker_size(numbers_size,
self.landmark_group.points)
# get pointcloud of each label
sub_pointclouds = self._build_sub_pointclouds()
# for each pointcloud
# disabling the rendering greatly speeds up this for loop
self.figure.scene.disable_render = True
for i, (label, pc) in enumerate(sub_pointclouds):
# render pointcloud
pc.view(figure_id=self.figure_id, new_figure=False,
render_lines=render_lines, line_colour=line_colour[i],
line_width=line_width, render_markers=render_markers,
marker_style=marker_style, marker_size=marker_size,
marker_colour=marker_colour[i],
marker_resolution=marker_resolution, step=step,
alpha=alpha, render_numbering=render_numbering,
numbers_colour=numbers_colour, numbers_size=numbers_size)
self.figure.scene.disable_render = False
return self
def _build_sub_pointclouds(self):
return [(label, self.landmark_group.get_label(label))
for label in self.landmark_group.labels]
| bsd-3-clause |
wackymaster/QTClock | Libraries/matplotlib/backends/backend_pdf.py | 7 | 95987 | # -*- coding: utf-8 -*-
"""
A PDF matplotlib backend
Author: Jouni K Seppänen <[email protected]>
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import codecs
import os
import re
import struct
import sys
import time
import warnings
import zlib
from io import BytesIO
import numpy as np
from matplotlib.externals.six import unichr
from datetime import datetime
from math import ceil, cos, floor, pi, sin
import matplotlib
from matplotlib import __version__, rcParams
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.cbook import Bunch, is_string_like, \
get_realpath_and_stat, is_writable_file_like, maxdict
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont, is_opentype_cff_font
from matplotlib.afm import AFM
import matplotlib.type1font as type1font
import matplotlib.dviread as dviread
from matplotlib.ft2font import FT2Font, FIXED_WIDTH, ITALIC, LOAD_NO_SCALE, \
LOAD_NO_HINTING, KERNING_UNFITTED
from matplotlib.mathtext import MathTextParser
from matplotlib.transforms import Affine2D, BboxBase
from matplotlib.path import Path
from matplotlib import _path
from matplotlib import _png
from matplotlib import ttconv
# Overview
#
# The low-level knowledge about pdf syntax lies mainly in the pdfRepr
# function and the classes Reference, Name, Operator, and Stream. The
# PdfFile class knows about the overall structure of pdf documents.
# It provides a "write" method for writing arbitrary strings in the
# file, and an "output" method that passes objects through the pdfRepr
# function before writing them in the file. The output method is
# called by the RendererPdf class, which contains the various draw_foo
# methods. RendererPdf contains a GraphicsContextPdf instance, and
# each draw_foo calls self.check_gc before outputting commands. This
# method checks whether the pdf graphics state needs to be modified
# and outputs the necessary commands. GraphicsContextPdf represents
# the graphics state, and its "delta" method returns the commands that
# modify the state.
# Add "pdf.use14corefonts: True" in your configuration file to use only
# the 14 PDF core fonts. These fonts do not need to be embedded; every
# PDF viewing application is required to have them. This results in very
# light PDF files you can use directly in LaTeX or ConTeXt documents
# generated with pdfTeX, without any conversion.
# These fonts are: Helvetica, Helvetica-Bold, Helvetica-Oblique,
# Helvetica-BoldOblique, Courier, Courier-Bold, Courier-Oblique,
# Courier-BoldOblique, Times-Roman, Times-Bold, Times-Italic,
# Times-BoldItalic, Symbol, ZapfDingbats.
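#
# For example (illustrative, using the standard rcParams interface):
#
#     import matplotlib
#     matplotlib.rcParams['pdf.use14corefonts'] = True
#
# or equivalently put ``pdf.use14corefonts : True`` in your matplotlibrc.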
#
# Some tricky points:
#
# 1. The clip path can only be widened by popping from the state
# stack. Thus the state must be pushed onto the stack before narrowing
# the clip path. This is taken care of by GraphicsContextPdf.
#
# 2. Sometimes it is necessary to refer to something (e.g., font,
# image, or extended graphics state, which contains the alpha value)
# in the page stream by a name that needs to be defined outside the
# stream. PdfFile provides the methods fontName, imageObject, and
# alphaState for this purpose. The implementations of these methods
# should perhaps be generalized.
# TODOs:
#
# * encoding of fonts, including mathtext fonts and unicode support
# * TTF support has lots of small TODOs, e.g., how do you know if a font
# is serif/sans-serif, or symbolic/non-symbolic?
# * draw_markers, draw_line_collection, etc.
def fill(strings, linelen=75):
"""Make one string from sequence of strings, with whitespace
in between. The whitespace is chosen to form lines of at most
linelen characters, if possible."""
currpos = 0
lasti = 0
result = []
for i, s in enumerate(strings):
length = len(s)
if currpos + length < linelen:
currpos += length + 1
else:
result.append(b' '.join(strings[lasti:i]))
lasti = i
currpos = length
result.append(b' '.join(strings[lasti:]))
return b'\n'.join(result)
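# Example (added commentary): ``fill`` joins byte strings with spaces and
# breaks lines so each stays under ``linelen`` characters where possible, e.g.
#
# >>> fill([b'one', b'two', b'three'], linelen=8)
# b'one two\nthree'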
# PDF strings are supposed to be able to include any eight-bit data,
# except that unbalanced parens and backslashes must be escaped by a
# backslash. However, sf bug #2708559 shows that the carriage return
# character may get read as a newline; these characters correspond to
# \gamma and \Omega in TeX's math font encoding. Escaping them fixes
# the bug.
_string_escape_regex = re.compile(br'([\\()\r\n])')
def _string_escape(match):
m = match.group(0)
if m in br'\()':
return b'\\' + m
elif m == b'\n':
return br'\n'
elif m == b'\r':
return br'\r'
assert False
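# Example (added commentary): how the escaping behaves on a small payload.
#
# >>> _string_escape_regex.sub(_string_escape, b'(x\\y)')
# b'\\(x\\\\y\\)'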
def pdfRepr(obj):
"""Map Python objects to PDF syntax."""
# Some objects defined later have their own pdfRepr method.
if hasattr(obj, 'pdfRepr'):
return obj.pdfRepr()
# Floats. PDF does not have exponential notation (1.0e-10) so we
# need to use %f with some precision. Perhaps the precision
# should adapt to the magnitude of the number?
elif isinstance(obj, (float, np.floating)):
if not np.isfinite(obj):
raise ValueError("Can only output finite numbers in PDF")
r = ("%.10f" % obj).encode('ascii')
return r.rstrip(b'0').rstrip(b'.')
# Booleans. Needs to be tested before integers since
# isinstance(True, int) is true.
elif isinstance(obj, bool):
return [b'false', b'true'][obj]
# Integers are written as such.
elif isinstance(obj, (six.integer_types, np.integer)):
return ("%d" % obj).encode('ascii')
# Unicode strings are encoded in UTF-16BE with byte-order mark.
elif isinstance(obj, six.text_type):
try:
# But maybe it's really ASCII?
s = obj.encode('ASCII')
return pdfRepr(s)
except UnicodeEncodeError:
s = codecs.BOM_UTF16_BE + obj.encode('UTF-16BE')
return pdfRepr(s)
# Strings are written in parentheses, with backslashes and parens
# escaped. Actually balanced parens are allowed, but it is
# simpler to escape them all. TODO: cut long strings into lines;
# I believe there is some maximum line length in PDF.
elif isinstance(obj, bytes):
return b'(' + _string_escape_regex.sub(_string_escape, obj) + b')'
# Dictionaries. The keys must be PDF names, so if we find strings
# there, we make Name objects from them. The values may be
# anything, so the caller must ensure that PDF names are
# represented as Name objects.
elif isinstance(obj, dict):
r = [b"<<"]
r.extend([Name(key).pdfRepr() + b" " + pdfRepr(val)
for key, val in six.iteritems(obj)])
r.append(b">>")
return fill(r)
# Lists.
elif isinstance(obj, (list, tuple)):
r = [b"["]
r.extend([pdfRepr(val) for val in obj])
r.append(b"]")
return fill(r)
# The null keyword.
elif obj is None:
return b'null'
# A date.
elif isinstance(obj, datetime):
r = obj.strftime('D:%Y%m%d%H%M%S')
if time.daylight:
z = time.altzone
else:
z = time.timezone
if z == 0:
r += 'Z'
elif z < 0:
r += "+%02d'%02d'" % ((-z) // 3600, (-z) % 3600)
else:
r += "-%02d'%02d'" % (z // 3600, z % 3600)
return pdfRepr(r)
# A bounding box
elif isinstance(obj, BboxBase):
return fill([pdfRepr(val) for val in obj.bounds])
else:
msg = "Don't know a PDF representation for %s objects." % type(obj)
raise TypeError(msg)
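# A few illustrative mappings (added commentary; the outputs follow directly
# from the branches above):
#
# >>> pdfRepr(True)
# b'true'
# >>> pdfRepr(3.25)
# b'3.25'
# >>> pdfRepr([1, None])
# b'[ 1 null ]'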
class Reference(object):
"""PDF reference object.
Use PdfFile.reserveObject() to create References.
"""
def __init__(self, id):
self.id = id
def __repr__(self):
return "<Reference %d>" % self.id
def pdfRepr(self):
return ("%d 0 R" % self.id).encode('ascii')
def write(self, contents, file):
write = file.write
write(("%d 0 obj\n" % self.id).encode('ascii'))
write(pdfRepr(contents))
write(b"\nendobj\n")
class Name(object):
"""PDF name object."""
__slots__ = ('name',)
_regex = re.compile(r'[^!-~]')
def __init__(self, name):
if isinstance(name, Name):
self.name = name.name
else:
if isinstance(name, bytes):
name = name.decode('ascii')
self.name = self._regex.sub(Name.hexify, name).encode('ascii')
def __repr__(self):
return "<Name %s>" % self.name
def __str__(self):
return '/' + six.text_type(self.name)
@staticmethod
def hexify(match):
return '#%02x' % ord(match.group())
def pdfRepr(self):
return b'/' + self.name
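# Example (added commentary): characters outside the printable ASCII range
# allowed in PDF names are hex-escaped.
#
# >>> Name('odd name').pdfRepr()
# b'/odd#20name'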
class Operator(object):
"""PDF operator object."""
__slots__ = ('op',)
def __init__(self, op):
self.op = op
def __repr__(self):
return '<Operator %s>' % self.op
def pdfRepr(self):
return self.op
class Verbatim(object):
"""Store verbatim PDF command content for later inclusion in the
stream."""
def __init__(self, x):
self._x = x
def pdfRepr(self):
return self._x
# PDF operators (not an exhaustive list)
_pdfops = dict(
close_fill_stroke=b'b', fill_stroke=b'B', fill=b'f', closepath=b'h',
close_stroke=b's', stroke=b'S', endpath=b'n', begin_text=b'BT',
end_text=b'ET', curveto=b'c', rectangle=b're', lineto=b'l', moveto=b'm',
concat_matrix=b'cm', use_xobject=b'Do', setgray_stroke=b'G',
setgray_nonstroke=b'g', setrgb_stroke=b'RG', setrgb_nonstroke=b'rg',
setcolorspace_stroke=b'CS', setcolorspace_nonstroke=b'cs',
setcolor_stroke=b'SCN', setcolor_nonstroke=b'scn', setdash=b'd',
setlinejoin=b'j', setlinecap=b'J', setgstate=b'gs', gsave=b'q',
grestore=b'Q', textpos=b'Td', selectfont=b'Tf', textmatrix=b'Tm',
show=b'Tj', showkern=b'TJ', setlinewidth=b'w', clip=b'W', shading=b'sh')
Op = Bunch(**dict([(name, Operator(value))
for name, value in six.iteritems(_pdfops)]))
def _paint_path(fill, stroke):
"""Return the PDF operator to paint a path in the following way:
fill: fill the path with the fill color
stroke: stroke the outline of the path with the line color"""
if stroke:
if fill:
return Op.fill_stroke
else:
return Op.stroke
else:
if fill:
return Op.fill
else:
return Op.endpath
Op.paint_path = _paint_path
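# Examples (added commentary): Op.paint_path(True, False) is Op.fill,
# Op.paint_path(False, True) is Op.stroke, Op.paint_path(True, True) is
# Op.fill_stroke, and Op.paint_path(False, False) is Op.endpath.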
class Stream(object):
"""PDF stream object.
This has no pdfRepr method. Instead, call begin(), then output the
contents of the stream by calling write(), and finally call end().
"""
__slots__ = ('id', 'len', 'pdfFile', 'file', 'compressobj', 'extra', 'pos')
def __init__(self, id, len, file, extra=None, png=None):
"""id: object id of stream; len: an unused Reference object for the
length of the stream, or None (to use a memory buffer); file:
a PdfFile; extra: a dictionary of extra key-value pairs to
include in the stream header; png: if the data is already
png compressed, the decode parameters"""
self.id = id # object id
self.len = len # id of length object
self.pdfFile = file
self.file = file.fh # file to which the stream is written
self.compressobj = None # compression object
if extra is None:
self.extra = dict()
else:
self.extra = extra.copy()
if png is not None:
self.extra.update({'Filter': Name('FlateDecode'),
'DecodeParms': png})
self.pdfFile.recordXref(self.id)
if rcParams['pdf.compression'] and not png:
self.compressobj = zlib.compressobj(rcParams['pdf.compression'])
if self.len is None:
self.file = BytesIO()
else:
self._writeHeader()
self.pos = self.file.tell()
def _writeHeader(self):
write = self.file.write
write(("%d 0 obj\n" % self.id).encode('ascii'))
dict = self.extra
dict['Length'] = self.len
if rcParams['pdf.compression']:
dict['Filter'] = Name('FlateDecode')
write(pdfRepr(dict))
write(b"\nstream\n")
def end(self):
"""Finalize stream."""
self._flush()
if self.len is None:
contents = self.file.getvalue()
self.len = len(contents)
self.file = self.pdfFile.fh
self._writeHeader()
self.file.write(contents)
self.file.write(b"\nendstream\nendobj\n")
else:
length = self.file.tell() - self.pos
self.file.write(b"\nendstream\nendobj\n")
self.pdfFile.writeObject(self.len, length)
def write(self, data):
"""Write some data on the stream."""
if self.compressobj is None:
self.file.write(data)
else:
compressed = self.compressobj.compress(data)
self.file.write(compressed)
def _flush(self):
"""Flush the compression object."""
if self.compressobj is not None:
compressed = self.compressobj.flush()
self.file.write(compressed)
self.compressobj = None
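# Typical lifecycle (illustrative sketch of how PdfFile drives this class via
# beginStream/endStream): construct a Stream, call write() any number of
# times, then end() to flush the compressor and fix up the length object.
#
# >>> # stream = Stream(oid, length_ref, pdf_file, extra={'Type': Name('XObject')})
# >>> # stream.write(b'...content...')
# >>> # stream.end()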
class PdfFile(object):
"""PDF file object."""
def __init__(self, filename):
self.nextObject = 1 # next free object id
self.xrefTable = [[0, 65535, 'the zero object']]
self.passed_in_file_object = False
self.original_file_like = None
self.tell_base = 0
if is_string_like(filename):
fh = open(filename, 'wb')
elif is_writable_file_like(filename):
try:
self.tell_base = filename.tell()
except IOError:
fh = BytesIO()
self.original_file_like = filename
else:
fh = filename
self.passed_in_file_object = True
else:
raise ValueError("filename must be a path or a file-like object")
self._core14fontdir = os.path.join(
rcParams['datapath'], 'fonts', 'pdfcorefonts')
self.fh = fh
self.currentstream = None # stream object to write to, if any
fh.write(b"%PDF-1.4\n") # 1.4 is the first version to have alpha
# Output some eight-bit chars as a comment so various utilities
# recognize the file as binary by looking at the first few
# lines (see note in section 3.4.1 of the PDF reference).
fh.write(b"%\254\334 \253\272\n")
self.rootObject = self.reserveObject('root')
self.pagesObject = self.reserveObject('pages')
self.pageList = []
self.fontObject = self.reserveObject('fonts')
self.alphaStateObject = self.reserveObject('extended graphics states')
self.hatchObject = self.reserveObject('tiling patterns')
self.gouraudObject = self.reserveObject('Gouraud triangles')
self.XObjectObject = self.reserveObject('external objects')
self.resourceObject = self.reserveObject('resources')
root = {'Type': Name('Catalog'),
'Pages': self.pagesObject}
self.writeObject(self.rootObject, root)
revision = ''
self.infoDict = {
'Creator': 'matplotlib %s, http://matplotlib.org' % __version__,
'Producer': 'matplotlib pdf backend%s' % revision,
'CreationDate': datetime.today()
}
self.fontNames = {} # maps filenames to internal font names
self.nextFont = 1 # next free internal font name
self.dviFontInfo = {} # information on dvi fonts
# differently encoded Type-1 fonts may share the same descriptor
self.type1Descriptors = {}
self.used_characters = {}
self.alphaStates = {} # maps alpha values to graphics state objects
self.nextAlphaState = 1
self.hatchPatterns = {}
self.nextHatch = 1
self.gouraudTriangles = []
self.images = {}
self.nextImage = 1
self.markers = {}
self.multi_byte_charprocs = {}
self.paths = []
self.pageAnnotations = [] # A list of annotations for the
# current page
# The PDF spec recommends to include every procset
procsets = [Name(x)
for x in "PDF Text ImageB ImageC ImageI".split()]
# Write resource dictionary.
# Possibly TODO: more general ExtGState (graphics state dictionaries)
# ColorSpace Pattern Shading Properties
resources = {'Font': self.fontObject,
'XObject': self.XObjectObject,
'ExtGState': self.alphaStateObject,
'Pattern': self.hatchObject,
'Shading': self.gouraudObject,
'ProcSet': procsets}
self.writeObject(self.resourceObject, resources)
def newPage(self, width, height):
self.endStream()
self.width, self.height = width, height
contentObject = self.reserveObject('page contents')
thePage = {'Type': Name('Page'),
'Parent': self.pagesObject,
'Resources': self.resourceObject,
'MediaBox': [0, 0, 72 * width, 72 * height],
'Contents': contentObject,
'Group': {'Type': Name('Group'),
'S': Name('Transparency'),
'CS': Name('DeviceRGB')},
'Annots': self.pageAnnotations,
}
pageObject = self.reserveObject('page')
self.writeObject(pageObject, thePage)
self.pageList.append(pageObject)
self.beginStream(contentObject.id,
self.reserveObject('length of content stream'))
# Initialize the pdf graphics state to match the default mpl
# graphics context: currently only the join style needs to be set
self.output(GraphicsContextPdf.joinstyles['round'], Op.setlinejoin)
# Clear the list of annotations for the next page
self.pageAnnotations = []
def newTextnote(self, text, positionRect=[-100, -100, 0, 0]):
# Create a new annotation of type text
theNote = {'Type': Name('Annot'),
'Subtype': Name('Text'),
'Contents': text,
'Rect': positionRect,
}
annotObject = self.reserveObject('annotation')
self.writeObject(annotObject, theNote)
self.pageAnnotations.append(annotObject)
def close(self):
self.endStream()
# Write out the various deferred objects
self.writeFonts()
self.writeObject(self.alphaStateObject,
dict([(val[0], val[1])
for val in six.itervalues(self.alphaStates)]))
self.writeHatches()
self.writeGouraudTriangles()
xobjects = dict(six.itervalues(self.images))
for tup in six.itervalues(self.markers):
xobjects[tup[0]] = tup[1]
for name, value in six.iteritems(self.multi_byte_charprocs):
xobjects[name] = value
for name, path, trans, ob, join, cap, padding, filled, stroked \
in self.paths:
xobjects[name] = ob
self.writeObject(self.XObjectObject, xobjects)
self.writeImages()
self.writeMarkers()
self.writePathCollectionTemplates()
self.writeObject(self.pagesObject,
{'Type': Name('Pages'),
'Kids': self.pageList,
'Count': len(self.pageList)})
self.writeInfoDict()
# Finalize the file
self.writeXref()
self.writeTrailer()
if self.passed_in_file_object:
self.fh.flush()
elif self.original_file_like is not None:
self.original_file_like.write(self.fh.getvalue())
self.fh.close()
else:
self.fh.close()
def write(self, data):
if self.currentstream is None:
self.fh.write(data)
else:
self.currentstream.write(data)
def output(self, *data):
self.write(fill([pdfRepr(x) for x in data]))
self.write(b'\n')
def beginStream(self, id, len, extra=None, png=None):
assert self.currentstream is None
self.currentstream = Stream(id, len, self, extra, png)
def endStream(self):
if self.currentstream is not None:
self.currentstream.end()
self.currentstream = None
def fontName(self, fontprop):
"""
Select a font based on fontprop and return a name suitable for
Op.selectfont. If fontprop is a string, it will be interpreted
as the filename (or dvi name) of the font.
"""
if is_string_like(fontprop):
filename = fontprop
elif rcParams['pdf.use14corefonts']:
filename = findfont(
fontprop, fontext='afm', directory=self._core14fontdir)
if filename is None:
filename = findfont(
"Helvetica", fontext='afm', directory=self._core14fontdir)
else:
filename = findfont(fontprop)
Fx = self.fontNames.get(filename)
if Fx is None:
Fx = Name('F%d' % self.nextFont)
self.fontNames[filename] = Fx
self.nextFont += 1
matplotlib.verbose.report(
'Assigning font %s = %r' % (Fx, filename),
'debug')
return Fx
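    # Added note (illustrative): ``fontName`` memoizes on the resolved font
    # file, so repeated lookups with equivalent font properties reuse the same
    # internal /F<n> name, e.g. (assuming a ``pdf_file`` instance and the
    # default font configuration):
    #
    # >>> # pdf_file.fontName(FontProperties(family='sans-serif'))  -> Name('F1')
    # >>> # pdf_file.fontName(FontProperties(family='sans-serif'))  -> Name('F1')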
def writeFonts(self):
fonts = {}
for filename, Fx in six.iteritems(self.fontNames):
matplotlib.verbose.report('Embedding font %s' % filename, 'debug')
if filename.endswith('.afm'):
# from pdf.use14corefonts
matplotlib.verbose.report('Writing AFM font', 'debug')
fonts[Fx] = self._write_afm_font(filename)
elif filename in self.dviFontInfo:
# a Type 1 font from a dvi file;
# the filename is really the TeX name
matplotlib.verbose.report('Writing Type-1 font', 'debug')
fonts[Fx] = self.embedTeXFont(filename,
self.dviFontInfo[filename])
else:
# a normal TrueType font
matplotlib.verbose.report('Writing TrueType font', 'debug')
realpath, stat_key = get_realpath_and_stat(filename)
chars = self.used_characters.get(stat_key)
if chars is not None and len(chars[1]):
fonts[Fx] = self.embedTTF(realpath, chars[1])
self.writeObject(self.fontObject, fonts)
def _write_afm_font(self, filename):
with open(filename, 'rb') as fh:
font = AFM(fh)
fontname = font.get_fontname()
fontdict = {'Type': Name('Font'),
'Subtype': Name('Type1'),
'BaseFont': Name(fontname),
'Encoding': Name('WinAnsiEncoding')}
fontdictObject = self.reserveObject('font dictionary')
self.writeObject(fontdictObject, fontdict)
return fontdictObject
def embedTeXFont(self, texname, fontinfo):
msg = ('Embedding TeX font ' + texname + ' - fontinfo=' +
repr(fontinfo.__dict__))
matplotlib.verbose.report(msg, 'debug')
# Widths
widthsObject = self.reserveObject('font widths')
self.writeObject(widthsObject, fontinfo.dvifont.widths)
# Font dictionary
fontdictObject = self.reserveObject('font dictionary')
fontdict = {
'Type': Name('Font'),
'Subtype': Name('Type1'),
'FirstChar': 0,
'LastChar': len(fontinfo.dvifont.widths) - 1,
'Widths': widthsObject,
}
# Encoding (if needed)
if fontinfo.encodingfile is not None:
enc = dviread.Encoding(fontinfo.encodingfile)
differencesArray = [Name(ch) for ch in enc]
differencesArray = [0] + differencesArray
fontdict['Encoding'] = \
{'Type': Name('Encoding'),
'Differences': differencesArray}
# If no file is specified, stop short
if fontinfo.fontfile is None:
msg = ('Because of TeX configuration (pdftex.map, see updmap '
'option pdftexDownloadBase14) the font {0} is not '
'embedded. This is deprecated as of PDF 1.5 and it may '
'cause the consumer application to show something that '
'was not intended.').format(fontinfo.basefont)
warnings.warn(msg)
fontdict['BaseFont'] = Name(fontinfo.basefont)
self.writeObject(fontdictObject, fontdict)
return fontdictObject
# We have a font file to embed - read it in and apply any effects
t1font = type1font.Type1Font(fontinfo.fontfile)
if fontinfo.effects:
t1font = t1font.transform(fontinfo.effects)
fontdict['BaseFont'] = Name(t1font.prop['FontName'])
# Font descriptors may be shared between differently encoded
# Type-1 fonts, so only create a new descriptor if there is no
# existing descriptor for this font.
effects = (fontinfo.effects.get('slant', 0.0),
fontinfo.effects.get('extend', 1.0))
fontdesc = self.type1Descriptors.get((fontinfo.fontfile, effects))
if fontdesc is None:
fontdesc = self.createType1Descriptor(t1font, fontinfo.fontfile)
self.type1Descriptors[(fontinfo.fontfile, effects)] = fontdesc
fontdict['FontDescriptor'] = fontdesc
self.writeObject(fontdictObject, fontdict)
return fontdictObject
def createType1Descriptor(self, t1font, fontfile):
# Create and write the font descriptor and the font file
# of a Type-1 font
fontdescObject = self.reserveObject('font descriptor')
fontfileObject = self.reserveObject('font file')
italic_angle = t1font.prop['ItalicAngle']
fixed_pitch = t1font.prop['isFixedPitch']
flags = 0
# fixed width
if fixed_pitch:
flags |= 1 << 0
# TODO: serif
if 0:
flags |= 1 << 1
# TODO: symbolic (most TeX fonts are)
if 1:
flags |= 1 << 2
# non-symbolic
else:
flags |= 1 << 5
# italic
if italic_angle:
flags |= 1 << 6
# TODO: all caps
if 0:
flags |= 1 << 16
# TODO: small caps
if 0:
flags |= 1 << 17
# TODO: force bold
if 0:
flags |= 1 << 18
ft2font = FT2Font(fontfile)
descriptor = {
'Type': Name('FontDescriptor'),
'FontName': Name(t1font.prop['FontName']),
'Flags': flags,
'FontBBox': ft2font.bbox,
'ItalicAngle': italic_angle,
'Ascent': ft2font.ascender,
'Descent': ft2font.descender,
'CapHeight': 1000, # TODO: find this out
'XHeight': 500, # TODO: this one too
'FontFile': fontfileObject,
'FontFamily': t1font.prop['FamilyName'],
'StemV': 50, # TODO
# (see also revision 3874; but not all TeX distros have AFM files!)
# 'FontWeight': a number where 400 = Regular, 700 = Bold
}
self.writeObject(fontdescObject, descriptor)
self.beginStream(fontfileObject.id, None,
{'Length1': len(t1font.parts[0]),
'Length2': len(t1font.parts[1]),
'Length3': 0})
self.currentstream.write(t1font.parts[0])
self.currentstream.write(t1font.parts[1])
self.endStream()
return fontdescObject
def _get_xobject_symbol_name(self, filename, symbol_name):
return "%s-%s" % (
os.path.splitext(os.path.basename(filename))[0],
symbol_name)
_identityToUnicodeCMap = """/CIDInit /ProcSet findresource begin
12 dict begin
begincmap
/CIDSystemInfo
<< /Registry (Adobe)
/Ordering (UCS)
/Supplement 0
>> def
/CMapName /Adobe-Identity-UCS def
/CMapType 2 def
1 begincodespacerange
<0000> <ffff>
endcodespacerange
%d beginbfrange
%s
endbfrange
endcmap
CMapName currentdict /CMap defineresource pop
end
end"""
def embedTTF(self, filename, characters):
"""Embed the TTF font from the named file into the document."""
font = FT2Font(filename)
fonttype = rcParams['pdf.fonttype']
def cvt(length, upe=font.units_per_EM, nearest=True):
"Convert font coordinates to PDF glyph coordinates"
value = length / upe * 1000
if nearest:
return round(value)
# Perhaps best to round away from zero for bounding
# boxes and the like
if value < 0:
return floor(value)
else:
return ceil(value)
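        # Worked example (numbers hypothetical): with units_per_EM == 2048,
        # cvt(1024) == round(1024 / 2048 * 1000) == 500 glyph-space units,
        # while cvt(-1, nearest=False) rounds away from zero to floor(-0.49),
        # i.e. -1, which keeps bounding boxes conservative.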
def embedTTFType3(font, characters, descriptor):
"""The Type 3-specific part of embedding a Truetype font"""
widthsObject = self.reserveObject('font widths')
fontdescObject = self.reserveObject('font descriptor')
fontdictObject = self.reserveObject('font dictionary')
charprocsObject = self.reserveObject('character procs')
differencesArray = []
firstchar, lastchar = 0, 255
bbox = [cvt(x, nearest=False) for x in font.bbox]
fontdict = {
'Type': Name('Font'),
'BaseFont': ps_name,
'FirstChar': firstchar,
'LastChar': lastchar,
'FontDescriptor': fontdescObject,
'Subtype': Name('Type3'),
'Name': descriptor['FontName'],
'FontBBox': bbox,
'FontMatrix': [.001, 0, 0, .001, 0, 0],
'CharProcs': charprocsObject,
'Encoding': {
'Type': Name('Encoding'),
'Differences': differencesArray},
'Widths': widthsObject
}
# Make the "Widths" array
from encodings import cp1252
# The "decoding_map" was changed
# to a "decoding_table" as of Python 2.5.
if hasattr(cp1252, 'decoding_map'):
def decode_char(charcode):
return cp1252.decoding_map[charcode] or 0
else:
def decode_char(charcode):
return ord(cp1252.decoding_table[charcode])
def get_char_width(charcode):
s = decode_char(charcode)
width = font.load_char(
s, flags=LOAD_NO_SCALE | LOAD_NO_HINTING).horiAdvance
return cvt(width)
widths = [get_char_width(charcode)
for charcode in range(firstchar, lastchar+1)]
descriptor['MaxWidth'] = max(widths)
# Make the "Differences" array, sort the ccodes < 255 from
# the multi-byte ccodes, and build the whole set of glyph ids
# that we need from this font.
cmap = font.get_charmap()
glyph_ids = []
differences = []
multi_byte_chars = set()
for c in characters:
ccode = c
gind = cmap.get(ccode) or 0
glyph_ids.append(gind)
glyph_name = font.get_glyph_name(gind)
if ccode <= 255:
differences.append((ccode, glyph_name))
else:
multi_byte_chars.add(glyph_name)
differences.sort()
last_c = -2
for c, name in differences:
if c != last_c + 1:
differencesArray.append(c)
differencesArray.append(Name(name))
last_c = c
# Make the charprocs array (using ttconv to generate the
# actual outlines)
rawcharprocs = ttconv.get_pdf_charprocs(
filename.encode(sys.getfilesystemencoding()), glyph_ids)
charprocs = {}
for charname, stream in six.iteritems(rawcharprocs):
charprocDict = {'Length': len(stream)}
# The 2-byte characters are used as XObjects, so they
# need extra info in their dictionary
if charname in multi_byte_chars:
charprocDict['Type'] = Name('XObject')
charprocDict['Subtype'] = Name('Form')
charprocDict['BBox'] = bbox
# Each glyph includes bounding box information,
# but xpdf and ghostscript can't handle it in a
# Form XObject (they segfault!!!), so we remove it
# from the stream here. It's not needed anyway,
# since the Form XObject includes it in its BBox
# value.
stream = stream[stream.find(b"d1") + 2:]
charprocObject = self.reserveObject('charProc')
self.beginStream(charprocObject.id, None, charprocDict)
self.currentstream.write(stream)
self.endStream()
# Send the glyphs with ccode > 255 to the XObject dictionary,
# and the others to the font itself
if charname in multi_byte_chars:
name = self._get_xobject_symbol_name(filename, charname)
self.multi_byte_charprocs[name] = charprocObject
else:
charprocs[charname] = charprocObject
# Write everything out
self.writeObject(fontdictObject, fontdict)
self.writeObject(fontdescObject, descriptor)
self.writeObject(widthsObject, widths)
self.writeObject(charprocsObject, charprocs)
return fontdictObject
def embedTTFType42(font, characters, descriptor):
"""The Type 42-specific part of embedding a Truetype font"""
fontdescObject = self.reserveObject('font descriptor')
cidFontDictObject = self.reserveObject('CID font dictionary')
type0FontDictObject = self.reserveObject('Type 0 font dictionary')
cidToGidMapObject = self.reserveObject('CIDToGIDMap stream')
fontfileObject = self.reserveObject('font file stream')
wObject = self.reserveObject('Type 0 widths')
toUnicodeMapObject = self.reserveObject('ToUnicode map')
cidFontDict = {
'Type': Name('Font'),
'Subtype': Name('CIDFontType2'),
'BaseFont': ps_name,
'CIDSystemInfo': {
'Registry': 'Adobe',
'Ordering': 'Identity',
'Supplement': 0},
'FontDescriptor': fontdescObject,
'W': wObject,
'CIDToGIDMap': cidToGidMapObject
}
type0FontDict = {
'Type': Name('Font'),
'Subtype': Name('Type0'),
'BaseFont': ps_name,
'Encoding': Name('Identity-H'),
'DescendantFonts': [cidFontDictObject],
'ToUnicode': toUnicodeMapObject
}
# Make fontfile stream
descriptor['FontFile2'] = fontfileObject
length1Object = self.reserveObject('decoded length of a font')
self.beginStream(
fontfileObject.id,
self.reserveObject('length of font stream'),
{'Length1': length1Object})
with open(filename, 'rb') as fontfile:
length1 = 0
while True:
data = fontfile.read(4096)
if not data:
break
length1 += len(data)
self.currentstream.write(data)
self.endStream()
self.writeObject(length1Object, length1)
# Make the 'W' (Widths) array, CidToGidMap and ToUnicode CMap
# at the same time
cid_to_gid_map = ['\u0000'] * 65536
cmap = font.get_charmap()
widths = []
max_ccode = 0
for c in characters:
ccode = c
gind = cmap.get(ccode) or 0
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
widths.append((ccode, glyph.horiAdvance / 6))
if ccode < 65536:
cid_to_gid_map[ccode] = unichr(gind)
max_ccode = max(ccode, max_ccode)
widths.sort()
cid_to_gid_map = cid_to_gid_map[:max_ccode + 1]
last_ccode = -2
w = []
max_width = 0
unicode_groups = []
for ccode, width in widths:
if ccode != last_ccode + 1:
w.append(ccode)
w.append([width])
unicode_groups.append([ccode, ccode])
else:
w[-1].append(width)
unicode_groups[-1][1] = ccode
max_width = max(max_width, width)
last_ccode = ccode
unicode_bfrange = []
for start, end in unicode_groups:
unicode_bfrange.append(
"<%04x> <%04x> [%s]" %
(start, end,
" ".join(["<%04x>" % x for x in range(start, end+1)])))
unicode_cmap = (self._identityToUnicodeCMap %
(len(unicode_groups),
"\n".join(unicode_bfrange))).encode('ascii')
# CIDToGIDMap stream
cid_to_gid_map = "".join(cid_to_gid_map).encode("utf-16be")
self.beginStream(cidToGidMapObject.id,
None,
{'Length': len(cid_to_gid_map)})
self.currentstream.write(cid_to_gid_map)
self.endStream()
# ToUnicode CMap
self.beginStream(toUnicodeMapObject.id,
None,
                             {'Length': len(unicode_cmap)})
self.currentstream.write(unicode_cmap)
self.endStream()
descriptor['MaxWidth'] = max_width
# Write everything out
self.writeObject(cidFontDictObject, cidFontDict)
self.writeObject(type0FontDictObject, type0FontDict)
self.writeObject(fontdescObject, descriptor)
self.writeObject(wObject, w)
return type0FontDictObject
# Beginning of main embedTTF function...
# You are lost in a maze of TrueType tables, all different...
sfnt = font.get_sfnt()
try:
ps_name = sfnt[(1, 0, 0, 6)].decode('macroman') # Macintosh scheme
except KeyError:
# Microsoft scheme:
ps_name = sfnt[(3, 1, 0x0409, 6)].decode('utf-16be')
# (see freetype/ttnameid.h)
ps_name = ps_name.encode('ascii', 'replace')
ps_name = Name(ps_name)
pclt = font.get_sfnt_table('pclt') or {'capHeight': 0, 'xHeight': 0}
post = font.get_sfnt_table('post') or {'italicAngle': (0, 0)}
ff = font.face_flags
sf = font.style_flags
flags = 0
symbolic = False # ps_name.name in ('Cmsy10', 'Cmmi10', 'Cmex10')
if ff & FIXED_WIDTH:
flags |= 1 << 0
if 0: # TODO: serif
flags |= 1 << 1
if symbolic:
flags |= 1 << 2
else:
flags |= 1 << 5
if sf & ITALIC:
flags |= 1 << 6
if 0: # TODO: all caps
flags |= 1 << 16
if 0: # TODO: small caps
flags |= 1 << 17
if 0: # TODO: force bold
flags |= 1 << 18
descriptor = {
'Type': Name('FontDescriptor'),
'FontName': ps_name,
'Flags': flags,
'FontBBox': [cvt(x, nearest=False) for x in font.bbox],
'Ascent': cvt(font.ascender, nearest=False),
'Descent': cvt(font.descender, nearest=False),
'CapHeight': cvt(pclt['capHeight'], nearest=False),
'XHeight': cvt(pclt['xHeight']),
'ItalicAngle': post['italicAngle'][1], # ???
'StemV': 0 # ???
}
# The font subsetting to a Type 3 font does not work for
# OpenType (.otf) that embed a Postscript CFF font, so avoid that --
# save as a (non-subsetted) Type 42 font instead.
if is_opentype_cff_font(filename):
fonttype = 42
msg = ("'%s' can not be subsetted into a Type 3 font. "
"The entire font will be embedded in the output.")
warnings.warn(msg % os.path.basename(filename))
if fonttype == 3:
return embedTTFType3(font, characters, descriptor)
elif fonttype == 42:
return embedTTFType42(font, characters, descriptor)
def alphaState(self, alpha):
"""Return name of an ExtGState that sets alpha to the given value"""
state = self.alphaStates.get(alpha, None)
if state is not None:
return state[0]
name = Name('A%d' % self.nextAlphaState)
self.nextAlphaState += 1
self.alphaStates[alpha] = \
(name, {'Type': Name('ExtGState'),
'CA': alpha[0], 'ca': alpha[1]})
return name
def hatchPattern(self, hatch_style):
# The colors may come in as numpy arrays, which aren't hashable
if hatch_style is not None:
face, edge, hatch = hatch_style
if face is not None:
face = tuple(face)
if edge is not None:
edge = tuple(edge)
hatch_style = (face, edge, hatch)
pattern = self.hatchPatterns.get(hatch_style, None)
if pattern is not None:
return pattern
name = Name('H%d' % self.nextHatch)
self.nextHatch += 1
self.hatchPatterns[hatch_style] = name
return name
def writeHatches(self):
hatchDict = dict()
sidelen = 72.0
for hatch_style, name in six.iteritems(self.hatchPatterns):
ob = self.reserveObject('hatch pattern')
hatchDict[name] = ob
res = {'Procsets':
[Name(x) for x in "PDF Text ImageB ImageC ImageI".split()]}
self.beginStream(
ob.id, None,
{'Type': Name('Pattern'),
'PatternType': 1, 'PaintType': 1, 'TilingType': 1,
'BBox': [0, 0, sidelen, sidelen],
'XStep': sidelen, 'YStep': sidelen,
'Resources': res})
stroke_rgb, fill_rgb, path = hatch_style
self.output(stroke_rgb[0], stroke_rgb[1], stroke_rgb[2],
Op.setrgb_stroke)
if fill_rgb is not None:
self.output(fill_rgb[0], fill_rgb[1], fill_rgb[2],
Op.setrgb_nonstroke,
0, 0, sidelen, sidelen, Op.rectangle,
Op.fill)
self.output(0.1, Op.setlinewidth)
# TODO: We could make this dpi-dependent, but that would be
# an API change
self.output(*self.pathOperations(
Path.hatch(path),
Affine2D().scale(sidelen),
simplify=False))
self.output(Op.stroke)
self.endStream()
self.writeObject(self.hatchObject, hatchDict)
def addGouraudTriangles(self, points, colors):
name = Name('GT%d' % len(self.gouraudTriangles))
self.gouraudTriangles.append((name, points, colors))
return name
def writeGouraudTriangles(self):
gouraudDict = dict()
for name, points, colors in self.gouraudTriangles:
ob = self.reserveObject('Gouraud triangle')
gouraudDict[name] = ob
shape = points.shape
flat_points = points.reshape((shape[0] * shape[1], 2))
flat_colors = colors.reshape((shape[0] * shape[1], 4))
points_min = np.min(flat_points, axis=0) - (1 << 8)
points_max = np.max(flat_points, axis=0) + (1 << 8)
factor = float(0xffffffff) / (points_max - points_min)
self.beginStream(
ob.id, None,
{'ShadingType': 4,
'BitsPerCoordinate': 32,
'BitsPerComponent': 8,
'BitsPerFlag': 8,
'ColorSpace': Name('DeviceRGB'),
'AntiAlias': True,
'Decode': [points_min[0], points_max[0],
points_min[1], points_max[1],
0, 1, 0, 1, 0, 1]
})
streamarr = np.empty(
(shape[0] * shape[1],),
dtype=[(str('flags'), str('u1')),
(str('points'), str('>u4'), (2,)),
(str('colors'), str('u1'), (3,))])
streamarr['flags'] = 0
streamarr['points'] = (flat_points - points_min) * factor
streamarr['colors'] = flat_colors[:, :3] * 255.0
self.write(streamarr.tostring())
self.endStream()
self.writeObject(self.gouraudObject, gouraudDict)
def imageObject(self, image):
"""Return name of an image XObject representing the given image."""
pair = self.images.get(image, None)
if pair is not None:
return pair[0]
name = Name('I%d' % self.nextImage)
ob = self.reserveObject('image %d' % self.nextImage)
self.nextImage += 1
self.images[image] = (name, ob)
return name
def _unpack(self, im):
"""
Unpack the image object im into height, width, data, alpha,
where data and alpha are HxWx3 (RGB) or HxWx1 (grayscale or alpha)
arrays, except alpha is None if the image is fully opaque.
"""
h, w, s = im.as_rgba_str()
rgba = np.fromstring(s, np.uint8)
rgba.shape = (h, w, 4)
rgba = rgba[::-1]
rgb = rgba[:, :, :3]
alpha = rgba[:, :, 3][..., None]
if np.all(alpha == 255):
alpha = None
else:
alpha = np.array(alpha, order='C')
if im.is_grayscale:
r, g, b = rgb.astype(np.float32).transpose(2, 0, 1)
gray = (0.3 * r + 0.59 * g + 0.11 * b).astype(np.uint8)[..., None]
return h, w, gray, alpha
else:
rgb = np.array(rgb, order='C')
return h, w, rgb, alpha
def _writePng(self, data):
"""
Write the image *data* into the pdf file using png
predictors with Flate compression.
"""
buffer = BytesIO()
_png.write_png(data, buffer)
buffer.seek(8)
written = 0
header = bytearray(8)
while True:
n = buffer.readinto(header)
assert n == 8
length, type = struct.unpack(b'!L4s', bytes(header))
if type == b'IDAT':
data = bytearray(length)
n = buffer.readinto(data)
assert n == length
self.currentstream.write(bytes(data))
written += n
elif type == b'IEND':
break
else:
buffer.seek(length, 1)
buffer.seek(4, 1) # skip CRC
def _writeImg(self, data, height, width, grayscale, id, smask=None):
"""
Write the image *data* of size *height* x *width*, as grayscale
if *grayscale* is true and RGB otherwise, as pdf object *id*
and with the soft mask (alpha channel) *smask*, which should be
either None or a *height* x *width* x 1 array.
"""
obj = {'Type': Name('XObject'),
'Subtype': Name('Image'),
'Width': width,
'Height': height,
'ColorSpace': Name('DeviceGray' if grayscale
else 'DeviceRGB'),
'BitsPerComponent': 8}
if smask:
obj['SMask'] = smask
if rcParams['pdf.compression']:
png = {'Predictor': 10,
'Colors': 1 if grayscale else 3,
'Columns': width}
else:
png = None
self.beginStream(
id,
self.reserveObject('length of image stream'),
obj,
png=png
)
if png:
self._writePng(data)
else:
self.currentstream.write(data.tostring())
self.endStream()
def writeImages(self):
for img, pair in six.iteritems(self.images):
height, width, data, adata = self._unpack(img)
if adata is not None:
smaskObject = self.reserveObject("smask")
self._writeImg(adata, height, width, True, smaskObject.id)
else:
smaskObject = None
self._writeImg(data, height, width, img.is_grayscale,
pair[1].id, smaskObject)
def markerObject(self, path, trans, fill, stroke, lw, joinstyle,
capstyle):
"""Return name of a marker XObject representing the given path."""
# self.markers used by markerObject, writeMarkers, close:
# mapping from (path operations, fill?, stroke?) to
# [name, object reference, bounding box, linewidth]
# This enables different draw_markers calls to share the XObject
# if the gc is sufficiently similar: colors etc can vary, but
# the choices of whether to fill and whether to stroke cannot.
# We need a bounding box enclosing all of the XObject path,
# but since line width may vary, we store the maximum of all
# occurring line widths in self.markers.
# close() is somewhat tightly coupled in that it expects the
# first two components of each value in self.markers to be the
# name and object reference.
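        # A hypothetical entry, for illustration only:
        #   self.markers[(pathops, True, False, 'miter', 'butt')] = \
        #       [Name('M3'), <Reference 17>, Bbox(...), 1.5]
        # i.e. marker 'M3' is filled but not stroked, uses miter joins and
        # butt caps, and 1.5 is the widest line width seen for it so far.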
pathops = self.pathOperations(path, trans, simplify=False)
key = (tuple(pathops), bool(fill), bool(stroke), joinstyle, capstyle)
result = self.markers.get(key)
if result is None:
name = Name('M%d' % len(self.markers))
ob = self.reserveObject('marker %d' % len(self.markers))
bbox = path.get_extents(trans)
self.markers[key] = [name, ob, bbox, lw]
else:
if result[-1] < lw:
result[-1] = lw
name = result[0]
return name
def writeMarkers(self):
for ((pathops, fill, stroke, joinstyle, capstyle),
(name, ob, bbox, lw)) in six.iteritems(self.markers):
bbox = bbox.padded(lw * 0.5)
self.beginStream(
ob.id, None,
{'Type': Name('XObject'), 'Subtype': Name('Form'),
'BBox': list(bbox.extents)})
self.output(GraphicsContextPdf.joinstyles[joinstyle],
Op.setlinejoin)
self.output(GraphicsContextPdf.capstyles[capstyle], Op.setlinecap)
self.output(*pathops)
self.output(Op.paint_path(fill, stroke))
self.endStream()
def pathCollectionObject(self, gc, path, trans, padding, filled, stroked):
name = Name('P%d' % len(self.paths))
ob = self.reserveObject('path %d' % len(self.paths))
self.paths.append(
(name, path, trans, ob, gc.get_joinstyle(), gc.get_capstyle(),
padding, filled, stroked))
return name
def writePathCollectionTemplates(self):
for (name, path, trans, ob, joinstyle, capstyle, padding, filled,
stroked) in self.paths:
pathops = self.pathOperations(path, trans, simplify=False)
bbox = path.get_extents(trans)
if not np.all(np.isfinite(bbox.extents)):
extents = [0, 0, 0, 0]
else:
bbox = bbox.padded(padding)
extents = list(bbox.extents)
self.beginStream(
ob.id, None,
{'Type': Name('XObject'), 'Subtype': Name('Form'),
'BBox': extents})
self.output(GraphicsContextPdf.joinstyles[joinstyle],
Op.setlinejoin)
self.output(GraphicsContextPdf.capstyles[capstyle], Op.setlinecap)
self.output(*pathops)
self.output(Op.paint_path(filled, stroked))
self.endStream()
@staticmethod
def pathOperations(path, transform, clip=None, simplify=None, sketch=None):
return [Verbatim(_path.convert_to_string(
path, transform, clip, simplify, sketch,
6,
[Op.moveto.op, Op.lineto.op, b'', Op.curveto.op, Op.closepath.op],
True))]
def writePath(self, path, transform, clip=False, sketch=None):
if clip:
clip = (0.0, 0.0, self.width * 72, self.height * 72)
simplify = path.should_simplify
else:
clip = None
simplify = False
cmds = self.pathOperations(path, transform, clip, simplify=simplify,
sketch=sketch)
self.output(*cmds)
def reserveObject(self, name=''):
"""Reserve an ID for an indirect object.
The name is used for debugging in case we forget to print out
the object with writeObject.
"""
id = self.nextObject
self.nextObject += 1
self.xrefTable.append([None, 0, name])
return Reference(id)
def recordXref(self, id):
self.xrefTable[id][0] = self.fh.tell() - self.tell_base
def writeObject(self, object, contents):
self.recordXref(object.id)
object.write(contents, self)
def writeXref(self):
"""Write out the xref table."""
self.startxref = self.fh.tell() - self.tell_base
self.write(("xref\n0 %d\n" % self.nextObject).encode('ascii'))
i = 0
borken = False
for offset, generation, name in self.xrefTable:
if offset is None:
print('No offset for object %d (%s)' % (i, name),
file=sys.stderr)
borken = True
else:
if name == 'the zero object':
key = "f"
else:
key = "n"
text = "%010d %05d %s \n" % (offset, generation, key)
self.write(text.encode('ascii'))
i += 1
if borken:
raise AssertionError('Indirect object does not exist')
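    # Each in-use entry written above has the fixed-width form (offset
    # hypothetical)
    #   0000001234 00000 n
    # i.e. a 10-digit byte offset, a 5-digit generation number, and 'n';
    # only 'the zero object' is written with the free-entry key 'f'.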
def writeInfoDict(self):
"""Write out the info dictionary, checking it for good form"""
def is_date(x):
return isinstance(x, datetime)
check_trapped = (lambda x: isinstance(x, Name) and
x.name in ('True', 'False', 'Unknown'))
keywords = {'Title': is_string_like,
'Author': is_string_like,
'Subject': is_string_like,
'Keywords': is_string_like,
'Creator': is_string_like,
'Producer': is_string_like,
'CreationDate': is_date,
'ModDate': is_date,
'Trapped': check_trapped}
for k in six.iterkeys(self.infoDict):
if k not in keywords:
warnings.warn('Unknown infodict keyword: %s' % k)
else:
if not keywords[k](self.infoDict[k]):
warnings.warn('Bad value for infodict keyword %s' % k)
self.infoObject = self.reserveObject('info')
self.writeObject(self.infoObject, self.infoDict)
def writeTrailer(self):
"""Write out the PDF trailer."""
self.write(b"trailer\n")
self.write(pdfRepr(
{'Size': self.nextObject,
'Root': self.rootObject,
'Info': self.infoObject}))
# Could add 'ID'
self.write(("\nstartxref\n%d\n%%%%EOF\n" %
self.startxref).encode('ascii'))
class RendererPdf(RendererBase):
truetype_font_cache = maxdict(50)
afm_font_cache = maxdict(50)
def __init__(self, file, image_dpi):
RendererBase.__init__(self)
self.file = file
self.gc = self.new_gc()
self.mathtext_parser = MathTextParser("Pdf")
self.image_dpi = image_dpi
self.tex_font_map = None
def finalize(self):
self.file.output(*self.gc.finalize())
def check_gc(self, gc, fillcolor=None):
orig_fill = getattr(gc, '_fillcolor', (0., 0., 0.))
gc._fillcolor = fillcolor
orig_alphas = getattr(gc, '_effective_alphas', (1.0, 1.0))
if gc._forced_alpha:
gc._effective_alphas = (gc._alpha, gc._alpha)
elif fillcolor is None or len(fillcolor) < 4:
gc._effective_alphas = (gc._rgb[3], 1.0)
else:
gc._effective_alphas = (gc._rgb[3], fillcolor[3])
delta = self.gc.delta(gc)
if delta:
self.file.output(*delta)
# Restore gc to avoid unwanted side effects
gc._fillcolor = orig_fill
gc._effective_alphas = orig_alphas
def tex_font_mapping(self, texfont):
if self.tex_font_map is None:
self.tex_font_map = \
dviread.PsfontsMap(dviread.find_tex_file('pdftex.map'))
return self.tex_font_map[texfont]
def track_characters(self, font, s):
"""Keeps track of which characters are required from
each font."""
if isinstance(font, six.string_types):
fname = font
else:
fname = font.fname
realpath, stat_key = get_realpath_and_stat(fname)
used_characters = self.file.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update([ord(x) for x in s])
def merge_used_characters(self, other):
for stat_key, (realpath, charset) in six.iteritems(other):
used_characters = self.file.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update(charset)
def get_image_magnification(self):
return self.image_dpi/72.0
def option_scale_image(self):
"""
        The pdf backend supports arbitrary scaling of images.
"""
return True
def option_image_nocomposite(self):
"""
        Return whether to generate a composite image from multiple images on
        a set of axes.
"""
return not rcParams['image.composite_image']
def draw_image(self, gc, x, y, im, dx=None, dy=None, transform=None):
self.check_gc(gc)
h, w = im.get_size_out()
if dx is None:
w = 72.0*w/self.image_dpi
else:
w = dx
if dy is None:
h = 72.0*h/self.image_dpi
else:
h = dy
imob = self.file.imageObject(im)
if transform is None:
self.file.output(Op.gsave,
w, 0, 0, h, x, y, Op.concat_matrix,
imob, Op.use_xobject, Op.grestore)
else:
tr1, tr2, tr3, tr4, tr5, tr6 = transform.to_values()
self.file.output(Op.gsave,
tr1, tr2, tr3, tr4, tr5, tr6, Op.concat_matrix,
w, 0, 0, h, x, y, Op.concat_matrix,
imob, Op.use_xobject, Op.grestore)
def draw_path(self, gc, path, transform, rgbFace=None):
self.check_gc(gc, rgbFace)
self.file.writePath(
path, transform,
rgbFace is None and gc.get_hatch_path() is None,
gc.get_sketch_params())
self.file.output(self.gc.paint())
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position):
# We can only reuse the objects if the presence of fill and
# stroke (and the amount of alpha for each) is the same for
# all of them
can_do_optimization = True
facecolors = np.asarray(facecolors)
edgecolors = np.asarray(edgecolors)
if not len(facecolors):
filled = False
can_do_optimization = not gc.get_hatch()
else:
if np.all(facecolors[:, 3] == facecolors[0, 3]):
filled = facecolors[0, 3] != 0.0
else:
can_do_optimization = False
if not len(edgecolors):
stroked = False
else:
if np.all(np.asarray(linewidths) == 0.0):
stroked = False
elif np.all(edgecolors[:, 3] == edgecolors[0, 3]):
stroked = edgecolors[0, 3] != 0.0
else:
can_do_optimization = False
# Is the optimization worth it? Rough calculation:
# cost of emitting a path in-line is len_path * uses_per_path
# cost of XObject is len_path + 5 for the definition,
# uses_per_path for the uses
len_path = len(paths[0].vertices) if len(paths) > 0 else 0
uses_per_path = self._iter_collection_uses_per_path(
paths, all_transforms, offsets, facecolors, edgecolors)
should_do_optimization = \
len_path + uses_per_path + 5 < len_path * uses_per_path
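        # Worked example (numbers hypothetical): a 10-vertex path drawn 100
        # times costs roughly 10 * 100 = 1000 operators emitted in-line,
        # versus 10 + 100 + 5 = 115 via a shared XObject, so the XObject
        # route wins comfortably in that case.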
if (not can_do_optimization) or (not should_do_optimization):
return RendererBase.draw_path_collection(
self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position)
padding = np.max(linewidths)
path_codes = []
for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
master_transform, paths, all_transforms)):
name = self.file.pathCollectionObject(
gc, path, transform, padding, filled, stroked)
path_codes.append(name)
output = self.file.output
output(*self.gc.push())
lastx, lasty = 0, 0
for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
gc, master_transform, all_transforms, path_codes, offsets,
offsetTrans, facecolors, edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
self.check_gc(gc0, rgbFace)
dx, dy = xo - lastx, yo - lasty
output(1, 0, 0, 1, dx, dy, Op.concat_matrix, path_id,
Op.use_xobject)
lastx, lasty = xo, yo
output(*self.gc.pop())
def draw_markers(self, gc, marker_path, marker_trans, path, trans,
rgbFace=None):
# Same logic as in draw_path_collection
len_marker_path = len(marker_path)
uses = len(path)
if len_marker_path * uses < len_marker_path + uses + 5:
RendererBase.draw_markers(self, gc, marker_path, marker_trans,
path, trans, rgbFace)
return
self.check_gc(gc, rgbFace)
fill = gc.fill(rgbFace)
stroke = gc.stroke()
output = self.file.output
marker = self.file.markerObject(
marker_path, marker_trans, fill, stroke, self.gc._linewidth,
gc.get_joinstyle(), gc.get_capstyle())
output(Op.gsave)
lastx, lasty = 0, 0
for vertices, code in path.iter_segments(
trans,
clip=(0, 0, self.file.width*72, self.file.height*72),
simplify=False):
if len(vertices):
x, y = vertices[-2:]
if (x < 0 or y < 0 or
x > self.file.width * 72 or y > self.file.height * 72):
continue
dx, dy = x - lastx, y - lasty
output(1, 0, 0, 1, dx, dy, Op.concat_matrix,
marker, Op.use_xobject)
lastx, lasty = x, y
output(Op.grestore)
def draw_gouraud_triangle(self, gc, points, colors, trans):
self.draw_gouraud_triangles(gc, points.reshape((1, 3, 2)),
colors.reshape((1, 3, 4)), trans)
def draw_gouraud_triangles(self, gc, points, colors, trans):
assert len(points) == len(colors)
assert points.ndim == 3
assert points.shape[1] == 3
assert points.shape[2] == 2
assert colors.ndim == 3
assert colors.shape[1] == 3
assert colors.shape[2] == 4
shape = points.shape
points = points.reshape((shape[0] * shape[1], 2))
tpoints = trans.transform(points)
tpoints = tpoints.reshape(shape)
name = self.file.addGouraudTriangles(tpoints, colors)
self.check_gc(gc)
self.file.output(name, Op.shading)
def _setup_textpos(self, x, y, angle, oldx=0, oldy=0, oldangle=0):
if angle == oldangle == 0:
self.file.output(x - oldx, y - oldy, Op.textpos)
else:
angle = angle / 180.0 * pi
self.file.output(cos(angle), sin(angle),
-sin(angle), cos(angle),
x, y, Op.textmatrix)
self.file.output(0, 0, Op.textpos)
def draw_mathtext(self, gc, x, y, s, prop, angle):
# TODO: fix positioning and encoding
width, height, descent, glyphs, rects, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
self.merge_used_characters(used_characters)
# When using Type 3 fonts, we can't use character codes higher
# than 255, so we use the "Do" command to render those
# instead.
global_fonttype = rcParams['pdf.fonttype']
# Set up a global transformation matrix for the whole math expression
a = angle / 180.0 * pi
self.file.output(Op.gsave)
self.file.output(cos(a), sin(a), -sin(a), cos(a), x, y,
Op.concat_matrix)
self.check_gc(gc, gc._rgb)
self.file.output(Op.begin_text)
prev_font = None, None
oldx, oldy = 0, 0
for ox, oy, fontname, fontsize, num, symbol_name in glyphs:
if is_opentype_cff_font(fontname):
fonttype = 42
else:
fonttype = global_fonttype
if fonttype == 42 or num <= 255:
self._setup_textpos(ox, oy, 0, oldx, oldy)
oldx, oldy = ox, oy
if (fontname, fontsize) != prev_font:
self.file.output(self.file.fontName(fontname), fontsize,
Op.selectfont)
prev_font = fontname, fontsize
self.file.output(self.encode_string(unichr(num), fonttype),
Op.show)
self.file.output(Op.end_text)
# If using Type 3 fonts, render all of the multi-byte characters
# as XObjects using the 'Do' command.
if global_fonttype == 3:
for ox, oy, fontname, fontsize, num, symbol_name in glyphs:
if is_opentype_cff_font(fontname):
fonttype = 42
else:
fonttype = global_fonttype
if fonttype == 3 and num > 255:
self.file.fontName(fontname)
self.file.output(Op.gsave,
0.001 * fontsize, 0,
0, 0.001 * fontsize,
ox, oy, Op.concat_matrix)
name = self.file._get_xobject_symbol_name(
fontname, symbol_name)
self.file.output(Name(name), Op.use_xobject)
self.file.output(Op.grestore)
# Draw any horizontal lines in the math layout
for ox, oy, width, height in rects:
self.file.output(Op.gsave, ox, oy, width, height,
Op.rectangle, Op.fill, Op.grestore)
# Pop off the global transformation
self.file.output(Op.grestore)
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
dvifile = texmanager.make_dvi(s, fontsize)
dvi = dviread.Dvi(dvifile, 72)
page = next(iter(dvi))
dvi.close()
# Gather font information and do some setup for combining
# characters into strings. The variable seq will contain a
# sequence of font and text entries. A font entry is a list
# ['font', name, size] where name is a Name object for the
# font. A text entry is ['text', x, y, glyphs, x+w] where x
# and y are the starting coordinates, w is the width, and
# glyphs is a list; in this phase it will always contain just
# one one-character string, but later it may have longer
# strings interspersed with kern amounts.
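        # For illustration (entries hypothetical), seq might end up looking
        # like
        #   [['font', Name('F1'), 9.96],
        #    ['text', 100.0, 700.0, [b'Te', 27.8, b'xt'], 121.4]]
        # once the combining pass below has merged runs and inserted kerns.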
oldfont, seq = None, []
for x1, y1, dvifont, glyph, width in page.text:
if dvifont != oldfont:
pdfname = self.file.fontName(dvifont.texname)
if dvifont.texname not in self.file.dviFontInfo:
psfont = self.tex_font_mapping(dvifont.texname)
self.file.dviFontInfo[dvifont.texname] = Bunch(
fontfile=psfont.filename,
basefont=psfont.psname,
encodingfile=psfont.encoding,
effects=psfont.effects,
dvifont=dvifont)
seq += [['font', pdfname, dvifont.size]]
oldfont = dvifont
# We need to convert the glyph numbers to bytes, and the easiest
# way to do this on both Python 2 and 3 is .encode('latin-1')
seq += [['text', x1, y1,
[six.unichr(glyph).encode('latin-1')], x1+width]]
# Find consecutive text strings with constant y coordinate and
# combine into a sequence of strings and kerns, or just one
# string (if any kerns would be less than 0.1 points).
i, curx, fontsize = 0, 0, None
while i < len(seq)-1:
elt, nxt = seq[i:i+2]
if elt[0] == 'font':
fontsize = elt[2]
elif elt[0] == nxt[0] == 'text' and elt[2] == nxt[2]:
offset = elt[4] - nxt[1]
if abs(offset) < 0.1:
elt[3][-1] += nxt[3][0]
elt[4] += nxt[4]-nxt[1]
else:
elt[3] += [offset*1000.0/fontsize, nxt[3][0]]
elt[4] = nxt[4]
del seq[i+1]
continue
i += 1
# Create a transform to map the dvi contents to the canvas.
mytrans = Affine2D().rotate_deg(angle).translate(x, y)
# Output the text.
self.check_gc(gc, gc._rgb)
self.file.output(Op.begin_text)
curx, cury, oldx, oldy = 0, 0, 0, 0
for elt in seq:
if elt[0] == 'font':
self.file.output(elt[1], elt[2], Op.selectfont)
elif elt[0] == 'text':
curx, cury = mytrans.transform_point((elt[1], elt[2]))
self._setup_textpos(curx, cury, angle, oldx, oldy)
oldx, oldy = curx, cury
if len(elt[3]) == 1:
self.file.output(elt[3][0], Op.show)
else:
self.file.output(elt[3], Op.showkern)
else:
assert False
self.file.output(Op.end_text)
# Then output the boxes (e.g., variable-length lines of square
# roots).
boxgc = self.new_gc()
boxgc.copy_properties(gc)
boxgc.set_linewidth(0)
pathops = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.CLOSEPOLY]
for x1, y1, h, w in page.boxes:
path = Path([[x1, y1], [x1+w, y1], [x1+w, y1+h], [x1, y1+h],
[0, 0]], pathops)
self.draw_path(boxgc, path, mytrans, gc._rgb)
def encode_string(self, s, fonttype):
if fonttype in (1, 3):
return s.encode('cp1252', 'replace')
return s.encode('utf-16be', 'replace')
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
# TODO: combine consecutive texts into one BT/ET delimited section
# This function is rather complex, since there is no way to
# access characters of a Type 3 font with codes > 255. (Type
# 3 fonts can not have a CIDMap). Therefore, we break the
# string into chunks, where each chunk contains exclusively
# 1-byte or exclusively 2-byte characters, and output each
# chunk a separate command. 1-byte characters use the regular
# text show command (Tj), whereas 2-byte characters use the
# use XObject command (Do). If using Type 42 fonts, all of
# this complication is avoided, but of course, those fonts can
# not be subsetted.
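        # For example (string hypothetical), with a Type 3 font the text
        # u"area: 5 \u33a1" would be split into a 1-byte chunk ("area: 5 ")
        # drawn with the regular Tj operator and a 2-byte chunk ("\u33a1")
        # drawn via its Form XObject with the Do operator.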
self.check_gc(gc, gc._rgb)
if ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
fontsize = prop.get_size_in_points()
if rcParams['pdf.use14corefonts']:
font = self._get_font_afm(prop)
l, b, w, h = font.get_str_bbox(s)
fonttype = 1
else:
font = self._get_font_ttf(prop)
self.track_characters(font, s)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
fonttype = rcParams['pdf.fonttype']
# We can't subset all OpenType fonts, so switch to Type 42
# in that case.
if is_opentype_cff_font(font.fname):
fonttype = 42
def check_simple_method(s):
"""Determine if we should use the simple or woven method
to output this text, and chunks the string into 1-byte and
2-byte sections if necessary."""
use_simple_method = True
chunks = []
if not rcParams['pdf.use14corefonts']:
if fonttype == 3 and not isinstance(s, bytes) and len(s) != 0:
# Break the string into chunks where each chunk is either
# a string of chars <= 255, or a single character > 255.
s = six.text_type(s)
for c in s:
if ord(c) <= 255:
char_type = 1
else:
char_type = 2
if len(chunks) and chunks[-1][0] == char_type:
chunks[-1][1].append(c)
else:
chunks.append((char_type, [c]))
use_simple_method = (len(chunks) == 1 and
chunks[-1][0] == 1)
return use_simple_method, chunks
def draw_text_simple():
"""Outputs text using the simple method."""
self.file.output(Op.begin_text,
self.file.fontName(prop),
fontsize,
Op.selectfont)
self._setup_textpos(x, y, angle)
self.file.output(self.encode_string(s, fonttype), Op.show,
Op.end_text)
def draw_text_woven(chunks):
"""Outputs text using the woven method, alternating
between chunks of 1-byte characters and 2-byte characters.
Only used for Type 3 fonts."""
chunks = [(a, ''.join(b)) for a, b in chunks]
cmap = font.get_charmap()
# Do the rotation and global translation as a single matrix
# concatenation up front
self.file.output(Op.gsave)
a = angle / 180.0 * pi
self.file.output(cos(a), sin(a), -sin(a), cos(a), x, y,
Op.concat_matrix)
# Output all the 1-byte characters in a BT/ET group, then
# output all the 2-byte characters.
for mode in (1, 2):
newx = oldx = 0
# Output a 1-byte character chunk
if mode == 1:
self.file.output(Op.begin_text,
self.file.fontName(prop),
fontsize,
Op.selectfont)
for chunk_type, chunk in chunks:
if mode == 1 and chunk_type == 1:
self._setup_textpos(newx, 0, 0, oldx, 0, 0)
self.file.output(self.encode_string(chunk, fonttype),
Op.show)
oldx = newx
lastgind = None
for c in chunk:
ccode = ord(c)
gind = cmap.get(ccode)
if gind is not None:
if mode == 2 and chunk_type == 2:
glyph_name = font.get_glyph_name(gind)
self.file.output(Op.gsave)
self.file.output(0.001 * fontsize, 0,
0, 0.001 * fontsize,
newx, 0, Op.concat_matrix)
name = self.file._get_xobject_symbol_name(
font.fname, glyph_name)
self.file.output(Name(name), Op.use_xobject)
self.file.output(Op.grestore)
# Move the pointer based on the character width
# and kerning
glyph = font.load_char(ccode,
flags=LOAD_NO_HINTING)
if lastgind is not None:
kern = font.get_kerning(
lastgind, gind, KERNING_UNFITTED)
else:
kern = 0
lastgind = gind
newx += kern/64.0 + glyph.linearHoriAdvance/65536.0
if mode == 1:
self.file.output(Op.end_text)
self.file.output(Op.grestore)
use_simple_method, chunks = check_simple_method(s)
if use_simple_method:
return draw_text_simple()
else:
return draw_text_woven(chunks)
def get_text_width_height_descent(self, s, prop, ismath):
if rcParams['text.usetex']:
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
renderer=self)
return w, h, d
if ismath:
w, h, d, glyphs, rects, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
elif rcParams['pdf.use14corefonts']:
font = self._get_font_afm(prop)
l, b, w, h, d = font.get_str_bbox_and_descent(s)
scale = prop.get_size_in_points()
w *= scale / 1000
h *= scale / 1000
d *= scale / 1000
else:
font = self._get_font_ttf(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
w, h = font.get_width_height()
scale = (1.0 / 64.0)
w *= scale
h *= scale
d = font.get_descent()
d *= scale
return w, h, d
def _get_font_afm(self, prop):
key = hash(prop)
font = self.afm_font_cache.get(key)
if font is None:
filename = findfont(
prop, fontext='afm', directory=self.file._core14fontdir)
if filename is None:
filename = findfont(
"Helvetica", fontext='afm',
directory=self.file._core14fontdir)
font = self.afm_font_cache.get(filename)
if font is None:
with open(filename, 'rb') as fh:
font = AFM(fh)
self.afm_font_cache[filename] = font
self.afm_font_cache[key] = font
return font
def _get_font_ttf(self, prop):
key = hash(prop)
font = self.truetype_font_cache.get(key)
if font is None:
filename = findfont(prop)
font = self.truetype_font_cache.get(filename)
if font is None:
font = FT2Font(filename)
self.truetype_font_cache[filename] = font
self.truetype_font_cache[key] = font
font.clear()
font.set_size(prop.get_size_in_points(), 72)
return font
def flipy(self):
return False
def get_canvas_width_height(self):
return self.file.width / 72.0, self.file.height / 72.0
def new_gc(self):
return GraphicsContextPdf(self.file)
class GraphicsContextPdf(GraphicsContextBase):
def __init__(self, file):
GraphicsContextBase.__init__(self)
self._fillcolor = (0.0, 0.0, 0.0)
self._effective_alphas = (1.0, 1.0)
self.file = file
self.parent = None
def __repr__(self):
d = dict(self.__dict__)
del d['file']
del d['parent']
return repr(d)
def stroke(self):
"""
Predicate: does the path need to be stroked (its outline drawn)?
This tests for the various conditions that disable stroking
the path, in which case it would presumably be filled.
"""
# _linewidth > 0: in pdf a line of width 0 is drawn at minimum
# possible device width, but e.g., agg doesn't draw at all
return (self._linewidth > 0 and self._alpha > 0 and
(len(self._rgb) <= 3 or self._rgb[3] != 0.0))
def fill(self, *args):
"""
Predicate: does the path need to be filled?
An optional argument can be used to specify an alternative
_fillcolor, as needed by RendererPdf.draw_markers.
"""
if len(args):
_fillcolor = args[0]
else:
_fillcolor = self._fillcolor
return (self._hatch or
(_fillcolor is not None and
(len(_fillcolor) <= 3 or _fillcolor[3] != 0.0)))
def paint(self):
"""
Return the appropriate pdf operator to cause the path to be
stroked, filled, or both.
"""
return Op.paint_path(self.fill(), self.stroke())
capstyles = {'butt': 0, 'round': 1, 'projecting': 2}
joinstyles = {'miter': 0, 'round': 1, 'bevel': 2}
def capstyle_cmd(self, style):
return [self.capstyles[style], Op.setlinecap]
def joinstyle_cmd(self, style):
return [self.joinstyles[style], Op.setlinejoin]
def linewidth_cmd(self, width):
return [width, Op.setlinewidth]
def dash_cmd(self, dashes):
offset, dash = dashes
if dash is None:
dash = []
offset = 0
return [list(dash), offset, Op.setdash]
def alpha_cmd(self, alpha, forced, effective_alphas):
name = self.file.alphaState(effective_alphas)
return [name, Op.setgstate]
def hatch_cmd(self, hatch):
if not hatch:
if self._fillcolor is not None:
return self.fillcolor_cmd(self._fillcolor)
else:
return [Name('DeviceRGB'), Op.setcolorspace_nonstroke]
else:
hatch_style = (self._rgb, self._fillcolor, hatch)
name = self.file.hatchPattern(hatch_style)
return [Name('Pattern'), Op.setcolorspace_nonstroke,
name, Op.setcolor_nonstroke]
def rgb_cmd(self, rgb):
if rcParams['pdf.inheritcolor']:
return []
if rgb[0] == rgb[1] == rgb[2]:
return [rgb[0], Op.setgray_stroke]
else:
return list(rgb[:3]) + [Op.setrgb_stroke]
def fillcolor_cmd(self, rgb):
if rgb is None or rcParams['pdf.inheritcolor']:
return []
elif rgb[0] == rgb[1] == rgb[2]:
return [rgb[0], Op.setgray_nonstroke]
else:
return list(rgb[:3]) + [Op.setrgb_nonstroke]
def push(self):
parent = GraphicsContextPdf(self.file)
parent.copy_properties(self)
parent.parent = self.parent
self.parent = parent
return [Op.gsave]
def pop(self):
assert self.parent is not None
self.copy_properties(self.parent)
self.parent = self.parent.parent
return [Op.grestore]
def clip_cmd(self, cliprect, clippath):
"""Set clip rectangle. Calls self.pop() and self.push()."""
cmds = []
# Pop graphics state until we hit the right one or the stack is empty
while ((self._cliprect, self._clippath) != (cliprect, clippath)
and self.parent is not None):
cmds.extend(self.pop())
# Unless we hit the right one, set the clip polygon
if ((self._cliprect, self._clippath) != (cliprect, clippath) or
self.parent is None):
cmds.extend(self.push())
if self._cliprect != cliprect:
cmds.extend([cliprect, Op.rectangle, Op.clip, Op.endpath])
if self._clippath != clippath:
path, affine = clippath.get_transformed_path_and_affine()
cmds.extend(
PdfFile.pathOperations(path, affine, simplify=False) +
[Op.clip, Op.endpath])
return cmds
commands = (
# must come first since may pop
(('_cliprect', '_clippath'), clip_cmd),
(('_alpha', '_forced_alpha', '_effective_alphas'), alpha_cmd),
(('_capstyle',), capstyle_cmd),
(('_fillcolor',), fillcolor_cmd),
(('_joinstyle',), joinstyle_cmd),
(('_linewidth',), linewidth_cmd),
(('_dashes',), dash_cmd),
(('_rgb',), rgb_cmd),
(('_hatch',), hatch_cmd), # must come after fillcolor and rgb
)
# TODO: _linestyle
def delta(self, other):
"""
Copy properties of other into self and return PDF commands
needed to transform self into other.
"""
cmds = []
fill_performed = False
for params, cmd in self.commands:
different = False
for p in params:
ours = getattr(self, p)
theirs = getattr(other, p)
try:
if (ours is None or theirs is None):
different = bool(not(ours is theirs))
else:
different = bool(ours != theirs)
except ValueError:
ours = np.asarray(ours)
theirs = np.asarray(theirs)
different = (ours.shape != theirs.shape or
np.any(ours != theirs))
if different:
break
# Need to update hatching if we also updated fillcolor
if params == ('_hatch',) and fill_performed:
different = True
if different:
if params == ('_fillcolor',):
fill_performed = True
theirs = [getattr(other, p) for p in params]
cmds.extend(cmd(self, *theirs))
for p in params:
setattr(self, p, getattr(other, p))
return cmds
def copy_properties(self, other):
"""
Copy properties of other into self.
"""
GraphicsContextBase.copy_properties(self, other)
fillcolor = getattr(other, '_fillcolor', self._fillcolor)
effective_alphas = getattr(other, '_effective_alphas',
self._effective_alphas)
self._fillcolor = fillcolor
self._effective_alphas = effective_alphas
def finalize(self):
"""
Make sure every pushed graphics state is popped.
"""
cmds = []
while self.parent is not None:
cmds.extend(self.pop())
return cmds
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# if a main-level app must be created, this is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
    # main-level app (e.g., backend_gtk, backend_gtkagg) for pylab.
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasPdf(figure)
manager = FigureManagerPdf(canvas, num)
return manager
class PdfPages(object):
"""
A multi-page PDF file.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> # Initialize:
>>> with PdfPages('foo.pdf') as pdf:
... # As many times as you like, create a figure fig and save it:
... fig = plt.figure()
... pdf.savefig(fig)
... # When no figure is specified the current figure is saved
... pdf.savefig()
Notes
-----
In reality :class:`PdfPages` is a thin wrapper around :class:`PdfFile`, in
order to avoid confusion when using :func:`~matplotlib.pyplot.savefig` and
forgetting the format argument.
"""
__slots__ = ('_file', 'keep_empty')
def __init__(self, filename, keep_empty=True):
"""
Create a new PdfPages object.
Parameters
----------
filename: str
Plots using :meth:`PdfPages.savefig` will be written to a file at
            this location. The file is opened immediately, and any older file
            with the same name is overwritten.
keep_empty: bool, optional
If set to False, then empty pdf files will be deleted automatically
when closed.
"""
self._file = PdfFile(filename)
self.keep_empty = keep_empty
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
"""
Finalize this object, making the underlying file a complete
PDF file.
"""
self._file.close()
if (self.get_pagecount() == 0 and not self.keep_empty and
not self._file.passed_in_file_object):
os.remove(self._file.fh.name)
self._file = None
def infodict(self):
"""
Return a modifiable information dictionary object
(see PDF reference section 10.2.1 'Document Information
Dictionary').
"""
return self._file.infoDict
def savefig(self, figure=None, **kwargs):
"""
Saves a :class:`~matplotlib.figure.Figure` to this file as a new page.
Any other keyword arguments are passed to
:meth:`~matplotlib.figure.Figure.savefig`.
Parameters
----------
figure: :class:`~matplotlib.figure.Figure` or int, optional
Specifies what figure is saved to file. If not specified, the
active figure is saved. If a :class:`~matplotlib.figure.Figure`
instance is provided, this figure is saved. If an int is specified,
the figure instance to save is looked up by number.
"""
if isinstance(figure, Figure):
figure.savefig(self, format='pdf', **kwargs)
else:
if figure is None:
figureManager = Gcf.get_active()
else:
figureManager = Gcf.get_fig_manager(figure)
if figureManager is None:
raise ValueError("No such figure: " + repr(figure))
else:
figureManager.canvas.figure.savefig(self, format='pdf',
**kwargs)
def get_pagecount(self):
"""
Returns the current number of pages in the multipage pdf file.
"""
return len(self._file.pageList)
def attach_note(self, text, positionRect=[-100, -100, 0, 0]):
"""
Add a new text note to the page to be saved next. The optional
positionRect specifies the position of the new note on the
        page. It is outside the page by default to make sure it is
invisible on printouts.
"""
self._file.newTextnote(text, positionRect)
class FigureCanvasPdf(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
fixed_dpi = 72
def draw(self):
pass
filetypes = {'pdf': 'Portable Document Format'}
def get_default_filetype(self):
return 'pdf'
def print_pdf(self, filename, **kwargs):
image_dpi = kwargs.get('dpi', 72) # dpi to use for images
self.figure.set_dpi(72) # there are 72 pdf points to an inch
width, height = self.figure.get_size_inches()
if isinstance(filename, PdfPages):
file = filename._file
else:
file = PdfFile(filename)
try:
file.newPage(width, height)
_bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
renderer = MixedModeRenderer(
self.figure, width, height, image_dpi,
RendererPdf(file, image_dpi),
bbox_inches_restore=_bbox_inches_restore)
self.figure.draw(renderer)
renderer.finalize()
finally:
if isinstance(filename, PdfPages): # finish off this page
file.endStream()
else: # we opened the file above; now finish it off
file.close()
class FigureManagerPdf(FigureManagerBase):
pass
FigureCanvas = FigureCanvasPdf
FigureManager = FigureManagerPdf
| mit |
rustychris/stomel | src/live_dt2.py | 1 | 82189 | # This version uses the newer trigrid2 class, and doesn't bother with
# orthomaker (relevant parts of orthomaker are moved into trigrid2.)
################################################################################
# Maintain a live constrained Delaunay triangulation of the grid.
# Designed as a mixin.
class MissingConstraint(Exception):
pass
import sys
import trigrid2
import numpy as np
def distance_left_of_line(pnt, qp1, qp2):
    # Return the signed distance for how far pnt lies to the left of the
    # line qp1->qp2.
    # We don't necessarily get the true distance, but at least something
    # with the right sign and monotonic behavior.
vec = qp2 - qp1
left_vec = np.array( [-vec[1],vec[0]] )
return (pnt[0] - qp1[0])*left_vec[0] + (pnt[1]-qp1[1])*left_vec[1]
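# Quick sanity check (points hypothetical): with qp1=(0,0) and qp2=(1,0),
# distance_left_of_line gives +1 for the point (0,1), which lies to the left
# of the line, and -1 for (0,-1), which lies to the right.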
try:
import CGAL
import safe_pylab as pylab
from matplotlib import collections
from array_append import array_append
from numpy.linalg import norm,solve
import field
class LiveDtGrid(trigrid2.TriGrid):
has_dt = 1
# if true, skips graph API handling
freeze=0
# if true, stores up modified nodes and edges, and
# updates all at once upon release
holding = 0
# queue of conflicting edges that have been un-constrained to allow for
# an add_edge() to proceed
pending_conflicts = []
edges_to_release = None
# triangles in the "medial axis" with a radius r < density/scale_ratio_for_cutoff
# will be removed.
# making this 2 will allow down to 1 cell across a channel.
# making it 6 "should" force approx. 3 cells across a channel.
scale_ratio_for_cutoff = 2.0
# LiveDT needs to track CGAL vertex handles
node_dtype = trigrid2.TriGrid.node_dtype + \
[('vh',object)]
def __init__(self,*args,**kwargs):
super(LiveDtGrid,self).__init__(*args,**kwargs)
self.populate_dt()
check_i = 0
def check(self):
return
print " --checkplot %05i--"%self.check_i
pylab.figure(10)
pylab.clf()
self.plot_dt()
if self.default_clip is not None:
self.plot_nodes()
pylab.axis(self.default_clip)
pylab.title("--checkplot %05i--"%self.check_i)
pylab.savefig('tmp/dtframe%05i.png'%self.check_i)
self.check_i += 1
pylab.close(10)
@classmethod
def copy_grid(cls,g,bad_edge_action='delete'):
cls_edge_fields = [ s[0] for s in cls.edge_dtype ]
extra_edge_fields = []
cls_node_fields = [ s[0] for s in cls.node_dtype ]
extra_node_fields = []
cls_cell_fields = [ s[0] for s in cls.cell_dtype ]
extra_cell_fields = []
# figure out the new dtype:
for field in g.edge_dtype:
if field[0] not in cls_edge_fields:
extra_edge_fields.append( field )
for field in g.cell_dtype:
if field[0] not in cls_cell_fields:
extra_cell_fields.append( field )
for field in g.node_dtype:
if field[0] not in cls_node_fields:
extra_node_fields.append( field )
dt = LiveDtGrid(extra_node_fields = extra_node_fields,
extra_edge_fields = extra_edge_fields,
extra_cell_fields = extra_cell_fields)
dt.edges = trigrid2.concatenate_safe_dtypes( (dt.edges, g.edges) )
dt.nodes = trigrid2.concatenate_safe_dtypes( (dt.nodes, g.nodes) )
dt.cells = trigrid2.concatenate_safe_dtypes( (dt.cells, g.cells) )
dt.populate_dt()
if len(dt.bad_edges) > 0:
if bad_edge_action == 'delete':
print "There were bad edges - will delete, and reconvert"
for e in dt.bad_edges:
try:
dt.delete_edge(e)
except MissingConstraint:
pass
print "Recreating Delaunay triangulation"
dt.populate_dt()
else:
raise MissingConstraint,"Delayed error - see bad_edges for offending indices"
return dt
def refresh_metadata(self):
""" Should be called when all internal state is changed outside
the mechanisms of add_X, delete_X, move_X, etc.
"""
super(LiveDtGrid,self).refresh_metadata()
if not self.freeze:
self.populate_dt()
def populate_dt(self):
""" Initialize a triangulation with all current edges and nodes.
"""
self.DT = CGAL.Constrained_Delaunay_triangulation_2()
self.bad_edges = []
N = self.Nnodes()
for n in self.valid_node_iter():
if n % 20000 == 0:
print "nodes %d / %d"%(n,N)
pnt = CGAL.Point_2( self.nodes['x'][n,0], self.nodes['x'][n,1] )
self.nodes['vh'][n] = self.DT.insert( pnt )
self.nodes['vh'][n].set_info(n)
E = self.Nedges()
for e in self.valid_edge_iter():
if e % 20000 == 0:
print "edges %d / %d"%(e,E)
a,b = self.edges['nodes'][e]
try:
self.safe_insert_constraint(a,b)
except MissingConstraint:
self.bad_edges.append(e)
def safe_insert_constraint(self,a,b):
""" adds a constraint to the DT, but does a few simple checks first
if it's not safe, raise an Exception
"""
if a < 0 or b < 0 or a==b:
raise Exception,"invalid node indices: %d %d"%(a,b)
if all(self.nodes['x'][a] == self.nodes['x'][b]):
raise Exception,"invalid constraint: points[%d]=%s and points[%d]=%s are identical"%(a,self.nodes['x'][a],
b,self.nodes['x'][b])
if self.verbose > 2:
print " Inserting constraint (populate_dt): %d %d %s %s"%(a,b,self.nodes['vh'][a],self.nodes['vh'][b])
print " node A=%s node B=%s"%(self.nodes[a],self.nodes[b])
print " A.point=%s B.point=%s"%(self.nodes['vh'][a].point(), self.nodes['vh'][b].point())
self.DT.insert_constraint( self.nodes['vh'][a], self.nodes['vh'][b] )
# double check to make sure that it's actually in there...
found_it=0
for dt_e in self.DT.incident_constraints(self.nodes['vh'][a]):
v1 = dt_e.face().vertex( (dt_e.vertex() + 1)%3 )
v2 = dt_e.face().vertex( (dt_e.vertex() + 2)%3 )
if v1==self.nodes['vh'][b] or v2==self.nodes['vh'][b]:
found_it = 1
break
if not found_it:
# we have a conflict - search from a to b
raise MissingConstraint,"Just tried to insert a constraint %d-%d (%s - %s), but it's not there!"%(a,b,
self.nodes['x'][a],
self.nodes['x'][b])
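        # Note: the verification loop above exists because CGAL's insert_constraint()
        # can end up splitting a constraint when other vertices lie (numerically) on the
        # segment a-b; in that case no single a-b constraint exists afterwards, and
        # callers rely on MissingConstraint to detect and repair that situation.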
## Hold/release
def hold(self):
if self.holding == 0:
self.holding_nodes = {}
self.holding += 1
def release(self):
if self.holding == 0:
raise Exception,"Tried to release, but holding is already 0"
self.holding -= 1
if self.holding == 0:
## In dt2, this shouldn't be necessary, since there is just the
# one array, and it will already have been extended
# First, make sure that we have enough room for new nodes:
#while len(self.nodes['vh']) < self.Npoints():
# self.nodes['vh'] = array_append(self.nodes['vh'],0)
held_nodes = self.holding_nodes.keys()
# Remove all of the nodes that were alive when we were started
# the hold:
for n in held_nodes:
if self.nodes['vh'][n] != 0:
self.DT.remove_incident_constraints( self.nodes['vh'][n] )
self.dt_remove(n)
# Add back the ones that are currently valid
for n in held_nodes:
if np.isfinite(self.nodes['x'][n,0]):
self.dt_insert(n)
# Add back edges for each one
held_edges = {}
for n in held_nodes:
for e in self.node_to_edges(n):
held_edges[e] = 1
self.edges_to_release = list(held_edges.keys())
while len(self.edges_to_release) > 0:
e = self.edges_to_release.pop()
# call dt_add_edge to get all of the conflicting-edge-detecting
# functionality.
self.dt_add_edge(e)
self.edges_to_release = None
self.holding_nodes=0
return self.holding
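        # Typical usage sketch (hypothetical call site, not from this file):
        #   grid.hold()
        #   for n,xy in moves:      # any bulk sequence of node/edge edits
        #       grid.move_node(n,xy)
        #   grid.release()          # DT vertices/constraints reconciled once, here
        # hold() calls may nest; the DT is only reconciled when the count returns to zero.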
def dt_insert(self,n):
""" Given a point that is correctly in self.nodes, and vh that
is large enough, do the work of inserting the node and updating
the vertex handle.
"""
pnt = CGAL.Point_2( self.nodes['x'][n,0], self.nodes['x'][n,1] )
self.nodes['vh'][n] = self.DT.insert(pnt)
self.nodes['vh'][n].set_info(n)
if self.verbose > 2:
print " dt_insert node %d"%n
self.check()
def dt_remove(self,n):
self.DT.remove( self.nodes['vh'][n] )
self.nodes['vh'][n] = 0
if self.verbose > 2:
print " dt_remove node %d"%n
self.check()
def dt_update(self,n):
if self.verbose > 2:
print " dt_update TOP: %d"%n
self.check()
# have to remove any old constraints first:
n_removed = 0
to_remove = []
# probably unnecessary, but queue the deletions to avoid any possibility
# of confusing the iterator
for e in self.DT.incident_constraints(self.nodes['vh'][n]):
n_removed += 1
v1 = e.face().vertex( (e.vertex() + 1)%3 )
v2 = e.face().vertex( (e.vertex() + 2)%3 )
vi1 = v1.info()
vi2 = v2.info()
to_remove.append( (e.face(), e.vertex(), vi1, vi2) )
if self.verbose > 2:
# weird stuff is happening in here, so print out some extra
# info
print " dt_update: found old constraint %s-%s"%(vi1,vi2)
if n_removed != len(self.pnt2edges(n)):
print " WARNING: was going to remove them, but n_removed=%d, but pnt2edges shows"%n_removed
# How many of this point's edges are in the queue to be added?
count_unreleased = 0
if self.edges_to_release:
for e in self.pnt2edges(n):
if e in self.edges_to_release:
count_unreleased += 1
if n_removed + count_unreleased != len(self.pnt2edges(n)):
print self.edges[self.pnt2edges(n),:2]
print "Even after counting edges that are queued for release, still fails."
raise Exception,"Something terrible happened trying to update a node"
for f,v,a,b in to_remove:
self.DT.remove_constraint(f,v)
self.dt_remove(n)
self.dt_insert(n)
# add back any of the constraints that we removed.
# This used to add all constraints involving n, but if we are in the middle
# of a release, pnt2edges() will not necessarily give the same edges as
# constraints
all_pairs = []
for f,v,a,b in to_remove:
all_pairs.append( (a,b) )
self.safe_insert_constraint(a,b)
n_removed -= 1
if n_removed != 0:
print " WARNING: in updating node %d, removed-added=%d"%(n,n_removed)
print " Inserted edges were ",all_pairs
raise Exception,"why does this happen?"
if self.verbose > 2:
print " dt_update END: %d"%n
self.check()
def dt_add_edge(self,e):
a,b = self.edges['nodes'][e]
### Try to anticipate unsafe connections -
for i in range(5): # try 5 times to adjust the conflicting nodes
constr_edges = self.check_line_is_clear(a,b)
if len(constr_edges)>0:
print "--=-=-=-=-=-= Inserting this edge %d-%d will cause an intersection -=-=-=-=-=-=-=--"%(a,b)
for v1,v2 in constr_edges:
#v1 = cgal_e.face().vertex( (cgal_e.vertex()+1)%3 )
#v2 = cgal_e.face().vertex( (cgal_e.vertex()+2)%3 )
print " intersects constrained edge: %d - %d"%(v1.info(),v2.info())
if self.verbose > 1:
if i==0:
self.plot(plot_title="About to prepare_conflicting_edges")
pylab.plot(self.nodes['x'][[a,b],0],
self.nodes['x'][[a,b],1],'m')
# Debugging:
# raise Exception,"Stopping before trying to fix conflicting edges"
self.prepare_conflicting_edges(e,constr_edges)
else:
break
###
self.safe_insert_constraint(a,b)
if a>b:
a,b=b,a
if self.verbose > 2:
print " dt_add_edge: adding constraint %d->%d"%(a,b)
self.check()
def prepare_conflicting_edges(self,e,constr_edges):
# First figure out which side is "open"
# We should only be called when the data in self.edges has already
# been taken care of, so it should be safe to just consult our cell ids.
a,b = self.edges['nodes'][e]
# arrange for a -> b to have the open side to its right
if self.edges['cells'][e,0] >= 0 and self.edges['cells'][e,1] >= 0:
print "prepare_conflicting_edges: both sides are closed!"
return
if self.edges['cells'][e,0] == -1 and self.edges['cells'][e,1] != -1:
a,b = b,a
elif self.edges['cells'][e,1] == -1:
pass
elif self.edges['cells'][e,0] == -2 and self.edges['cells'][e,1] != -2:
a,b = b,a
# otherwise it's already in the correct orientation
print "prepare_conflicting_edges: proceeding for edge %d-%d"%(a,b)
AB = self.nodes['x'][b] - self.nodes['x'][a]
open_dir = np.array( [AB[1],-AB[0]] )
mag = np.sqrt(AB[0]**2+AB[1]**2)
AB /= mag
open_dir /= mag
to_move = [] # node index for nodes that need to be moved.
for cgal_edge in constr_edges:
vh_c,vh_d = cgal_edge
c = vh_c.info()
d = vh_d.info()
if c is None:
print "No node data for conflicting vertex %s"%vh_c
continue
if d is None:
print "No node data for conflicting vertex %s"%vh_d
continue
# 2. which one is on the closed side?
c_beta = np.dot( self.nodes['x'][c] - self.nodes['x'][a],
open_dir)
d_beta = np.dot( self.nodes['x'][d] - self.nodes['x'][a],
open_dir)
if c_beta < 0 and d_beta >= 0:
to_move.append(c)
elif d_beta < 0 and c_beta >= 0:
to_move.append(d)
else:
print "Neither node in conflicting edge appears to be on the closed side"
to_move = np.unique(to_move)
eps = mag / 50.0
for n in to_move:
beta = np.dot( self.nodes['x'][n] - self.nodes['x'][a], open_dir)
if beta >= 0:
raise Exception,"Only nodes with beta<0 should be in this list!"
new_point = self.nodes['x'][n] - (beta-eps)*open_dir
print "prepare_conflicting_edges: Moving node %d to %s"%(n,new_point)
self.move_node(n,new_point)
def dt_remove_edge(self,e,nodes=None):
""" Remove the given edge from the triangulation. In cases
where the edge e has already been updated with different nodes,
pass in nodes as [a,b] to remove the edge as it was.
"""
if nodes is not None:
a,b = nodes
else:
a,b = self.edges['nodes'][e]
## DBG
if a>b:
a,b=b,a
if self.verbose > 2:
print " remove constraint %d->%d"%(a,b)
self.check()
## /DBG
# have to figure out the face,index for this edge
found_edge = 0
for dt_e in self.DT.incident_constraints( self.nodes['vh'][a] ):
v1 = dt_e.face().vertex( (dt_e.vertex() + 1)%3 )
v2 = dt_e.face().vertex( (dt_e.vertex() + 2)%3 )
if self.nodes['vh'][b] == v1 or self.nodes['vh'][b] == v2:
self.DT.remove_constraint( dt_e.face(), dt_e.vertex() )
return
raise MissingConstraint,"Tried to remove edge %i, but it wasn't in the constrained DT"%e
### API for adding/moving/deleting
## NODES
def add_node(self,P):
n = super(LiveDtGrid,self).add_node(P)
if self.freeze:
pass
elif self.holding:
self.holding_nodes[n] = 'add_node'
else:
# self.nodes['vh'][n] = 0 # unnecessary
self.dt_insert(n)
# tricky - a new node may interrupt some existing
# constraint, but when the node is removed the
# constraint is not remembered - so check for that
# explicitly -
interrupted_edge = []
for dt_e in self.DT.incident_constraints(self.nodes['vh'][n]):
a = dt_e.face().vertex( (dt_e.vertex() + 1)%3 )
b = dt_e.face().vertex( (dt_e.vertex() + 2)%3 )
if a.info() != n:
interrupted_edge.append(a.info())
else:
interrupted_edge.append(b.info())
if len(interrupted_edge):
self.push_op(self.uninterrupt_constraint,interrupted_edge)
return n
def uninterrupt_constraint(self,ab):
print "Uninterrupting a constraint. Yes!"
self.safe_insert_constraint(ab[0],ab[1])
def unmodify_edge(self, e, old_data):
""" a bit unsure of this... I don't know exactly where this
gets done the first time
"""
a,b = self.edges['nodes'][e]
n = super(LiveDtGrid,self).unmodify_edge(e,old_data)
if a!=old_data[0] or b!=old_data[1]:
print "unmodifying live_dt edge"
self.safe_insert_constraint(old_data[0],old_data[1])
def unadd_node(self,old_length):
if self.freeze:
pass
elif self.holding:
for i in range(old_length,len(self.nodes['x'])):
self.holding_nodes[i] = 'unadd'
else:
for i in range(old_length,len(self.nodes)):
self.dt_remove(i)
self.nodes['vh'] = self.nodes['vh'][:old_length]
super(LiveDtGrid,self).unadd_node(old_length)
if not (self.freeze or self.holding):
print "HEY - this would be a good time to refresh the neighboring constraints"
def delete_node(self,i,*args,**kwargs):
# there is a keyword argument, remove_edges
# does that need to be interpreted here?
if self.freeze:
pass
elif self.holding:
self.holding_nodes[i] = 'delete_node'
super(LiveDtGrid,self).delete_node(i,*args,**kwargs)
if not self.freeze and not self.holding:
self.dt_remove( i )
def undelete_node(self,i,p):
super(LiveDtGrid,self).undelete_node(i,p)
if self.freeze:
pass
elif self.holding:
self.holding_nodes[i] = 'undelete'
else:
self.dt_insert(i)
def unmove_node(self,i,orig_val):
super(LiveDtGrid,self).unmove_node(i,orig_val)
if self.freeze:
pass
elif self.holding:
self.holding_nodes[i] = 'unmove'
else:
self.dt_update(i)
def move_node(self,i,new_pnt,avoid_conflicts=True):
""" avoid_conflicts: if the new location would cause a
self-intersection, don't move it so far...
if the location is modified, return the actual location, otherwise
return None
"""
if not self.freeze and not self.holding:
# pre-emptively remove constraints and the vertex
# so that there aren't conflicts between the current
# edges and the probe point.
# See if the location will be okay -
to_remove = []
nbrs = [] # neighbor nodes, based only on constrained edges
for e in self.DT.incident_constraints(self.nodes['vh'][i]):
v1 = e.face().vertex( (e.vertex() + 1)%3 )
v2 = e.face().vertex( (e.vertex() + 2)%3 )
vi1 = v1.info()
vi2 = v2.info()
to_remove.append( (e.face(), e.vertex(), vi1, vi2) )
if vi1 == i:
nbrs.append(vi2)
else:
nbrs.append(vi1)
if len(to_remove) != len(self.node_to_edges(i)):
# why is this a warning here, but for unmove_node we bail out?
# I'm not really sure how this happens in the first place...
# this was a warning, but investigating...
raise Exception,"WARNING: move_node len(DT constraints) != len(pnt2edges(i))"
for f,v,a,b in to_remove:
self.DT.remove_constraint(f,v)
self.dt_remove(i)
# With the old edges and vertex out of the way, make sure the new location
# is safe, and adjust necessary
new_pnt = self.adjust_move_node(i,new_pnt,nbrs)
super(LiveDtGrid,self).move_node(i,new_pnt)
if self.freeze:
pass
elif self.holding:
self.holding_nodes[i] = 'move'
else:
# put the node back in, and add back any edges that we removed.
# NB: safer to add only the constraints that were there before, since it
# could be that the DT is not perfectly in sync with self.edges[]
self.dt_insert(i)
for f,v,a,b in to_remove:
self.safe_insert_constraint(a,b)
return new_pnt
def adjust_move_node(self,i,new_pnt,nbrs):
""" Check if it's okay to move the node i to the given point, and
if needed, return a different new_pnt location that won't make an
intersection
i: node index
new_pnt: the requested new location of the node
nbrs: list of neighbor node indices for checking edges
"""
new_pnt = np.asarray(new_pnt,np.float64)
# find existing constrained edges
# for each constrained edge:
# will the updated edge still be valid?
# if not, update new_pnt to be halfway between the old and the new,
# and loop again.
for shorten in range(15): # maximum number of shortenings allowed
all_good = True
# Create a probe vertex so we can call check_line_is_clear()
                # sort of winging it here for a measure of how close things are.
if abs(self.nodes['x'][i] - new_pnt).sum() / (1.0+abs(new_pnt).max()) < 1e-8:
print "In danger of floating point roundoff issues"
all_good = False
break
pnt = CGAL.Point_2( new_pnt[0], new_pnt[1] )
probe = self.DT.insert(pnt)
probe.set_info('PROBE!')
for nbr in nbrs:
valid=True
crossings = self.check_line_is_clear( n1=nbr, v2=probe )
if len(crossings) > 0:
all_good = False
new_pnt = 0.5*(self.nodes['x'][i]+new_pnt)
break
self.DT.remove(probe)
if all_good:
break
else:
if self.verbose>0:
sys.stdout.write("$") ; sys.stdout.flush()
if all_good:
return new_pnt
else:
return self.nodes['x'][i]
## EDGES
def add_edge(self,nodeA,nodeB,*args,**kwargs):
e = super(LiveDtGrid,self).add_edge(nodeA,nodeB,*args,**kwargs)
if self.freeze:
pass
elif self.holding:
self.holding_nodes[ self.edges['nodes'][e,0] ] ='add_edge'
self.holding_nodes[ self.edges['nodes'][e,1] ] ='add_edge'
else:
self.dt_add_edge(e)
return e
def unadd_edge(self,old_length):
if self.freeze:
pass
elif self.holding:
for e in range(old_length,len(self.edges)):
self.holding_nodes[ self.edges['nodes'][e,0] ] ='unadd_edge'
self.holding_nodes[ self.edges['nodes'][e,1] ] ='unadd_edge'
else:
for e in range(old_length,len(self.edges)):
self.dt_remove_edge(e)
super(LiveDtGrid,self).unadd_edge(old_length)
def delete_edge(self,e,*args,**kwargs):
if self.freeze:
pass
elif self.holding:
self.holding_nodes[ self.edges['nodes'][e,0] ] = 'delete_edge'
self.holding_nodes[ self.edges['nodes'][e,1] ] = 'delete_edge'
else:
self.dt_remove_edge(e)
super(LiveDtGrid,self).delete_edge(e,*args,**kwargs)
def undelete_edge(self,e,*args,**kwargs):
super(LiveDtGrid,self).undelete_edge(e,*args,**kwargs)
if self.freeze:
pass
elif self.holding:
self.holding_nodes[ self.edges['nodes'][e,0] ] = 'undelete_edge'
self.holding_nodes[ self.edges['nodes'][e,1] ] = 'undelete_edge'
else:
self.dt_add_edge(e)
def merge_edges(self,e1,e2):
if self.verbose > 1:
print " live_dt: merge edges %d %d"%(e1,e2)
# the tricky thing here is that we don't know which of
# these edges will be removed by merge_edges - one
# of them will get deleted, and then deleted by our
# delete handler.
# the other one will get modified, so by the time we get
# control again after trigrid, we won't know what to update
# so - save the nodes...
saved_nodes = self.edges['nodes'][ [e1,e2] ]
remaining = super(LiveDtGrid,self).merge_edges(e1,e2)
if self.freeze:
pass
elif self.holding:
for n in saved_nodes.ravel():
self.holding_nodes[n] = 'merge_edges'
else:
if remaining == e1:
ab = saved_nodes[0]
else:
ab = saved_nodes[1]
# the one that is *not* remaining has already been deleted
# just update the other one.
try:
self.dt_remove_edge(remaining,nodes=ab)
except MissingConstraint:
print " on merge_edges, may have an intervener"
raise
self.dt_add_edge(remaining)
return remaining
def unmerge_edges(self,e1,e2,*args,**kwargs):
#print " top of live_dt unmerge_edges"
check_dt_after = False
if self.freeze:
pass
elif self.holding:
pass
else:
# this can be problematic if the middle node is exactly on
# the line between them, because re-inserting that node
# will pre-emptively segment the constrained edge.
try:
self.dt_remove_edge(e1)
except MissingConstraint:
print " got a missing constraint on merge edges - will verify that it's okay"
check_dt_after = True
#print " after pre-emptive remove_edge"
super(LiveDtGrid,self).unmerge_edges(e1,e2,*args,**kwargs)
#print " after call to super()"
if self.freeze:
pass
elif self.holding:
n1,n2 = self.edges['nodes'][e1]
n3,n4 = self.edges['nodes'][e2]
for n in [n1,n2,n3,n4]:
self.holding_nodes[ n ] = 'unmerge_edges'
else:
if check_dt_after:
AB = self.edges['nodes'][e1]
BC = self.edges['nodes'][e2]
B = np.intersect1d(AB,BC)[0]
A = np.setdiff1d(AB,B)[0]
C = np.setdiff1d(BC,B)[0]
print "while unmerging edges, a constraint was pre-emptively created, but will verify that now %d-%d-%d."%(A,B,C)
for e in self.DT.incident_constraints(self.nodes['vh'][B]):
v1 = e.face().vertex( (e.vertex() + 1)%3 )
v2 = e.face().vertex( (e.vertex() + 2)%3 )
if v1.info() == A or v2.info() == A:
A = None
                        elif v1.info() == C or v2.info() == C:
                            C = None
else:
print "while unmerging edge, the middle point has another constrained DT neighbor - surprising..."
                    if A is not None or C is not None:
print "Failed to verify that implicit constraint was there"
raise MissingConstraint
else:
#print " adding reverted edge e1 and e2"
self.dt_add_edge(e1)
# even though trigrid.merge_edges() calls delete_edge()
# on e2, it doesn't register an undelete_edge() b/c
# rollback=0.
self.dt_add_edge(e2)
# def unsplit_edge(...): # not supported by trigrid
def split_edge(self,nodeA,nodeB,nodeC):
if self.freeze:
pass
elif self.holding:
self.holding_nodes[nodeA] = 'split_edge'
self.holding_nodes[nodeB] = 'split_edge'
self.holding_nodes[nodeC] = 'split_edge'
else:
if self.verbose > 2:
print " split_edge: %d %d %d"%(nodeA,nodeB,nodeC)
e1 = self.nodes_to_edge([nodeA,nodeC])
try:
self.dt_remove_edge(e1)
except MissingConstraint:
print " got a missing constraint on split edge, but maybe the edge has already been split"
self.dt_remove_edge(e1,[nodeA,nodeB])
self.dt_remove_edge(e1,[nodeB,nodeC])
print " Excellent. The middle node had become part of the constraint"
e2 = super(LiveDtGrid,self).split_edge(nodeA,nodeB,nodeC)
if self.freeze:
pass
elif self.holding:
pass
else:
self.dt_add_edge(e1)
self.dt_add_edge(e2)
return e2
def delete_node_and_merge(self,n):
if self.freeze:
return super(LiveDtGrid,self).delete_node_and_merge(n)
if self.holding:
self.holding_nodes[n] = 'delete_node_and_merge'
else:
# remove any constraints going to n -
self.DT.remove_incident_constraints(self.nodes['vh'][n])
self.dt_remove( n )
# note that this is going to call merge_edges, before it
# calls delete_node() - and merge_edges will try to add the new
# constraint, which will fail if the middle node is collinear with
# the outside nodes. so freeze LiveDT updates, then here we clean up
self.freeze = 1
new_edge = super(LiveDtGrid,self).delete_node_and_merge(n)
if self.verbose > 2:
print " Got new_edge=%s from trigrid.delete_node_and_merge"%new_edge
self.freeze=0
if self.holding:
for n in self.edges['nodes'][new_edge]:
self.holding_nodes[n] = 'delete_node_and_merge'
else:
# while frozen we missed a merge_edges and a delete node.
# we just want to do them in the opposite order of what trigrid does.
self.dt_add_edge(new_edge)
return new_edge
### Useful ways for subclasses to get DT information:
def delaunay_face(self,pnt):
""" Returns node indices making up the face of the DT in which pnt lies.
"""
f = self.DT.locate( CGAL.Point_2(pnt[0],pnt[1]) )
n = [f.vertex(i).info() for i in [0,1,2]]
return n
def delaunay_neighbors(self,n):
""" returns an array of node ids that the DT connects the given node
to. Includes existing edges
"""
nbrs = []
# how do we stop on a circulator?
first_v = None
# somehow it fails HERE, with self.nodes['vh'][n] being an int, rather
# than a vertex handle.
for v in self.DT.incident_vertices(self.nodes['vh'][n]):
if first_v is None:
first_v = v
elif first_v == v:
break
if self.DT.is_infinite(v):
continue
# print "Looking for vertex at ",v.point()
# This is going to need something faster, or maybe the store info
# bits of cgal.
nbr_i = v.info() # np.where( self.nodes['vh'] == v )[0]
if nbr_i is None:
print " While looking for vertex at ",v.point()
raise Exception,"expected vertex handle->node, but instead got %s"%nbr_i
nbrs.append( nbr_i )
return np.array(nbrs)
def renumber(self):
mappings = super(LiveDtGrid,self).renumber()
# no longer have to modify the vh array - it gets rearranged by trigrid2
for i in range(len(self.nodes['vh'])):
self.nodes['vh'][i].set_info(i)
return mappings
def delaunay_edge_iter(self):
for e in self.DT.edges:
if self.DT.is_infinite(e):
continue
n1 = e.face().vertex( (e.vertex() + 1)%3 ).info()
n2 = e.face().vertex( (e.vertex() + 2)%3 ).info()
if n1 is not None and n2 is not None:
yield n1,n2
####
def plot_dt(self,clip=None):
edges = []
colors = []
gray = (0.7,0.7,0.7,1.0)
magenta = (1.0,0.0,1.0,1.0)
for e in self.DT.edges:
if self.DT.is_infinite(e):
continue
v1 = e.face().vertex( (e.vertex() + 1)%3 )
v2 = e.face().vertex( (e.vertex() + 2)%3 )
edges.append( [ [v1.point().x(),v1.point().y()],
[v2.point().x(),v2.point().y()] ] )
if self.DT.is_constrained(e):
colors.append(magenta)
else:
colors.append(gray)
segments = np.array(edges)
colors = np.array(colors)
if clip is None:
clip = self.default_clip
if clip is not None:
points_visible = (segments[...,0] >= clip[0]) & (segments[...,0]<=clip[1]) \
& (segments[...,1] >= clip[2]) & (segments[...,1]<=clip[3])
# so now clip is a bool array of length Nedges
clip = np.any( points_visible, axis=1)
segments = segments[clip,...]
colors = colors[clip,...]
coll = collections.LineCollection(segments,colors=colors)
ax = pylab.gca()
ax.add_collection(coll)
## Detecting self-intersections
def face_in_direction(self,vh,vec):
""" Starting at the vertex handle vh, look in the direction
of vec to choose a face adjacent to vh
"""
# vh: vertex handle
# vec: search direction as array
theta = np.arctan2(vec[1],vec[0])
# choose a starting face
best_f = None
f_circ = self.DT.incident_faces(vh)
first_f = f_circ.next()
f = first_f
while 1:
# get the vertices of this face:
vlist=[f.vertex(i) for i in range(3)]
# rotate to make v1 first:
vh_index = vlist.index(vh)
vlist = vlist[vh_index:] + vlist[:vh_index]
# then check the relative angles of the other two - they are in CCW order
pnts = np.array( [ [v.point().x(),v.point().y()] for v in vlist] )
delta01 = pnts[1] - pnts[0]
delta02 = pnts[2] - pnts[0]
theta01 = np.arctan2( delta01[1], delta01[0] )
theta02 = np.arctan2( delta02[1], delta02[0] )
#
d01 = (theta - theta01)%(2*np.pi)
d02 = (theta02 - theta)%(2*np.pi)
#print "starting point:",pnts[0]
#print "Theta01=%f Theta=%f Theta02=%f"%(theta01,theta,theta02)
if (d01 < np.pi) and (d02 < np.pi):
best_f = f
break
f = f_circ.next()
if f == first_f:
raise Exception,"Went all the way around..."
return best_f
def next_face(self,f,p1,vec):
""" find the next face from f, along the line through v in the direction vec,
return the face and the edge that was crossed, where the edge is a face,i tuple
"""
# First get the vertices that make up this face:
# look over the edges:
vlist=[f.vertex(i) for i in range(3)]
pnts = np.array( [ [v.point().x(),v.point().y()] for v in vlist] )
# check which side of the line each vertex is on:
left_vec = np.array( [-vec[1],vec[0]] )
left_distance = [ (pnts[i,0] - p1[0])*left_vec[0] + (pnts[i,1]-p1[1])*left_vec[1] for i in range(3)]
# And we want the edge that goes from a negative to positive left_distance.
# should end with i being the index of the start of the edge that we want
for i in range(3):
# This doesn't quite follow the same definitions as in CGAL -
# because we want to ensure that we get a consecutive list of edges
# The easy case - the query line exits through an edge that straddles
# the query line, that's the <
# the == part comes in where the query line exits through a vertex.
# in that case, we choose the edge to the left (arbitrary).
if left_distance[i] <= 0 and left_distance[(i+1)%3] > 0:
break
# so now the new edge is between vertex i,(i+1)%3, so in CGAL parlance
# that's
edge = (f,(i-1)%3)
new_face = f.neighbor( (i-1)%3 )
return edge,new_face
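        # Reminder on the CGAL convention used above: an edge is addressed as (face,i),
        # i.e. the edge of `face` opposite its i-th vertex, so the edge between vertices
        # i and (i+1)%3 of the face is (face,(i-1)%3). next_face()/next_from_face() rely
        # on that identity when stepping across a face.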
def line_walk_edges_new(self,n1=None,n2=None,v1=None,v2=None,
include_tangent=False,
include_coincident=True):
# Use the CGAL primitives to implement this in a hopefully more
# robust way.
# unfortunately we can't use the line_walk() circulator directly
# because the bindings enumerate the whole list, making it potentially
# very expensive.
# ultimately we want to know edges which straddle the query line
# as well as nodes that fall exactly on the line.
# is it sufficient to then return a mixed list of edges and vertices
# that fall on the query line?
# and any edge that is coincident with the query line will be included
# in the output.
# but what is the appropriate traversal cursor?
# when no vertices fall exactly on the query line, tracking a face
# is fine.
# but when the query line goes through a vertex, it's probably better
# to just record the vertex.
# so for a first cut - make sure that we aren't just directly connected:
if (n2 is not None) and (n1 is not None) and (n2 in self.delaunay_neighbors(n1)):
return []
if v1 is None:
v1 = self.nodes['vh'][n1]
if v2 is None:
v2 = self.nodes['vh'][n2]
# Get the points from the vertices, not self.nodes['x'], because in some cases
# (adjust_move_node) we may be probing
p1 = np.array([ v1.point().x(), v1.point().y()] )
p2 = np.array([ v2.point().x(), v2.point().y()] )
if self.verbose > 1:
print "Walking the line: ",p1,p2
hits = [ ['v',v1] ]
# do the search:
while hits[-1][1] != v2:
# if we just came from a vertex, choose a new face in the given direction
if hits[-1][0] == 'v':
if self.verbose > 1:
print "Last hit was the vertex at %s"%(hits[-1][1].point())
# like face_in_direction, but also check for possibility that
# an edge is coincident with the query line.
next_item = self.next_from_vertex( hits[-1][1],(p1,p2) )
if self.verbose > 1:
print "Moved from vertex to ",next_item
if next_item[0] == 'v':
# Find the edge connecting these two:
for e in self.DT.incident_edges( next_item[1] ):
f = e.face()
v_opp = e.vertex()
if f.vertex( (v_opp+1)%3 ) == hits[-1][1] or \
f.vertex( (v_opp+2)%3 ) == hits[-1][1]:
hits.append( ['e', (f,v_opp)] )
break
elif hits[-1][0] == 'f':
# either we cross over an edge into another face, or we hit
# one of the vertices.
next_item = self.next_from_face( hits[-1][1], (p1,p2) )
# in case the next item is also a face, go ahead and insert
# the intervening edge
if next_item[0]=='f':
middle_edge = None
for v_opp in range(3):
if self.verbose > 1:
print "Comparing %s to %s looking for the intervening edge"%(hits[-1][1].neighbor(v_opp),
next_item[1])
if hits[-1][1].neighbor(v_opp) == next_item[1]:
middle_edge = ['e', (hits[-1][1],v_opp)]
break
if middle_edge is not None:
hits.append( middle_edge )
else:
raise Exception,"Two faces in a row, but couldn't find the edge between them"
elif hits[-1][0] == 'e':
# This one is easy - just have to check which end of the edge is in the
# desired direction
next_item = self.next_from_edge( hits[-1][1], (p1,p2) )
hits.append( next_item )
if self.verbose > 1:
print "Got hits: ",hits
# but ignore the first and last, since they are the starting/ending points
hits = hits[1:-1]
# and since some of those CGAL elements are going to disappear, translate everything
# into node references
for i in range(len(hits)):
if hits[i][0] == 'v':
hits[i][1] = [ hits[i][1].info() ]
elif hits[i][0] == 'e':
f,v_opp = hits[i][1]
hits[i][1] = [ f.vertex( (v_opp+1)%3 ).info(), f.vertex( (v_opp+2)%3 ).info() ]
elif hits[i][0] == 'f':
f = hits[i][1]
                    hits[i][1] = [ f.vertex(0).info(),
                                   f.vertex(1).info(),
                                   f.vertex(2).info() ]
# have to go back through, and where successive items are faces, we must
# have crossed cleanly through an edge, and that should be inserted, too
return hits
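        # The returned list mixes element types, e.g. (made-up node indices):
        #   [ ['v',[12]], ['e',[12,57]], ['f',[12,57,3]] ]
        # 'v' entries are nodes the query line passes exactly through, 'e' entries are
        # edges it crosses, and 'f' entries are faces traversed between crossings; the
        # starting and ending vertices themselves are stripped before returning.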
## steppers for line_walk_edges_new
def next_from_vertex(self, vert, vec):
# from a vertex, we either go into one of the faces, or along an edge
qp1,qp2 = vec
last_left_distance=None
last_nbr = None
start = None
for nbr in self.DT.incident_vertices(vert):
pnt = np.array( [nbr.point().x(),nbr.point().y()] )
left_distance = distance_left_of_line(pnt, qp1,qp2 )
# This used to be inside the last_left_distance < 0 block, but it seems to me
# that if we find a vertex for which left_distance is 0, that's our man.
# NOPE - having it inside the block caused the code to discard a colinear vertex
# that was behind us.
# in the corner case of three colinear points, and we start from the center, both
                # end points will have left_distance==0, and both will be preceded by the infinite
# vertex. So to distinguish colinear points it is necessary to check distance in the
# desired direction.
if left_distance==0.0:
dx = pnt[0] - vert.point().x()
dy = pnt[1] - vert.point().y()
progress = dx * (qp2[0] - qp1[0]) + dy * (qp2[1] - qp1[1])
if progress > 0:
return ['v',nbr]
# Note that it's also possible for the infinite vertex to come up.
# this should be okay when the left_distance==0.0 check is outside the
# block below. If it were inside the block, then we would miss the
# case where we see the infinite vertex (which makes last_left_distance
# undefined), and then see the exact match.
if last_left_distance is not None and last_left_distance < 0:
# left_distance == 0.0 used to be here.
if left_distance > 0:
# what is the face between the last one and this one??
# it's vertices are vert, nbr, last_nbr
for face in self.DT.incident_faces(vert):
for j in range(3):
if face.vertex(j) == nbr:
for k in range(3):
if face.vertex(k) == last_nbr:
return ['f',face]
raise Exception,"Found a good pair of incident vertices, but failed to find the common face."
# Sanity check - if we've gone all the way around
if start is None:
start = nbr
else: # must not be the first time through the loop:
if nbr == start:
raise Exception,"This is bad - we checked all vertices and didn't find a good neighbor"
last_left_distance = left_distance
last_nbr = nbr
if self.DT.is_infinite(nbr):
last_left_distance = None
raise Exception,"Fell through!"
def next_from_edge(self, edge, vec):
# vec is the tuple of points defining the query line
qp1,qp2 = vec
# edge is a tuple of face and vertex index
v1 = edge[0].vertex( (edge[1]+1)%3 )
v2 = edge[0].vertex( (edge[1]+2)%3 )
# this means the edge was coincident with the query line
p1 = v1.point()
p2 = v2.point()
p1 = np.array( [p1.x(),p1.y()] )
p2 = np.array( [p2.x(),p2.y()] )
line12 = p2 - p1
if np.dot( line12, qp2-qp1 ) > 0:
return ['v',v2]
else:
return ['v',v1]
def next_from_face(self, f, vec):
qp1,qp2 = vec
# stepping through a face, along the query line qp1 -> qp2
# we exit the face either via an edge, or possibly exactly through a
# vertex.
# A lot like next_face(), but hopefully more robust handling of
# exiting the face by way of a vertex.
# First get the vertices that make up this face:
# look over the edges:
vlist=[f.vertex(i) for i in range(3)]
pnts = np.array( [ [v.point().x(),v.point().y()] for v in vlist] )
# check which side of the line each vertex is on:
# HERE is where the numerical issues come up.
# could possibly do this in terms of the end points of the query line, in order to
# at least robustly handle the starting and ending points.
left_distance = [ distance_left_of_line(pnts[i], qp1,qp2 ) for i in range(3)]
# And we want the edge that goes from a negative to positive left_distance.
# should end with i being the index of the start of the edge that we want
for i in range(3):
# This doesn't quite follow the same definitions as in CGAL -
# because we want to ensure that we get a consecutive list of edges
# The easy case - the query line exits through an edge that straddles
# the query line, that's the <
# the == part comes in where the query line exits through a vertex.
# in that case, we choose the edge to the left (arbitrary).
if left_distance[i] <= 0 and left_distance[(i+1)%3] > 0:
break
# sanity check
if i==2:
raise Exception,"Trying to figure out how to get out of a face, and nothing looks good"
# Two cases - leaving via vertex, or crossing an edge internally.
if left_distance[i]==0:
return ['v',vlist[i]]
else:
# so now the new edge is between vertex i,(i+1)%3, so in CGAL parlance
# that's
new_face = f.neighbor( (i-1)%3 )
return ['f',new_face]
##
def line_walk_edges(self,n1=None,n2=None,v1=None,v2=None,
include_tangent=False,
include_coincident=True):
""" for a line starting at node n1 or vertex handle v1 and
ending at node n2 or vertex handle v2, return all the edges
that intersect.
"""
# this is a bit dicey in terms of numerical robustness -
# face_in_direction is liable to give bad results when multiple faces are
# indistinguishable (like a colinear set of points with many degenerate faces
# basically on top of each other).
# How can this be made more robust?
# When the query line exactly goes through one or more vertex stuff starts
# going nuts.
# So is it possible to handle this more intelligently?
# there are 3 possibilities for intersecting edges:
# (1) intersect only at an end point, i.e. endpoint lies on query line
# (2) intersect in interior of edge - one end point on one side, other endpoint
# on the other side of the query line
# (3) edge is coincident with query line
# so for a first cut - make sure that we aren't just directly connected:
if (n2 is not None) and (n1 is not None) and (n2 in self.delaunay_neighbors(n1)):
return []
if v1 is None:
v1 = self.nodes['vh'][n1]
if v2 is None:
v2 = self.nodes['vh'][n2]
# Get the points from the vertices, not self.nodes['x'], because in some cases
# (adjust_move_node) we may be probing
p1 = np.array([ v1.point().x(), v1.point().y()] )
p2 = np.array([ v2.point().x(), v2.point().y()] )
# print "Walking the line: ",p1,p2
vec = p2 - p1
unit_vec = vec / norm(vec)
pnt = p1
f1 = self.face_in_direction(v1,vec)
f2 = self.face_in_direction(v2,-vec)
# do the search:
f_trav = f1
edges = []
while 1:
# print "line_walk_edges: traversing face:"
# print [f_trav.vertex(i).point() for i in [0,1,2]]
# Stop condition: we're in a face containing the final vertex
# check the vertices directly, rather than the face
still_close = 0
for i in range(3):
if f_trav.vertex(i) == v2:
return edges
if not still_close:
# Check to see if this vertex is beyond the vertex of interest
vertex_i_pnt = np.array( [f_trav.vertex(i).point().x(),f_trav.vertex(i).point().y()] )
if norm(vec) > np.dot( vertex_i_pnt - p1, unit_vec):
still_close = 1
if not still_close:
# We didn't find any vertices of this face that were as close to where we started
# as the destination was, so we must have passed it.
print "BAILING: n1=%s n2=%s v1=%s v2=%s"%(n1,n2,v1,v2)
raise Exception,"Yikes - line_walk_edges exposed its numerical issues. We traversed too far."
return edges
edge,new_face = self.next_face(f_trav,pnt,vec)
edges.append(edge)
f_trav = new_face
return edges
def shoot_ray(self,n1,vec,max_dist=None):
""" Shoot a ray from self.nodes['x'][n] in the given direction vec
returns (e_index,pnt), the first edge that it encounters and the location
of the intersection
max_dist: stop checking beyond this distance -- currently doesn't make it faster
but will return None,None if the point that it finds is too far away
"""
v1 = self.nodes['vh'][n1]
vec = vec / norm(vec) # make sure it's a unit vector
pnt = self.nodes['x'][n1]
f1 = self.face_in_direction(v1,vec)
# do the search:
f_trav = f1
while 1:
edge,new_face = self.next_face(f_trav,pnt,vec)
# make that into a cgal edge:
e = CGAL.Triangulations_2.Edge(*edge)
if max_dist is not None:
# Test the distance as we go...
face,i = edge
va = face.vertex((i+1)%3)
vb = face.vertex((i-1)%3)
pa = va.point()
pb = vb.point()
d1a = np.array([pa.x()-pnt[0],pa.y() - pnt[1]])
# alpha * vec + beta * ab = d1a
# | vec[0] ab[0] | | alpha | = | d1a[0] |
# | vec[1] ab[1] | | beta | = | d1a[1] |
A = np.array( [[vec[0], pb.x() - pa.x()],
[vec[1], pb.y() - pa.y()]] )
alpha_beta = solve(A,d1a)
dist = alpha_beta[0]
if dist > max_dist:
return None,None
if self.DT.is_constrained(e):
# print "Found a constrained edge"
break
f_trav = new_face
na = va.info()
nb = vb.info()
if (na is None) or (nb is None):
raise Exception,"Constrained edge is missing at least one node index"
if max_dist is None:
# Compute the point at which they intersect:
ab = self.nodes['x'][nb] - self.nodes['x'][na]
d1a = self.nodes['x'][na] - pnt
# alpha * vec + beta * ab = d1a
# | vec[0] ab[0] | | alpha | = | d1a[0] |
# | vec[1] ab[1] | | beta | = | d1a[1] |
A = np.array( [[vec[0],ab[0]],[vec[1],ab[1]]] )
alpha_beta = solve(A,d1a)
else:
pass # already calculated alpha_beta
p_int = pnt + alpha_beta[0]*vec
edge_id = self.find_edge((na,nb))
return edge_id,p_int
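        # Since vec is normalized above, alpha (alpha_beta[0]) is the straight-line
        # distance from the starting node to the intersection, which is why it can be
        # compared directly against max_dist inside the loop.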
def check_line_is_clear(self,n1=None,n2=None,v1=None,v2=None,p1=None,p2=None):
""" returns a list of vertex tuple for constrained segments that intersect
the given line
"""
# if points were given, create some temporary vertices
if p1 is not None:
cp1 = CGAL.Point_2( p1[0], p1[1] )
v1 = self.DT.insert(cp1) ; v1.set_info('tmp')
if p2 is not None:
cp2 = CGAL.Point_2( p2[0], p2[1] )
v2 = self.DT.insert(cp2) ; v2.set_info('tmp')
edges = self.line_walk_edges(n1=n1,n2=n2,v1=v1,v2=v2)
constrained = []
for f,i in edges:
e = CGAL.Triangulations_2.Edge(f,i)
if self.DT.is_constrained(e):
vA = e.face().vertex( (e.vertex()+1)%3 )
vB = e.face().vertex( (e.vertex()+2)%3 )
print "Conflict info: ",vA.info(),vB.info()
constrained.append( (vA,vB) )
if p1 is not None:
self.DT.remove( v1 )
if p2 is not None:
self.DT.remove( v2 )
return constrained
def check_line_is_clear_new(self,n1=None,n2=None,v1=None,v2=None,p1=None,p2=None):
""" returns a list of vertex tuple for constrained segments that intersect
the given line.
in the case of vertices that are intersected, just a tuple of length 1
(and assumes that all vertices qualify as constrained)
"""
# if points were given, create some temporary vertices
if p1 is not None:
cp1 = CGAL.Point_2( p1[0], p1[1] )
v1 = self.DT.insert(cp1) ; v1.set_info('tmp')
if p2 is not None:
cp2 = CGAL.Point_2( p2[0], p2[1] )
v2 = self.DT.insert(cp2) ; v2.set_info('tmp')
crossings = self.line_walk_edges_new(n1=n1,n2=n2,v1=v1,v2=v2)
constrained = []
for crossing_type,crossing in crossings:
if crossing_type == 'f':
continue
if crossing_type == 'v':
constrained.append( (crossing_type,crossing) )
continue
if crossing_type == 'e':
n1,n2 = crossing
if self.verbose > 1:
print "Got potential conflict with edge",n1,n2
try:
self.find_edge( (n1,n2) )
constrained.append( ('e',(n1,n2)) )
except trigrid.NoSuchEdgeError:
pass
if p1 is not None:
self.DT.remove( v1 )
if p2 is not None:
self.DT.remove( v2 )
return constrained
## DT-based "smoothing"
# First, make sure the boundary is sufficiently sampled
def subdivide(self,min_edge_length=1.0,edge_ids=None):
""" Like medial_axis::subdivide_iterate -
Add nodes along the boundary as needed to ensure that the boundary
is well represented in channels
[ from medial_axis ]
Find edges that need to be sampled with smaller
steps and divide them into two edges.
returns the number of new edges / nodes
method: calculate voronoi radii
iterate over edges in boundary
for each edge, find the voronoi point that they have
in common. So this edge should be part of a triangle,
and we are getting the center of that triangle.
            compare the voronoi radius with the distance between the voronoi
point and the edge. If the edge is too long and needs to
be subdivided, it will be long (and the voronoi radius large)
compared to the distance between the edge and the vor. center.
"""
if edge_ids is None:
print "Considering all edges for subdividing"
edge_ids = range(self.Nedges())
else:
print "Considering only %d supplied edges for subdividing"%len(edge_ids)
to_subdivide = []
# Also keep a list of constrained edges of DT cells for which another edge
# has been selected for subdivision.
neighbors_of_subdivide = {}
print "Choosing edges to subdivide"
for i in edge_ids: # range(self.Nedges()):
if self.edges['stat'][i] != trigrid2.STAT_OK:
continue # edge has been deleted
# this only works when one side is unpaved and the other boundary -
if self.edges['cells'][i,0] != trigrid.UNMESHED or self.edges['cells'][i,1] != trigrid.BOUNDARY:
print "Skipping edge %d because it has weird cell ids"%i
continue
a,b = self.edges['nodes'][i]
# consult the DT to find who the third node is:
a_nbrs = self.delaunay_neighbors(a)
b_nbrs = self.delaunay_neighbors(b)
abc = np.array([self.nodes['x'][a],self.nodes['x'][b],[0,0]])
c = None
for nbr in a_nbrs:
if nbr in b_nbrs:
# does it lie to the left of the edge?
abc[2,:] = self.nodes['x'][nbr]
if trigrid.is_ccw(abc):
c = nbr
break
if c is None:
print "While looking at edge %d, %s - %s"%(i,self.nodes['x'][a],self.nodes['x'][b])
raise Exception,"Failed to find the third node that makes up an interior triangle"
pntV = trigrid.circumcenter(abc[0],abc[1],abc[2])
# compute the point-line distance between
# this edge and the v center, then compare to
# the distance from the endpoint to that
# vcenter
pntA = self.nodes['x'][a]
pntB = self.nodes['x'][b]
v_radius = np.sqrt( ((pntA-pntV)**2).sum() )
# This calculates unsigned distance - with Triangle, that's fine because
# it takes care of the Steiner points, but with CGAL we do it ourselves.
# line_clearance = np.sqrt( (( 0.5*(pntA+pntB) - pntV)**2).sum() )
ab = (pntB - pntA)
ab = ab / np.sqrt( np.sum(ab**2) )
pos_clearance_dir = np.array( [-ab[1],ab[0]] )
av = pntV - pntA
line_clearance = av[0]*pos_clearance_dir[0] + av[1]*pos_clearance_dir[1]
# Why do I get some bizarrely short edges?
ab = np.sqrt( np.sum( (pntA - pntB)**2 ) )
if v_radius > 1.2*line_clearance and v_radius > min_edge_length and ab>min_edge_length:
to_subdivide.append(i)
# Also make note of the other edges of this same DT triangle
for maybe_nbr in [ [a,c], [b,c] ]:
# could be an internal DT edge, or a real edge
try:
nbr_edge = self.find_edge(maybe_nbr)
neighbors_of_subdivide[nbr_edge] = 1
except trigrid.NoSuchEdgeError:
pass
# else:
# # Special handling of tight corners:
# # Actually, why is this not already being taken care of?
# # Look in older versions to see what was going on...
for i in to_subdivide:
if neighbors_of_subdivide.has_key(i):
del neighbors_of_subdivide[i]
a,b = self.edges['nodes'][i]
elts = self.all_iters_for_node(a)
if len(elts) != 1:
raise Exception,"How is there not exactly one iter for this node!?"
scale = 0.5*np.sqrt( np.sum( (self.nodes['x'][a]-self.nodes['x'][b])**2 ) )
new_elt = self.resample_boundary(elts[0],'forward',
local_scale=scale,
new_node_stat=self.node_data[a,0])
# keep track of any edges that change:
e1,e2 = self.pnt2edges(new_elt.data)
neighbors_of_subdivide[e1] = 1
neighbors_of_subdivide[e2] = 1
subdivided = np.array( neighbors_of_subdivide.keys() )
return subdivided
def subdivide_iterate(self,min_edge_length=1.0):
modified_edges = None
while 1:
# It wasn't enough to just test for no modified edges - rather than
# trying to be clever about checking exactly edges that may have
# been affected by a split, have nested iterations, and stop only
# when globally there are no modified edges
new_modified_edges = self.subdivide(min_edge_length=min_edge_length,
edge_ids = modified_edges)
print "Subdivide made %d new nodes"%(len(new_modified_edges)/2)
if len(new_modified_edges) == 0:
if modified_edges is None:
# this means we considered all edges, and still found nobody
# to split
break
else:
# this means we were only considering likely offenders -
# step back and consider everyone
print "Will reconsider all edges..."
modified_edges = None
else:
modified_edges = new_modified_edges
def smoothed_poly(self,density,min_edge_length=1.0):
""" Returns a polygon for the boundary that has all 'small' concave features
removed. Modifies the boundary points, but only by adding new samples evenly
between originals.
"""
# Make sure that all edges are sufficiently sampled:
self.subdivide_iterate(min_edge_length=min_edge_length)
# The process (same as in smoother.py):
# For all _interior_ DT cells
# calculate circum-radius
# mark for deletion any cell with radius < scale/2,
# with scale calculated at circumcenter
# For all non-deleted cells, create an array of all edges
# The notes in smoother say that if an edge appears exactly once
# then it should be kept.
# Edges that appear twice are internal to the domain.
# If/when degenerate edges take part in this, they will have to be
# handled specially, since they *should* have two adjacent, valid, cells.
# What is faster?
# (a) iterate over known boundary edges, grabbing cells to the left,
# and checking against a hash to see that the cell hasn't been included
# already
# (b) iterate over DT faces, checking to see if it's an internal face or not
# by checking ab,bc,ca against the edge hash?
# probably (b), since we already have this hash built.
# Actually, (b) isn't sufficient - there are triangles that are internal, but
# have no boundary edges.
# And (a) isn't good either - it would skip over any triangles that are entirely
# internal _or_ entirely external (i.e. share no edges with the boundary).
# Is there a way to do this by tracing edges? Start at some vertex on a clist.
# check the next edge forward - is the radius of the DT face to its left big enough?
# If so, we move forward.
# If not, detour?
# That's not quite enough, though. Really need to be checking every triangle incident
# to the vertex, not just the ones incident to the edges.
# So for simplicity, why not use the traversal of the edges to enumerate internal cells,
# then proceed as before.
cells = self.dt_interior_cells()
print "Marking for deletion DT faces that are too small"
points = self.nodes['x'][cells]
vcenters = trigrid.circumcenter(points[:,0],
points[:,1],
points[:,2])
# Threshold on the radius, squared
r2_thresh = (density(vcenters) / self.scale_ratio_for_cutoff)**2
# r^2 for each internal DT face
r2 = np.sum( (vcenters - points[:,0,:])**2,axis=1)
valid = r2 >= r2_thresh
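            # i.e. interior DT faces whose circumradius is smaller than
            # density/scale_ratio_for_cutoff are treated as too tight (narrow channels,
            # small concave features) and dropped before the boundary polygon is rebuilt.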
# From here on out it follows smoother.py very closely...
print "Compiling valid edges"
# expands cells into edges
good_cells = cells[valid]
all_edges = good_cells[:,np.array([[0,1],[1,2],[2,0]])]
# cells is Nfaces x 3
# all_edges is then Nfaces x 3 x 2
# combine the first two dimensions, so we have a regular edges array
all_edges = all_edges.reshape( (-1,2) )
print "building hash of edges"
edge_hash = {}
for i in range(len(all_edges)):
k = all_edges[i,:]
if k[0] > k[1]:
k=k[::-1]
k = tuple(k)
if not edge_hash.has_key(k):
edge_hash[k] = 0
edge_hash[k] += 1
print "Selecting boundary edges"
# good edges are then edges that appear in exactly one face
good_edges = []
for k in edge_hash:
if edge_hash[k] == 1:
good_edges.append(k)
good_edges = np.array(good_edges)
print "Finding polygons from edges"
tgrid = trigrid.TriGrid(points=self.nodes['x'],
edges =good_edges)
tgrid.verbose = 2
polygons = tgrid.edges_to_polygons(None) # none=> use all edges
self.smooth_all_polygons = polygons # for debugging..
print "done with smoothing"
return polygons[0]
def dt_interior_cells(self):
print "Finding interior cells from full Delaunay Triangulation"
interior_cells = []
for f in self.DT.faces:
a = f.vertex(0).info()
b = f.vertex(1).info()
c = f.vertex(2).info()
# going to be slow...
# How to test whether this face is internal:
# Arbitrarily choose a vertex: a
#
# Find an iter for which the face abc lies to the left of the boundary
internal = 0
for elt in self.all_iters_for_node(a):
d = self.nodes['x'][elt.nxt.data] - self.nodes['x'][a]
theta_afwd = np.arctan2(d[1],d[0])
d = self.nodes['x'][b] - self.nodes['x'][a]
theta_ab = np.arctan2(d[1],d[0])
d = self.nodes['x'][elt.prv.data] - self.nodes['x'][a]
theta_aprv = np.arctan2(d[1],d[0])
dtheta_b = (theta_ab - theta_afwd) % (2*np.pi)
dtheta_elt = (theta_aprv - theta_afwd) % (2*np.pi)
# if b==elt.nxt.data, then dtheta_b==0.0 - all good
if dtheta_b >= 0 and dtheta_b < dtheta_elt:
internal = 1
break
if internal:
interior_cells.append( [a,b,c] )
cells = np.array(interior_cells)
return cells
def dt_all_cells(self):
all_cells = []
for f in self.DT.faces:
a = f.vertex(0).info()
b = f.vertex(1).info()
c = f.vertex(2).info()
all_cells.append( [a,b,c] )
return np.array(all_cells)
def apollonius_scale(self,r,min_edge_length=1.0,process_islands=True):
""" Return an apollonius based field giving the scale subject to
the local feature size of geo and the telescoping rate r
"""
self.subdivide_iterate(min_edge_length=min_edge_length)
dt_cells = self.dt_interior_cells()
points = self.nodes['x'][dt_cells]
vcenters = trigrid.circumcenter(points[:,0],
points[:,1],
points[:,2])
radii = np.sqrt( np.sum( (vcenters - points[:,0,:])**2,axis=1) )
diam = 2*radii
if process_islands:
print "Hang on. Adding scale points for islands"
island_centers = []
island_scales = []
for int_ring in self.poly.interiors:
p = int_ring.convex_hull
points = np.array(p.exterior.coords)
center = points.mean(axis=0)
# brute force - find the maximal distance between
# any two points. probably a smart way to do this,
# but no worries...
max_dsqr = 0
for i in range(len(points)):
pa = points[i]
for j in range(i,len(points)):
d = ((pa - points[j])**2).sum()
max_dsqr = max(d,max_dsqr)
feature_scale = np.sqrt( max_dsqr )
print "Ring has scale of ",feature_scale
island_centers.append( center )
# this very roughly says that we want at least 4 edges
# for representing this thing.
# island_scales.append( feature_scale / 2.0)
# actually it's not too hard to have a skinny island
# 2 units long that gets reduced to a degenerate pair
# of edges, so go conservative here:
island_scales.append( feature_scale / 3.0 )
island_centers = np.array(island_centers)
island_scales = np.array(island_scales)
if len(island_centers) > 0:
vcenters = np.concatenate( (vcenters,island_centers) )
diam = np.concatenate( (diam,island_scales) )
print "Done with islands"
scale = field.ApolloniusField(vcenters,diam)
return scale
def dt_clearance(self,n):
"""POORLY TESTED
Returns the diameter of the smallest circumcircle (?) of a face
incident to the node n. Currently this doesn't work terribly well
because sliver triangles will create arbitrarily small clearances
at obtuse angles.
"""
diams = []
f_circ = self.DT.incident_faces( self.nodes['vh'][n] )
first_f = f_circ.next()
f = first_f
for f in f_circ:
if f == first_f:
break
diams.append( self.face_diameter(f) )
return min(diams)
def face_center(self,face):
nodes = np.array( [face.vertex(j).info() for j in range(3)] )
points = self.nodes['x'][nodes]
return trigrid.circumcenter(points[0],points[1],points[2])
def face_diameter(self,face):
nodes = np.array( [face.vertex(j).info() for j in range(3)] )
points = self.nodes['x'][nodes]
ccenter = trigrid.circumcenter(points[0],points[1],points[2])
return 2*norm(points[0] - ccenter)
def __getstate__(self):
d = super(LiveDtGrid,self).__getstate__()
d['DT'] = 'rebuild'
d['nodes']['vh'] = None
return d
except ImportError,exc:
print "CGAL unavailable."
print exc
import orthomaker
class LiveDtGrid(orthomaker.OrthoMaker):
""" placeholder for live delaunay triangulation code """
has_dt = 0
pending_conflicts = []
def hold(self):
pass
def release(self):
pass
def delaunay_neighbors(self,n):
return []
| gpl-2.0 |
GoogleCloudPlatform/mlops-on-gcp | on_demand/kfp-caip-sklearn/lab-03-kfp-cicd/pipeline/helper_components.py | 12 | 2846 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper components."""
from typing import NamedTuple
def retrieve_best_run(
project_id: str, job_id: str
) -> NamedTuple('Outputs', [('metric_value', float), ('alpha', float),
('max_iter', int)]):
"""Retrieves the parameters of the best Hypertune run."""
from googleapiclient import discovery
from googleapiclient import errors
ml = discovery.build('ml', 'v1')
job_name = 'projects/{}/jobs/{}'.format(project_id, job_id)
request = ml.projects().jobs().get(name=job_name)
try:
response = request.execute()
except errors.HttpError as err:
print(err)
except:
print('Unexpected error')
print(response)
best_trial = response['trainingOutput']['trials'][0]
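  # Assumes the completed job reports its trials sorted best-first by the
  # hyperparameter tuning objective, so trials[0] is the winning trial.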
metric_value = best_trial['finalMetric']['objectiveValue']
alpha = float(best_trial['hyperparameters']['alpha'])
max_iter = int(best_trial['hyperparameters']['max_iter'])
return (metric_value, alpha, max_iter)
def evaluate_model(
dataset_path: str, model_path: str, metric_name: str
) -> NamedTuple('Outputs', [('metric_name', str), ('metric_value', float),
('mlpipeline_metrics', 'Metrics')]):
"""Evaluates a trained sklearn model."""
#import joblib
import pickle
import json
import pandas as pd
import subprocess
import sys
from sklearn.metrics import accuracy_score, recall_score
df_test = pd.read_csv(dataset_path)
X_test = df_test.drop('Cover_Type', axis=1)
y_test = df_test['Cover_Type']
# Copy the model from GCS
model_filename = 'model.pkl'
gcs_model_filepath = '{}/{}'.format(model_path, model_filename)
print(gcs_model_filepath)
subprocess.check_call(['gsutil', 'cp', gcs_model_filepath, model_filename],
stderr=sys.stdout)
with open(model_filename, 'rb') as model_file:
model = pickle.load(model_file)
y_hat = model.predict(X_test)
if metric_name == 'accuracy':
metric_value = accuracy_score(y_test, y_hat)
elif metric_name == 'recall':
metric_value = recall_score(y_test, y_hat)
else:
metric_name = 'N/A'
metric_value = 0
# Export the metric
metrics = {
'metrics': [{
'name': metric_name,
'numberValue': float(metric_value)
}]
}
return (metric_name, metric_value, json.dumps(metrics))
| apache-2.0 |
cxhernandez/msmbuilder | msmbuilder/tests/test_preprocessing.py | 3 | 8031 | import numpy as np
from numpy.testing.decorators import skipif
try:
from sklearn.preprocessing import (FunctionTransformer as
FunctionTransformerR)
from msmbuilder.preprocessing import FunctionTransformer
HAVE_FT = True
except:
HAVE_FT = False
try:
from sklearn.preprocessing import MinMaxScaler as MinMaxScalerR
from msmbuilder.preprocessing import MinMaxScaler
HAVE_MMS = True
except:
HAVE_MMS = False
try:
from sklearn.preprocessing import MaxAbsScaler as MaxAbsScalerR
from msmbuilder.preprocessing import MaxAbsScaler
HAVE_MAS = True
except:
HAVE_MAS = False
try:
from sklearn.preprocessing import RobustScaler as RobustScalerR
from msmbuilder.preprocessing import RobustScaler
HAVE_RS = True
except:
HAVE_RS = False
try:
from sklearn.preprocessing import StandardScaler as StandardScalerR
from msmbuilder.preprocessing import StandardScaler
HAVE_SS = True
except:
HAVE_SS = False
from sklearn.preprocessing import (Binarizer as BinarizerR,
Imputer as ImputerR,
KernelCenterer as KernelCentererR,
LabelBinarizer as LabelBinarizerR,
MultiLabelBinarizer as MultiLabelBinarizerR,
Normalizer as NormalizerR,
PolynomialFeatures as PolynomialFeaturesR)
from ..preprocessing import (Binarizer, Imputer, KernelCenterer,
LabelBinarizer, MultiLabelBinarizer,
Normalizer, PolynomialFeatures, Butterworth,
EWMA, DoubleEWMA)
random = np.random.RandomState(42)
trajs = [random.randn(100, 3) for _ in range(5)]
labels = [random.randint(low=0, high=5, size=100).reshape(-1, 1)
for _ in range(5)]
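# Shared fixtures: five random (100, 3) "trajectories" and matching integer label
# arrays, reused by all of the tests below.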
def test_butterworth():
butterworth = Butterworth()
y1 = butterworth.transform(trajs)
assert len(y1) == len(trajs)
assert any(np.abs(y1[0] - trajs[0]).ravel() > 1E-5)
def test_ewma():
ewma = EWMA(span=5)
y1 = ewma.transform(trajs)
assert len(y1) == len(trajs)
assert any(np.abs(y1[0] - trajs[0]).ravel() > 1E-5)
def test_doubleewma():
dewma = DoubleEWMA(span=5)
y1 = dewma.transform(trajs)
assert len(y1) == len(trajs)
assert any(np.abs(y1[0] - trajs[0]).ravel() > 1E-5)
def test_binarizer_vs_sklearn():
# Compare msmbuilder.preprocessing.Binarizer
# with sklearn.preprocessing.Binarizer
binarizerr = BinarizerR()
binarizerr.fit(np.concatenate(trajs))
binarizer = Binarizer()
binarizer.fit(trajs)
y_ref1 = binarizerr.transform(trajs[0])
y1 = binarizer.transform(trajs)[0]
np.testing.assert_array_almost_equal(y_ref1, y1)
@skipif(not HAVE_FT, 'this test requires sklearn >0.17.0')
def test_functiontransformer_vs_sklearn():
# Compare msmbuilder.preprocessing.FunctionTransformer
# with sklearn.preprocessing.FunctionTransformer
functiontransformerr = FunctionTransformerR()
functiontransformerr.fit(np.concatenate(trajs))
functiontransformer = FunctionTransformer()
functiontransformer.fit(trajs)
y_ref1 = functiontransformerr.transform(trajs[0])
y1 = functiontransformer.transform(trajs)[0]
np.testing.assert_array_almost_equal(y_ref1, y1)
def test_imputer_vs_sklearn():
# Compare msmbuilder.preprocessing.Imputer
# with sklearn.preprocessing.Imputer
imputerr = ImputerR()
imputerr.fit(np.concatenate(trajs))
imputer = Imputer()
imputer.fit(trajs)
y_ref1 = imputerr.transform(trajs[0])
y1 = imputer.transform(trajs)[0]
np.testing.assert_array_almost_equal(y_ref1, y1)
def test_kernelcenterer_vs_sklearn():
# Compare msmbuilder.preprocessing.KernelCenterer
# with sklearn.preprocessing.KernelCenterer
kernelcentererr = KernelCentererR()
kernelcentererr.fit(np.concatenate(trajs))
kernelcenterer = KernelCenterer()
kernelcenterer.fit(trajs)
y_ref1 = kernelcentererr.transform(trajs[0])
y1 = kernelcenterer.transform(trajs)[0]
np.testing.assert_array_almost_equal(y_ref1, y1)
def test_labelbinarizer_vs_sklearn():
# Compare msmbuilder.preprocessing.LabelBinarizer
# with sklearn.preprocessing.LabelBinarizer
labelbinarizerr = LabelBinarizerR()
labelbinarizerr.fit(np.concatenate(labels))
labelbinarizer = LabelBinarizer()
labelbinarizer.fit(labels)
y_ref1 = labelbinarizerr.transform(labels[0])
y1 = labelbinarizer.transform(labels)[0]
np.testing.assert_array_almost_equal(y_ref1, y1)
def test_multilabelbinarizer_vs_sklearn():
# Compare msmbuilder.preprocessing.MultiLabelBinarizer
# with sklearn.preprocessing.MultiLabelBinarizer
multilabelbinarizerr = MultiLabelBinarizerR()
multilabelbinarizerr.fit(np.concatenate(trajs))
multilabelbinarizer = MultiLabelBinarizer()
multilabelbinarizer.fit(trajs)
y_ref1 = multilabelbinarizerr.transform(trajs[0])
y1 = multilabelbinarizer.transform(trajs)[0]
np.testing.assert_array_almost_equal(y_ref1, y1)
@skipif(not HAVE_MMS, 'this test requires sklearn >0.17.0')
def test_minmaxscaler_vs_sklearn():
# Compare msmbuilder.preprocessing.MinMaxScaler
# with sklearn.preprocessing.MinMaxScaler
minmaxscalerr = MinMaxScalerR()
minmaxscalerr.fit(np.concatenate(trajs))
minmaxscaler = MinMaxScaler()
minmaxscaler.fit(trajs)
y_ref1 = minmaxscalerr.transform(trajs[0])
y1 = minmaxscaler.transform(trajs)[0]
np.testing.assert_array_almost_equal(y_ref1, y1)
@skipif(not HAVE_MAS, 'this test requires sklearn >0.17.0')
def test_maxabsscaler_vs_sklearn():
# Compare msmbuilder.preprocessing.MaxAbsScaler
# with sklearn.preprocessing.MaxAbsScaler
maxabsscalerr = MaxAbsScalerR()
maxabsscalerr.fit(np.concatenate(trajs))
maxabsscaler = MaxAbsScaler()
maxabsscaler.fit(trajs)
y_ref1 = maxabsscalerr.transform(trajs[0])
y1 = maxabsscaler.transform(trajs)[0]
np.testing.assert_array_almost_equal(y_ref1, y1)
def test_normalizer_vs_sklearn():
# Compare msmbuilder.preprocessing.Normalizer
# with sklearn.preprocessing.Normalizer
normalizerr = NormalizerR()
normalizerr.fit(np.concatenate(trajs))
normalizer = Normalizer()
normalizer.fit(trajs)
y_ref1 = normalizerr.transform(trajs[0])
y1 = normalizer.transform(trajs)[0]
np.testing.assert_array_almost_equal(y_ref1, y1)
@skipif(not HAVE_RS, 'this test requires sklearn >0.17.0')
def test_robustscaler_vs_sklearn():
# Compare msmbuilder.preprocessing.RobustScaler
# with sklearn.preprocessing.RobustScaler
robustscalerr = RobustScalerR()
robustscalerr.fit(np.concatenate(trajs))
robustscaler = RobustScaler()
robustscaler.fit(trajs)
y_ref1 = robustscalerr.transform(trajs[0])
y1 = robustscaler.transform(trajs)[0]
np.testing.assert_array_almost_equal(y_ref1, y1)
@skipif(not HAVE_SS, 'this test requires sklearn >0.17.0')
def test_standardscaler_vs_sklearn():
# Compare msmbuilder.preprocessing.StandardScaler
# with sklearn.preprocessing.StandardScaler
standardscalerr = StandardScalerR()
standardscalerr.fit(np.concatenate(trajs))
standardscaler = StandardScaler()
standardscaler.fit(trajs)
y_ref1 = standardscalerr.transform(trajs[0])
y1 = standardscaler.transform(trajs)[0]
np.testing.assert_array_almost_equal(y_ref1, y1)
def test_polynomialfeatures_vs_sklearn():
# Compare msmbuilder.preprocessing.PolynomialFeatures
# with sklearn.preprocessing.PolynomialFeatures
polynomialfeaturesr = PolynomialFeaturesR()
polynomialfeaturesr.fit(np.concatenate(trajs))
polynomialfeatures = PolynomialFeatures()
polynomialfeatures.fit(trajs)
y_ref1 = polynomialfeaturesr.transform(trajs[0])
y1 = polynomialfeatures.transform(trajs)[0]
np.testing.assert_array_almost_equal(y_ref1, y1)
| lgpl-2.1 |
mchl02/ComparingDataWithPython | ComparingThreeDataSamples.py | 1 | 4295 | import matplotlib
import fileinput
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
# implement the default mpl key bindings
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
import sys
if sys.version_info[0] < 3:
import Tkinter as Tk
else:
import tkinter as Tk
from math import *
"""so to plot for the x axis it is just going to be numbers 1- 240"""
#file_name = "/Users/MLee/Desktop/testdata4.txt"
file_name1 = "/Users/MLee/Desktop/testdata4.txt"
file_name2 = "/Users/MLee/Desktop/testdata4-comp.txt"
file_name3 = "/Users/MLee/Desktop/testdata4-reconst.txt"
#My Lists are below
x_axis = []
y_axis = []
reduced_x_axis = []
reduced_y_axis = []
final_reduced_x_axis = []
final_reduced_y_axis = []
reconstructed_x_axis = []
reconstructed_y_axis = []
#Below is the code for the first file
counter = 0
for line in fileinput.input([file_name1]):
counter = counter + 1
print "The amount of values in the original data is" ,(counter)
#The amount of values in the first is 240 so xrange(240)
for x in xrange(240):
x_axis.append(x)
for data in fileinput.input([file_name1]):
paring = data.split()
y_axis.append(float(paring[0]))
print "This is the original data X axis" ,x_axis
print "This is the original data Y axis" ,y_axis
#Below is the code for the second file
print "The amount of values in the reduced data is 15"
"""for moreGX in xrange(240):
reconstructed_x_axis.append(moreGX)
"""
for xtra in fileinput.input([file_name2]):
parsing = xtra.split()
reduced_y_axis.append(float(parsing[0]))
#Below is the code for the third file
counter2 = 0
for all_numbers in fileinput.input([file_name3]):
counter2 = counter2 + 1
for hj in xrange(15):
final_reduced_x_axis.append(hj)
#final_reduced_x_axis.append(reduced_x_axis[1:16])
print "This is the final reduced x axis" ,final_reduced_x_axis
#This will make the value only be 15
final_reduced_y_axis.append(reduced_y_axis[1:16])
print "This is the final reduced Y axis" ,final_reduced_y_axis
#Parsing below
for moreG in fileinput.input([file_name3]):
parsing2 = moreG.split()
reconstructed_y_axis.append(float(parsing2[0]))
#Making sure length is 240
counter3 = 0
for hmm_numbers in xrange(240):
reconstructed_x_axis.append(hmm_numbers)
counter3 = counter3 + 1
#Printing reconstructed code
print "The amount of values in the reconstructed data is" ,counter3
print "This is the reconstructed X axis" ,reconstructed_x_axis
print "This is the reconstructed Y axis" ,reconstructed_y_axis
#Below is all the Tkinter stuff
root = Tk.Tk()
root.attributes('-fullscreen', True)
root.wm_title("Comparing Three plots")
f = Figure(figsize=(5,3), dpi=100)
canvas = FigureCanvasTkAgg(f, master=root)
canvas.get_tk_widget().pack(side=Tk.LEFT, fill=Tk.BOTH, expand=1)
toolbar = NavigationToolbar2TkAgg( canvas, root )
toolbar.update()
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.show()
#f = Figure(figsize=(5,4), dpi=100)
a = f.add_subplot(131)
a.scatter(x_axis,y_axis)
a.set_title("Original Data")
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
#canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
#toolbar = NavigationToolbar2TkAgg( canvas, root )
#toolbar.update()
#canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
x_io = [1,2]
y_io = [1,2]
#hill = Figure(figsize=(5,4), dpi=100)
apple = f.add_subplot(132)
apple.set_title("Reduced data")
apple.scatter(final_reduced_x_axis, final_reduced_y_axis)
#canvas1 = FigureCanvasTkAgg(hill, master=root)
canvas.show()
#canvas1.get_tk_widget().pack(side=Tk.BOTTOM, fill=Tk.BOTH, expand=1)
#canvas1._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
#canvas2 = FigureCanvasTkAgg(hill, master=root)
#hill2 = Figure(figsize=(5,4), dpi = 100)
pen = f.add_subplot(133)
pen.set_title("Reconstructed data")
#This wil graph the third plot which is the reconstructed data
pen.scatter(reconstructed_x_axis, reconstructed_y_axis)
#canvas3 = FigureCanvasTkAgg(hill2, master=root)
canvas.show()
#canvas3.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
#toolbar = NavigationToolbar2TkAgg( canvas, root )
#toolbar.update()
#canvas3._tkcanvas.pack(side=Tk.BOTTOM, fill=Tk.BOTH, expand=1)
Tk.mainloop() | mit |
yuyu2172/ilv | ilv/collect_results/collect_results_chainer.py | 1 | 1262 | import json
import os
import pandas as pd
import pickle
from ilv.collect_results.interactive import interactive
def read_json(path):
with open(path, 'r') as f:
j = json.load(f)
return pd.DataFrame(j)
def collect_results_chainer(result_base, table_ys):
"""Gather multiple time series result data and concatenate them
"""
dfs = []
args_list = []
count = 0
for root, dirs, files in os.walk(result_base):
if 'settings.pkl' in files:
logs = [file_ for file_ in files if 'log' in file_]
if len(logs) != 1:
continue
log = logs[0]
df = read_json(os.path.join(root, log))
df = df.interpolate()
with open(os.path.join(root, 'settings.pkl'), 'rb') as f:
logs = pickle.load(f)
for key, val in logs.items():
df[key] = val
df['count'] = count
dfs.append(df)
args_list.append(logs)
count += 1
dfs = pd.concat(dfs)
print('finished collecting')
return dfs, args_list
def collect_results_chainer_interactive(result_base, table_ys):
result_base = interactive(result_base)
return collect_results_chainer(result_base, table_ys)
| mit |
mugurbil/gnm | examples/rosenbrock/rosenbrock.py | 1 | 2592 | # -*- coding: utf-8 -*-
# Simple example with 1D Well
print("---------------------------------\n"+
"-------Rosenbrock Function-------\n"+
"---------------------------------")
import numpy as np
from scipy import integrate
import gnm
import time
import matplotlib.pyplot as plt
# random seeding
np.random.seed(3)
# initial guess
x_0 = [0.1, 0.1]
# user function
def model(x, args):
a = args['a']
b = args['b']
z = (a-x[0])**2+b*(x[1]-x[0]**2)**2
dx = -2*(a-x[0])+2*b*(x[1]-x[0]**2)*(-2*x[0])
dy = 2*b*(x[1]-x[0]**2)
return 1, [z], [[dx, dy]]
# observed data and error = arguments for the user function
args = {'a':1., 'b':1.}
# sampler object
jagger = gnm.sampler(x_0, model, args)
# user-defined prior mean and precision
m = [0., 0.] # vector
H = [[1., 0.],
[0., 1.]] # matrix
jagger.prior(m, H)
# domain for Jtest
d_min = [-3., -3.]
d_max = [3., 3.]
# test the model's function-Jacobian match
error = jagger.Jtest(d_min, d_max)
assert error == 0
# back-off info
max_steps = 0
dilation = 0.1
jagger.static(max_steps, dilation)
# start sampling
print("Sampling...")
n_samples = 1.1*10**5
jagger.sample(n_samples)
# burn the initial samples
n_burn = 10**3
jagger.burn(n_burn)
# print results
print("Acceptence Rate : {:.3f}".format(jagger.accept_rate))
print("Number Sampled : {:.1e}".format(n_samples))
print("Number Burned : {:.1e}".format(n_burn))
print("Number Used : {:.1e}".format(n_samples - n_burn))
# create plot info
n_grid = 100
# domain for error_bars
D_min = [-2., 0.]
D_max = [2., 0.]
x, p_x, err = jagger.error_bars(n_grid, D_min, D_max)
plt.plot(x[0], p_x, color = 'b', marker='o', label="Sampled", linewidth=0)
plt.errorbar(x[0], p_x, yerr = err, fmt = 'b.')
# theoretical curve (quadrature)
def integrand(a):
f = lambda b: jagger.posterior([a, b])
return f
x_min = D_min[0]
x_max = D_max[0]
integral_vector = np.empty([n_grid])
dx = (x_max-x_min)/n_grid
# integrate
for i in xrange(n_grid):
x_now = x_min + i * dx
integral, error = integrate.quad(integrand(x_now), -10, 10)
integral_vector[i] = integral
# normalize
normalization = np.average(integral_vector)*(x_max-x_min)
normed_vector = integral_vector/normalization
plt.plot(x[0], normed_vector, color = 'k', linewidth = 2, label="Theoretical")
# plot options
plt.legend(loc ="lower center")
plt.grid(True)
title = ("Rosenbrock")
plt.title(title)
plt.xlabel("x")
plt.ylabel("Probability")
plt.show()
plt.hist2d(jagger.chain[:,0], jagger.chain[:,1], bins=200, normed=True)
plt.show()
print("--------------FIN!--------------") | mit |
dremio/arrow | python/pyarrow/tests/test_io.py | 2 | 17652 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from functools import partial
from io import BytesIO, TextIOWrapper
import gc
import os
import pytest
import sys
import weakref
import numpy as np
import pandas as pd
from pyarrow.compat import u, guid
import pyarrow as pa
# ----------------------------------------------------------------------
# Python file-like objects
def test_python_file_write():
buf = BytesIO()
f = pa.PythonFile(buf)
assert f.tell() == 0
s1 = b'enga\xc3\xb1ado'
s2 = b'foobar'
f.write(s1.decode('utf8'))
assert f.tell() == len(s1)
f.write(s2)
expected = s1 + s2
result = buf.getvalue()
assert result == expected
f.close()
def test_python_file_read():
data = b'some sample data'
buf = BytesIO(data)
f = pa.PythonFile(buf, mode='r')
assert f.size() == len(data)
assert f.tell() == 0
assert f.read(4) == b'some'
assert f.tell() == 4
f.seek(0)
assert f.tell() == 0
f.seek(5)
assert f.tell() == 5
v = f.read(50)
assert v == b'sample data'
assert len(v) == 11
f.close()
def test_bytes_reader():
# Like a BytesIO, but zero-copy underneath for C++ consumers
data = b'some sample data'
f = pa.BufferReader(data)
assert f.tell() == 0
assert f.size() == len(data)
assert f.read(4) == b'some'
assert f.tell() == 4
f.seek(0)
assert f.tell() == 0
f.seek(5)
assert f.tell() == 5
assert f.read(50) == b'sample data'
f.close()
def test_bytes_reader_non_bytes():
with pytest.raises(TypeError):
pa.BufferReader(u('some sample data'))
def test_bytes_reader_retains_parent_reference():
import gc
# ARROW-421
def get_buffer():
data = b'some sample data' * 1000
reader = pa.BufferReader(data)
reader.seek(5)
return reader.read_buffer(6)
buf = get_buffer()
gc.collect()
assert buf.to_pybytes() == b'sample'
assert buf.parent is not None
def test_python_file_implicit_mode(tmpdir):
path = os.path.join(str(tmpdir), 'foo.txt')
with open(path, 'wb') as f:
pf = pa.PythonFile(f)
assert pf.writable()
assert not pf.readable()
assert not pf.seekable() # PyOutputStream isn't seekable
f.write(b'foobar\n')
with open(path, 'rb') as f:
pf = pa.PythonFile(f)
assert pf.readable()
assert not pf.writable()
assert pf.seekable()
assert pf.read() == b'foobar\n'
bio = BytesIO()
pf = pa.PythonFile(bio)
assert pf.writable()
assert not pf.readable()
assert not pf.seekable()
pf.write(b'foobar\n')
assert bio.getvalue() == b'foobar\n'
def test_python_file_closing():
bio = BytesIO()
pf = pa.PythonFile(bio)
wr = weakref.ref(pf)
del pf
assert wr() is None # object was destroyed
assert not bio.closed
pf = pa.PythonFile(bio)
pf.close()
assert bio.closed
# ----------------------------------------------------------------------
# Buffers
def test_buffer_bytes():
val = b'some data'
buf = pa.py_buffer(val)
assert isinstance(buf, pa.Buffer)
assert not buf.is_mutable
result = buf.to_pybytes()
assert result == val
def test_buffer_memoryview():
val = b'some data'
buf = pa.py_buffer(val)
assert isinstance(buf, pa.Buffer)
assert not buf.is_mutable
result = memoryview(buf)
assert result == val
def test_buffer_bytearray():
val = bytearray(b'some data')
buf = pa.py_buffer(val)
assert isinstance(buf, pa.Buffer)
assert buf.is_mutable
result = bytearray(buf)
assert result == val
def test_buffer_invalid():
with pytest.raises(TypeError,
match="(bytes-like object|buffer interface)"):
pa.py_buffer(None)
def test_buffer_to_numpy():
# Make sure creating a numpy array from an arrow buffer works
byte_array = bytearray(20)
byte_array[0] = 42
buf = pa.py_buffer(byte_array)
array = np.frombuffer(buf, dtype="uint8")
assert array[0] == byte_array[0]
byte_array[0] += 1
assert array[0] == byte_array[0]
assert array.base == buf
def test_buffer_from_numpy():
# C-contiguous
arr = np.arange(12, dtype=np.int8).reshape((3, 4))
buf = pa.py_buffer(arr)
assert buf.to_pybytes() == arr.tobytes()
    # F-contiguous; note strides information is lost
buf = pa.py_buffer(arr.T)
assert buf.to_pybytes() == arr.tobytes()
# Non-contiguous
with pytest.raises(ValueError, match="not contiguous"):
buf = pa.py_buffer(arr.T[::2])
def test_buffer_equals():
# Buffer.equals() returns true iff the buffers have the same contents
def eq(a, b):
assert a.equals(b)
assert a == b
assert not (a != b)
def ne(a, b):
assert not a.equals(b)
assert not (a == b)
assert a != b
b1 = b'some data!'
b2 = bytearray(b1)
b3 = bytearray(b1)
b3[0] = 42
buf1 = pa.py_buffer(b1)
buf2 = pa.py_buffer(b2)
buf3 = pa.py_buffer(b2)
buf4 = pa.py_buffer(b3)
buf5 = pa.py_buffer(np.frombuffer(b2, dtype=np.int16))
eq(buf1, buf1)
eq(buf1, buf2)
eq(buf2, buf3)
ne(buf2, buf4)
# Data type is indifferent
eq(buf2, buf5)
def test_buffer_hashing():
# Buffers are unhashable
with pytest.raises(TypeError, match="unhashable"):
hash(pa.py_buffer(b'123'))
def test_foreign_buffer():
obj = np.array([1, 2], dtype=np.int32)
addr = obj.__array_interface__["data"][0]
size = obj.nbytes
buf = pa.foreign_buffer(addr, size, obj)
wr = weakref.ref(obj)
del obj
assert np.frombuffer(buf, dtype=np.int32).tolist() == [1, 2]
assert wr() is not None
del buf
assert wr() is None
def test_allocate_buffer():
buf = pa.allocate_buffer(100)
assert buf.size == 100
assert buf.is_mutable
bit = b'abcde'
writer = pa.FixedSizeBufferWriter(buf)
writer.write(bit)
assert buf.to_pybytes()[:5] == bit
def test_allocate_buffer_resizable():
buf = pa.allocate_buffer(100, resizable=True)
assert isinstance(buf, pa.ResizableBuffer)
buf.resize(200)
assert buf.size == 200
def test_compress_decompress():
INPUT_SIZE = 10000
test_data = (np.random.randint(0, 255, size=INPUT_SIZE)
.astype(np.uint8)
.tostring())
test_buf = pa.py_buffer(test_data)
codecs = ['lz4', 'snappy', 'gzip', 'zstd', 'brotli']
for codec in codecs:
compressed_buf = pa.compress(test_buf, codec=codec)
compressed_bytes = pa.compress(test_data, codec=codec, asbytes=True)
assert isinstance(compressed_bytes, bytes)
decompressed_buf = pa.decompress(compressed_buf, INPUT_SIZE,
codec=codec)
decompressed_bytes = pa.decompress(compressed_bytes, INPUT_SIZE,
codec=codec, asbytes=True)
assert isinstance(decompressed_bytes, bytes)
assert decompressed_buf.equals(test_buf)
assert decompressed_bytes == test_data
with pytest.raises(ValueError):
pa.decompress(compressed_bytes, codec=codec)
def test_buffer_memoryview_is_immutable():
val = b'some data'
buf = pa.py_buffer(val)
assert not buf.is_mutable
assert isinstance(buf, pa.Buffer)
result = memoryview(buf)
assert result.readonly
with pytest.raises(TypeError) as exc:
result[0] = b'h'
assert 'cannot modify read-only' in str(exc.value)
b = bytes(buf)
with pytest.raises(TypeError) as exc:
b[0] = b'h'
assert 'cannot modify read-only' in str(exc.value)
def test_uninitialized_buffer():
# ARROW-2039: calling Buffer() directly creates an uninitialized object
check_uninitialized = partial(pytest.raises,
ReferenceError, match="uninitialized")
buf = pa.Buffer()
with check_uninitialized():
buf.size
with check_uninitialized():
len(buf)
with check_uninitialized():
buf.is_mutable
with check_uninitialized():
buf.parent
with check_uninitialized():
buf.to_pybytes()
with check_uninitialized():
memoryview(buf)
with check_uninitialized():
buf.equals(pa.py_buffer(b''))
with check_uninitialized():
pa.py_buffer(b'').equals(buf)
def test_memory_output_stream():
# 10 bytes
val = b'dataabcdef'
f = pa.BufferOutputStream()
K = 1000
for i in range(K):
f.write(val)
buf = f.get_result()
assert len(buf) == len(val) * K
assert buf.to_pybytes() == val * K
def test_inmemory_write_after_closed():
f = pa.BufferOutputStream()
f.write(b'ok')
f.get_result()
with pytest.raises(ValueError):
f.write(b'not ok')
def test_buffer_protocol_ref_counting():
def make_buffer(bytes_obj):
return bytearray(pa.py_buffer(bytes_obj))
buf = make_buffer(b'foo')
gc.collect()
assert buf == b'foo'
# ARROW-1053
val = b'foo'
refcount_before = sys.getrefcount(val)
for i in range(10):
make_buffer(val)
gc.collect()
assert refcount_before == sys.getrefcount(val)
def test_nativefile_write_memoryview():
f = pa.BufferOutputStream()
data = b'ok'
arr = np.frombuffer(data, dtype='S1')
f.write(arr)
f.write(bytearray(data))
buf = f.get_result()
assert buf.to_pybytes() == data * 2
# ----------------------------------------------------------------------
# Mock output stream
def test_mock_output_stream():
# Make sure that the MockOutputStream and the BufferOutputStream record the
# same size
# 10 bytes
val = b'dataabcdef'
f1 = pa.MockOutputStream()
f2 = pa.BufferOutputStream()
K = 1000
for i in range(K):
f1.write(val)
f2.write(val)
assert f1.size() == len(f2.get_result())
# Do the same test with a pandas DataFrame
val = pd.DataFrame({'a': [1, 2, 3]})
record_batch = pa.RecordBatch.from_pandas(val)
f1 = pa.MockOutputStream()
f2 = pa.BufferOutputStream()
stream_writer1 = pa.RecordBatchStreamWriter(f1, record_batch.schema)
stream_writer2 = pa.RecordBatchStreamWriter(f2, record_batch.schema)
stream_writer1.write_batch(record_batch)
stream_writer2.write_batch(record_batch)
stream_writer1.close()
stream_writer2.close()
assert f1.size() == len(f2.get_result())
# ----------------------------------------------------------------------
# OS files and memory maps
@pytest.fixture
def sample_disk_data(request, tmpdir):
SIZE = 4096
arr = np.random.randint(0, 256, size=SIZE).astype('u1')
data = arr.tobytes()[:SIZE]
path = os.path.join(str(tmpdir), guid())
with open(path, 'wb') as f:
f.write(data)
def teardown():
_try_delete(path)
request.addfinalizer(teardown)
return path, data
def _check_native_file_reader(FACTORY, sample_data):
path, data = sample_data
f = FACTORY(path, mode='r')
assert f.read(10) == data[:10]
assert f.read(0) == b''
assert f.tell() == 10
assert f.read() == data[10:]
assert f.size() == len(data)
f.seek(0)
assert f.tell() == 0
# Seeking past end of file not supported in memory maps
f.seek(len(data) + 1)
assert f.tell() == len(data) + 1
assert f.read(5) == b''
# Test whence argument of seek, ARROW-1287
assert f.seek(3) == 3
assert f.seek(3, os.SEEK_CUR) == 6
assert f.tell() == 6
ex_length = len(data) - 2
assert f.seek(-2, os.SEEK_END) == ex_length
assert f.tell() == ex_length
def test_memory_map_reader(sample_disk_data):
_check_native_file_reader(pa.memory_map, sample_disk_data)
def test_memory_map_retain_buffer_reference(sample_disk_data):
path, data = sample_disk_data
cases = []
with pa.memory_map(path, 'rb') as f:
cases.append((f.read_buffer(100), data[:100]))
cases.append((f.read_buffer(100), data[100:200]))
cases.append((f.read_buffer(100), data[200:300]))
# Call gc.collect() for good measure
gc.collect()
for buf, expected in cases:
assert buf.to_pybytes() == expected
def test_os_file_reader(sample_disk_data):
_check_native_file_reader(pa.OSFile, sample_disk_data)
def _try_delete(path):
try:
os.remove(path)
except os.error:
pass
def test_memory_map_writer(tmpdir):
SIZE = 4096
arr = np.random.randint(0, 256, size=SIZE).astype('u1')
data = arr.tobytes()[:SIZE]
path = os.path.join(str(tmpdir), guid())
with open(path, 'wb') as f:
f.write(data)
f = pa.memory_map(path, mode='r+b')
f.seek(10)
f.write('peekaboo')
assert f.tell() == 18
f.seek(10)
assert f.read(8) == b'peekaboo'
f2 = pa.memory_map(path, mode='r+b')
f2.seek(10)
f2.write(b'booapeak')
f2.seek(10)
f.seek(10)
assert f.read(8) == b'booapeak'
# Does not truncate file
f3 = pa.memory_map(path, mode='w')
f3.write('foo')
with pa.memory_map(path) as f4:
assert f4.size() == SIZE
with pytest.raises(IOError):
f3.read(5)
f.seek(0)
assert f.read(3) == b'foo'
def test_memory_zero_length(tmpdir):
path = os.path.join(str(tmpdir), guid())
f = open(path, 'wb')
f.close()
with pa.memory_map(path, mode='r+b') as memory_map:
assert memory_map.size() == 0
def test_os_file_writer(tmpdir):
SIZE = 4096
arr = np.random.randint(0, 256, size=SIZE).astype('u1')
data = arr.tobytes()[:SIZE]
path = os.path.join(str(tmpdir), guid())
with open(path, 'wb') as f:
f.write(data)
# Truncates file
f2 = pa.OSFile(path, mode='w')
f2.write('foo')
with pa.OSFile(path) as f3:
assert f3.size() == 3
with pytest.raises(IOError):
f2.read(5)
def test_native_file_modes(tmpdir):
path = os.path.join(str(tmpdir), guid())
with open(path, 'wb') as f:
f.write(b'foooo')
with pa.OSFile(path, mode='r') as f:
assert f.mode == 'rb'
assert f.readable()
assert not f.writable()
assert f.seekable()
with pa.OSFile(path, mode='rb') as f:
assert f.mode == 'rb'
assert f.readable()
assert not f.writable()
assert f.seekable()
with pa.OSFile(path, mode='w') as f:
assert f.mode == 'wb'
assert not f.readable()
assert f.writable()
assert not f.seekable()
with pa.OSFile(path, mode='wb') as f:
assert f.mode == 'wb'
assert not f.readable()
assert f.writable()
assert not f.seekable()
with open(path, 'wb') as f:
f.write(b'foooo')
with pa.memory_map(path, 'r') as f:
assert f.mode == 'rb'
assert f.readable()
assert not f.writable()
assert f.seekable()
with pa.memory_map(path, 'r+') as f:
assert f.mode == 'rb+'
assert f.readable()
assert f.writable()
assert f.seekable()
with pa.memory_map(path, 'r+b') as f:
assert f.mode == 'rb+'
assert f.readable()
assert f.writable()
assert f.seekable()
def test_native_file_raises_ValueError_after_close(tmpdir):
path = os.path.join(str(tmpdir), guid())
with open(path, 'wb') as f:
f.write(b'foooo')
with pa.OSFile(path, mode='rb') as os_file:
assert not os_file.closed
assert os_file.closed
with pa.memory_map(path, mode='rb') as mmap_file:
assert not mmap_file.closed
assert mmap_file.closed
files = [os_file,
mmap_file]
methods = [('tell', ()),
('seek', (0,)),
('size', ()),
('flush', ()),
('readable', ()),
('writable', ()),
('seekable', ())]
for f in files:
for method, args in methods:
with pytest.raises(ValueError):
getattr(f, method)(*args)
def test_native_file_TextIOWrapper(tmpdir):
data = (u'foooo\n'
u'barrr\n'
u'bazzz\n')
path = os.path.join(str(tmpdir), guid())
with open(path, 'wb') as f:
f.write(data.encode('utf-8'))
with TextIOWrapper(pa.OSFile(path, mode='rb')) as fil:
assert fil.readable()
res = fil.read()
assert res == data
assert fil.closed
with TextIOWrapper(pa.OSFile(path, mode='rb')) as fil:
# Iteration works
lines = list(fil)
assert ''.join(lines) == data
# Writing
path2 = os.path.join(str(tmpdir), guid())
with TextIOWrapper(pa.OSFile(path2, mode='wb')) as fil:
assert fil.writable()
fil.write(data)
with TextIOWrapper(pa.OSFile(path2, mode='rb')) as fil:
res = fil.read()
assert res == data
| apache-2.0 |
akionakamura/scikit-learn | sklearn/__init__.py | 154 | 3014 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.17.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'lda', 'learning_curve',
'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
| bsd-3-clause |
PytLab/catplot | catplot/ep_components/ep_canvas.py | 1 | 11852 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import namedtuple
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.lines import Line2D
from matplotlib.patches import Ellipse
from matplotlib.spines import Spine
import numpy as np
from catplot.canvas import Canvas
from catplot.chem_parser import RxnEquation
from catplot.ep_components.ep_lines import EPLine
from catplot.ep_components.ep_chain import EPChain
from catplot.ep_components.ep_lines import ElementaryLine
class EPCanvas(Canvas):
""" Energy profile canvas.
Parameters:
-----------
margin_ratio: float, optional, default is 0.1
control the white space between energy profile line and axes.
figsize : tuple of integers, optional, default: None
width, height in inches. If not provided, defaults to rc figure.figsize.
dpi : integer, optional, default: None
resolution of the figure. If not provided, defaults to rc figure.dpi.
facecolor : str, optional
the background color. If not provided, defaults to rc figure.facecolor
edgecolor : str, optional
the border color. If not provided, defaults to rc figure.edgecolor
x_ticks : float list
set the x ticks with a list of ticks.
y_ticks : float list
set the y ticks with a list of ticks.
"""
def __init__(self, **kwargs):
super(EPCanvas, self).__init__(**kwargs)
self._set_axes()
# Energy profile lines.
self.lines = []
self.shadow_lines = []
# Energy profile chains.
self.chains = []
def add_line(self, ep_line):
""" Add an energy profile line to canvas.
"""
if not isinstance(ep_line, ElementaryLine):
raise ValueError("line added must be instance of EPLine")
if ep_line in self:
msg = "the line is already in canvas, try to add the copy of it if you want."
raise ValueError(msg)
self.lines.append(ep_line)
def add_lines(self, ep_lines):
""" Add energy profile lines to canvas.
"""
# Check lines before adding.
for line in ep_lines:
self.add_line(line)
def add_chain(self, ep_chain):
""" Add energy profile line chain to canvas.
"""
if not isinstance(ep_chain, EPChain):
raise ValueError("Added chain must be instance of EPChain")
if ep_chain in self:
msg = "the chain is already in canvas, try to add the copy of it if you want."
raise ValueError(msg)
self.chains.append(ep_chain)
self.lines.extend(ep_chain.elementary_lines)
def add_chains(self, ep_chains):
""" Add multiple energy profile chains to canvas.
"""
for chain in ep_chains:
self.add_chain(chain)
def add_all_horizontal_auxiliary_lines(self):
""" Add horizontal auxiliary lines to all elementary lines in canvas.
"""
for line in self.lines:
self.add_horizontal_auxiliary_line(line)
return self
def add_all_vertical_auxiliary_lines(self):
""" Add vertical auxiliary lines to all elemtary lines in canvas.
"""
for line in self.lines:
self.add_vertical_auxiliary_lines(line)
return self
def add_all_species_annotations(self):
""" Add all speices annotations to all elementary lines in canvas.
"""
for line in self.lines:
self.add_species_annotations(line)
return self
def add_all_energy_annotations(self):
""" Add all energy annotations to all elementary lines in canvas.
"""
for line in self.lines:
self.add_energy_annotations(line)
return self
def _render_ep_lines(self):
""" Render energy profile lines in canvas.
"""
for line in self.lines:
for idx in range(line.shadow_depth):
identity_trans = transforms.IdentityTransform()
offset = transforms.ScaledTranslation(idx, -idx, identity_trans)
shadow_trans = self.axes.transData + offset
# Create matplotlib Line2D.
alpha = (line.shadow_depth-idx)/2.0/line.shadow_depth
shadow_line = Line2D(line.x, line.y,
linewidth=line.line_width,
color=line.shadow_color,
transform=shadow_trans,
alpha=alpha)
self.shadow_lines.append(shadow_line)
def _get_data_limits(self):
""" Private helper function to get the limits of data.
"""
# Merge all data in energy profile lines.
all_x = np.concatenate([l.x for l in self.lines])
all_y = np.concatenate([l.y for l in self.lines])
max_x = np.max(all_x)
min_x = np.min(all_x)
max_y = np.max(all_y)
min_y = np.min(all_y)
return self._limits(max_x, min_x, max_y, min_y)
def add_species_annotations(self, ep_line):
""" Add annoatates to a specific elementary energy profile line.
Parameters:
-----------
ep_line: EPLine object, the energy profile line.
"""
if ep_line.rxn_equation is None:
return
eigen_pts = ep_line.eigen_points
states = RxnEquation(ep_line.rxn_equation).tolist()
note_offset = ep_line.scale_y/40
params = []
# IS
x_i = ep_line.hline_length/10
y_i = eigen_pts.A[0] + note_offset
note_i = r"$\bf{" + states[0].texen() + r"}$"
params.append([x_i, y_i, note_i])
# FS
x_f = ep_line.hline_length/10 + eigen_pts.D[0]
y_f = eigen_pts.D[1] + note_offset
note_f = r"$\bf{" + states[-1].texen() + r"}$"
params.append([x_f, y_f, note_f])
# TS
if eigen_pts.has_barrier:
x_t = eigen_pts.C[0] - ep_line.hline_length/4
y_t = eigen_pts.C[1] + note_offset
note_t = r"$\bf" + states[1].texen() + r"}$"
params.append([x_t, y_t, note_t])
# Add them to canvas.
for idx, param_list in enumerate(params):
if idx == 2:
self.axes.text(*param_list, fontdict={"fontsize": 13, "color": "#CD5555"})
else:
self.axes.text(*param_list, fontdict={'fontsize': 13, 'color': '#1874CD'})
return self
def add_horizontal_auxiliary_line(self, ep_line):
""" Add horizontal auxiliary line to a specific energy profile line.
Parameters:
-----------
ep_line: EPLine object, the energy profile line.
"""
eigen_pts = ep_line.eigen_points
# Horizontal auxiliary line.
x = [eigen_pts.B[0], eigen_pts.E[0]]
y = [eigen_pts.B[1], eigen_pts.B[1]]
# Add it to axes.
aux_line = Line2D(x, y, color="#595959", linewidth=1, linestyle="dashed")
self.axes.add_line(aux_line)
return self
def add_vertical_auxiliary_lines(self, ep_line):
""" Add vertical auxiliary line to a specific energy profile line.
Parameters:
-----------
ep_line: EPLine object, the energy profile line.
"""
eigen_pts = ep_line.eigen_points
if eigen_pts.has_barrier:
# Arrow between barrier.
x = eigen_pts.C[0]
y1 = eigen_pts.B[1]
y2 = eigen_pts.C[1]
self.axes.annotate("", xy=(x, y1),
xycoords="data",
xytext=(x, y2),
textcoords="data",
arrowprops=dict(arrowstyle="<->"))
# Arrow between reaction energy.
x = (eigen_pts.D[0] + eigen_pts.E[0])/2.0
y1 = eigen_pts.D[-1]
y2 = eigen_pts.B[-1]
self.axes.annotate('', xy=(x, y1),
xycoords="data",
xytext=(x, y2),
textcoords="data",
arrowprops=dict(arrowstyle="<->"))
return self
def add_energy_annotations(self, ep_line):
""" Add energy related annotations to a specific energy profile line.
Parameters:
-----------
ep_line: EPLine object, the energy profile line.
"""
eigen_pts = ep_line.eigen_points
# Energy latex strings.
if eigen_pts.has_barrier:
act_energy_latex = r"$\bf{G_{a} = " + str(ep_line.energies[1]) + r" eV}$"
rxn_energy_latex = r"$\bf{\Delta G = " + str(ep_line.energies[-1]) + r" eV}$"
el = Ellipse((2, -1), 0.5, 0.5)
if eigen_pts.has_barrier:
# Text annotation for barrier.
x = eigen_pts.C[0]
y = (eigen_pts.B[1] + eigen_pts.C[1])/2.0
self.axes.annotate(act_energy_latex,
xy=(x, y),
xytext=(-150, 30),
textcoords="offset points",
size=13,
color="#B22222",
arrowprops=dict(arrowstyle="simple",
fc="0.6",
ec="none",
patchB=el,
connectionstyle="arc3,rad=0.2"))
# Text annotation for reaction energy.
x = (eigen_pts.D[0] + eigen_pts.E[0])/2.0
y = (eigen_pts.D[1] + eigen_pts.B[1])/2.0
self.axes.annotate(rxn_energy_latex,
xy=(x, y),
xytext=(50, 30),
textcoords="offset points",
size=13,
color="#8E388E",
arrowprops=dict(arrowstyle="simple",
fc="0.6",
ec="none",
patchB=el,
connectionstyle="arc3,rad=0.2"))
return self
def draw(self):
""" Draw all lines to canvas.
"""
if not self.lines:
raise AttributeError("Can't draw an empty canvas")
# Render energy profile lines.
self._render_ep_lines()
# Draw shadows.
for shadow_line in self.shadow_lines:
self.axes.add_line(shadow_line)
# Draw energy profile lines.
for line in self.lines:
self.axes.add_line(line.line2d())
# Set axes limits.
limits = self._get_data_limits()
self.axes.set_xlim(limits.min_x, limits.max_x)
self.axes.set_ylim(limits.min_y, limits.max_y)
def redraw(self):
""" Clear current content in canvas and draw all lines again.
"""
self.clear()
self.draw()
def clear(self):
""" Clear the canvas (only the lines in canvas.axes).
"""
self.axes.clear()
def deep_clear(self):
""" Clear all lines in canvas and canvas.axes
"""
self.clear()
self.lines = []
self.chains = []
self.shadow_lines = []
# -------------------------------------------------------------------------
# Magic method to change the default behaviours.
# -------------------------------------------------------------------------
def __contains__(self, item):
""" Membership test operators.
"""
if isinstance(item, ElementaryLine):
return item in self.lines
if isinstance(item, EPChain):
return item in self.chains
| mit |
yanlend/scikit-learn | examples/neighbors/plot_species_kde.py | 282 | 4059 | """
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`example_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in
Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']
Xtrain = np.vstack([data['train']['dd lat'],
data['train']['dd long']]).T
ytrain = np.array([d.decode('ascii').startswith('micro')
for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180. # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(bandwidth=0.04, metric='haversine',
kernel='gaussian', algorithm='ball_tree')
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = -9999 + np.zeros(land_mask.shape[0])
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
| bsd-3-clause |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/matplotlib/ticker.py | 4 | 63240 | """
Tick locating and formatting
============================
This module contains classes to support completely configurable tick locating
and formatting. Although the locators know nothing about major or minor
ticks, they are used by the Axis class to support major and minor tick
locating and formatting. Generic tick locators and formatters are provided,
as well as domain-specific custom ones.
Default Formatter
-----------------
The default formatter identifies when the x-data being
plotted is a small range on top of a large offset. To
reduce the chances that the ticklabels overlap, the ticks
are labeled as deltas from a fixed offset. For example::
ax.plot(np.arange(2000, 2010), range(10))
will have ticks of 0-9 with an offset of +2e3. If this
is not desired, turn off the use of the offset on the default
formatter::
ax.get_xaxis().get_major_formatter().set_useOffset(False)
Alternatively, set the rcParam ``axes.formatter.useoffset=False`` to turn it off
globally, or set a different formatter.
Tick locating
-------------
The Locator class is the base class for all tick locators. The locators
handle autoscaling of the view limits based on the data limits, and the
choosing of tick locations. A useful semi-automatic tick locator is
MultipleLocator. You initialize this with a base, e.g., 10, and it picks axis
limits and ticks that are multiples of your base.
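For example, to place a tick at every multiple of 10 data units on the x-axis::
    ax.xaxis.set_major_locator(MultipleLocator(10))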
The Locator subclasses defined here are
:class:`NullLocator`
No ticks
:class:`FixedLocator`
Tick locations are fixed
:class:`IndexLocator`
locator for index plots (e.g., where x = range(len(y)))
:class:`LinearLocator`
evenly spaced ticks from min to max
:class:`LogLocator`
    logarithmically spaced ticks from min to max
:class:`SymmetricalLogLocator`
    locator for use with the symlog norm; works like the `LogLocator` for
    the part outside of the threshold and adds 0 if inside the limits
:class:`MultipleLocator`
ticks and range are a multiple of base;
    either integer or float
:class:`OldAutoLocator`
    chooses a MultipleLocator and dynamically reassigns it for
intelligent ticking during navigation
:class:`MaxNLocator`
finds up to a max number of ticks at nice locations
:class:`AutoLocator`
:class:`MaxNLocator` with simple defaults. This is the default
tick locator for most plotting.
:class:`AutoMinorLocator`
locator for minor ticks when the axis is linear and the
major ticks are uniformly spaced. It subdivides the major
tick interval into a specified number of minor intervals,
defaulting to 4 or 5 depending on the major interval.
There are a number of locators specialized for date locations - see
the dates module
You can define your own locator by deriving from Locator. You must
override the __call__ method, which returns a sequence of locations,
and you will probably want to override the autoscale method to set the
view limits from the data limits.
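For instance, a minimal custom locator might be sketched as follows
(``EveryNth`` is illustrative only, not a class defined in this module)::
    class EveryNth(Locator):
        # place a tick at every multiple of *step*
        def __init__(self, step):
            self.step = step
        def __call__(self):
            vmin, vmax = self.axis.get_view_interval()
            start = self.step * np.ceil(vmin / self.step)
            return np.arange(start, vmax + self.step, self.step)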
If you want to override the default locator, use one of the above or a
custom locator and pass it to the x or y axis instance. The relevant
methods are::
ax.xaxis.set_major_locator( xmajorLocator )
ax.xaxis.set_minor_locator( xminorLocator )
ax.yaxis.set_major_locator( ymajorLocator )
ax.yaxis.set_minor_locator( yminorLocator )
The default minor locator is the NullLocator, i.e., no minor ticks are
drawn by default.
Tick formatting
---------------
Tick formatting is controlled by classes derived from Formatter. The
formatter operates on a single tick value and returns a string to the
axis.
:class:`NullFormatter`
no labels on the ticks
:class:`IndexFormatter`
set the strings from a list of labels
:class:`FixedFormatter`
set the strings manually for the labels
:class:`FuncFormatter`
user defined function sets the labels
:class:`StrMethodFormatter`
Use string `format` method
:class:`FormatStrFormatter`
use a sprintf format string
:class:`ScalarFormatter`
default formatter for scalars; autopick the fmt string
:class:`LogFormatter`
formatter for log axes
You can derive your own formatter from the Formatter base class by
simply overriding the ``__call__`` method. The formatter class has access
to the axis view and data limits.
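For instance, a minimal custom formatter might be sketched as follows
(``CurrencyFormatter`` is illustrative only, not a class defined in this
module)::
    class CurrencyFormatter(Formatter):
        # render the tick value 3.5 as '$3.50'
        def __call__(self, x, pos=None):
            return '$%1.2f' % x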
To control the major and minor tick label formats, use one of the
following methods::
ax.xaxis.set_major_formatter( xmajorFormatter )
ax.xaxis.set_minor_formatter( xminorFormatter )
ax.yaxis.set_major_formatter( ymajorFormatter )
ax.yaxis.set_minor_formatter( yminorFormatter )
See :ref:`pylab_examples-major_minor_demo1` for an example of setting
major and minor ticks. See the :mod:`matplotlib.dates` module for
more information and examples of using date locators and formatters.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import decimal
import locale
import math
import numpy as np
from matplotlib import rcParams
from matplotlib import cbook
from matplotlib import transforms as mtransforms
import warnings
if six.PY3:
long = int
class _DummyAxis(object):
def __init__(self, minpos=0):
self.dataLim = mtransforms.Bbox.unit()
self.viewLim = mtransforms.Bbox.unit()
self._minpos = minpos
def get_view_interval(self):
return self.viewLim.intervalx
def set_view_interval(self, vmin, vmax):
self.viewLim.intervalx = vmin, vmax
def get_minpos(self):
return self._minpos
def get_data_interval(self):
return self.dataLim.intervalx
def set_data_interval(self, vmin, vmax):
self.dataLim.intervalx = vmin, vmax
class TickHelper(object):
axis = None
def set_axis(self, axis):
self.axis = axis
def create_dummy_axis(self, **kwargs):
if self.axis is None:
self.axis = _DummyAxis(**kwargs)
def set_view_interval(self, vmin, vmax):
self.axis.set_view_interval(vmin, vmax)
def set_data_interval(self, vmin, vmax):
self.axis.set_data_interval(vmin, vmax)
def set_bounds(self, vmin, vmax):
self.set_view_interval(vmin, vmax)
self.set_data_interval(vmin, vmax)
class Formatter(TickHelper):
"""
Convert the tick location to a string
"""
# some classes want to see all the locs to help format
# individual ones
locs = []
def __call__(self, x, pos=None):
"""Return the format for tick val x at position pos; pos=None
indicated unspecified"""
raise NotImplementedError('Derived must override')
def format_data(self, value):
return self.__call__(value)
def format_data_short(self, value):
"""return a short string version"""
return self.format_data(value)
def get_offset(self):
return ''
def set_locs(self, locs):
self.locs = locs
def fix_minus(self, s):
"""
Some classes may want to replace a hyphen for minus with the
proper unicode symbol (U+2212) for typographical correctness.
The default is to not replace it.
Note, if you use this method, e.g., in :meth:`format_data` or
call, you probably don't want to use it for
:meth:`format_data_short` since the toolbar uses this for
interactive coord reporting and I doubt we can expect GUIs
across platforms will handle the unicode correctly. So for
now the classes that override :meth:`fix_minus` should have an
explicit :meth:`format_data_short` method
"""
return s
class IndexFormatter(Formatter):
"""
format the position x to the nearest i-th label where i=int(x+0.5)
"""
def __init__(self, labels):
self.labels = labels
self.n = len(labels)
def __call__(self, x, pos=None):
"""Return the format for tick val x at position pos; pos=None
        indicates unspecified"""
i = int(x + 0.5)
if i < 0:
return ''
elif i >= self.n:
return ''
else:
return self.labels[i]
class NullFormatter(Formatter):
'Always return the empty string'
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
return ''
class FixedFormatter(Formatter):
'Return fixed strings for tick labels'
def __init__(self, seq):
"""
*seq* is a sequence of strings. For positions ``i < len(seq)`` return
*seq[i]* regardless of *x*. Otherwise return ''
"""
self.seq = seq
self.offset_string = ''
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
if pos is None or pos >= len(self.seq):
return ''
else:
return self.seq[pos]
def get_offset(self):
return self.offset_string
def set_offset_string(self, ofs):
self.offset_string = ofs
class FuncFormatter(Formatter):
"""
User defined function for formatting
The function should take in two inputs (tick value *x* and position *pos*)
and return a string
"""
def __init__(self, func):
self.func = func
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
return self.func(x, pos)
class FormatStrFormatter(Formatter):
"""
Use an old-style ('%' operator) format string to format the tick
"""
def __init__(self, fmt):
self.fmt = fmt
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
return self.fmt % x
class StrMethodFormatter(Formatter):
"""
Use a new-style format string (as used by `str.format()`)
to format the tick. The field formatting must be labeled `x`.
"""
def __init__(self, fmt):
self.fmt = fmt
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
return self.fmt.format(x=x)
class OldScalarFormatter(Formatter):
"""
Tick location is a plain old number.
"""
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
xmin, xmax = self.axis.get_view_interval()
d = abs(xmax - xmin)
return self.pprint_val(x, d)
def pprint_val(self, x, d):
#if the number is not too big and it's an int, format it as an
#int
if abs(x) < 1e4 and x == int(x):
return '%d' % x
if d < 1e-2:
fmt = '%1.3e'
elif d < 1e-1:
fmt = '%1.3f'
elif d > 1e5:
fmt = '%1.1e'
elif d > 10:
fmt = '%1.1f'
elif d > 1:
fmt = '%1.2f'
else:
fmt = '%1.3f'
s = fmt % x
#print d, x, fmt, s
tup = s.split('e')
if len(tup) == 2:
mantissa = tup[0].rstrip('0').rstrip('.')
sign = tup[1][0].replace('+', '')
exponent = tup[1][1:].lstrip('0')
s = '%se%s%s' % (mantissa, sign, exponent)
else:
s = s.rstrip('0').rstrip('.')
return s
class ScalarFormatter(Formatter):
"""
Tick location is a plain old number. If useOffset==True and the data range
is much smaller than the data average, then an offset will be determined
such that the tick labels are meaningful. Scientific notation is used for
data < 10^-n or data >= 10^m, where n and m are the power limits set using
set_powerlimits((n,m)). The defaults for these are controlled by the
axes.formatter.limits rc parameter.
"""
def __init__(self, useOffset=None, useMathText=None, useLocale=None):
# useOffset allows plotting small data ranges with large offsets: for
# example: [1+1e-9,1+2e-9,1+3e-9] useMathText will render the offset
# and scientific notation in mathtext
if useOffset is None:
useOffset = rcParams['axes.formatter.useoffset']
self.set_useOffset(useOffset)
self._usetex = rcParams['text.usetex']
if useMathText is None:
useMathText = rcParams['axes.formatter.use_mathtext']
self._useMathText = useMathText
self.orderOfMagnitude = 0
self.format = ''
self._scientific = True
self._powerlimits = rcParams['axes.formatter.limits']
if useLocale is None:
useLocale = rcParams['axes.formatter.use_locale']
self._useLocale = useLocale
def get_useOffset(self):
return self._useOffset
def set_useOffset(self, val):
if val in [True, False]:
self.offset = 0
self._useOffset = val
else:
self._useOffset = False
self.offset = val
useOffset = property(fget=get_useOffset, fset=set_useOffset)
def get_useLocale(self):
return self._useLocale
def set_useLocale(self, val):
if val is None:
self._useLocale = rcParams['axes.formatter.use_locale']
else:
self._useLocale = val
useLocale = property(fget=get_useLocale, fset=set_useLocale)
def fix_minus(self, s):
"""use a unicode minus rather than hyphen"""
if rcParams['text.usetex'] or not rcParams['axes.unicode_minus']:
return s
else:
return s.replace('-', '\u2212')
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
if len(self.locs) == 0:
return ''
else:
s = self.pprint_val(x)
return self.fix_minus(s)
def set_scientific(self, b):
'''True or False to turn scientific notation on or off
see also :meth:`set_powerlimits`
'''
self._scientific = bool(b)
def set_powerlimits(self, lims):
'''
Sets size thresholds for scientific notation.
e.g., ``formatter.set_powerlimits((-3, 4))`` sets the pre-2007 default
in which scientific notation is used for numbers less than 1e-3 or
greater than 1e4.
See also :meth:`set_scientific`.
'''
if len(lims) != 2:
raise ValueError("'lims' must be a sequence of length 2")
self._powerlimits = lims
def format_data_short(self, value):
"""return a short formatted string representation of a number"""
if self._useLocale:
return locale.format_string('%-12g', (value,))
else:
return '%-12g' % value
def format_data(self, value):
'return a formatted string representation of a number'
if self._useLocale:
s = locale.format_string('%1.10e', (value,))
else:
s = '%1.10e' % value
s = self._formatSciNotation(s)
return self.fix_minus(s)
def get_offset(self):
"""Return scientific notation, plus offset"""
if len(self.locs) == 0:
return ''
s = ''
if self.orderOfMagnitude or self.offset:
offsetStr = ''
sciNotStr = ''
if self.offset:
offsetStr = self.format_data(self.offset)
if self.offset > 0:
offsetStr = '+' + offsetStr
if self.orderOfMagnitude:
if self._usetex or self._useMathText:
sciNotStr = self.format_data(10 ** self.orderOfMagnitude)
else:
sciNotStr = '1e%d' % self.orderOfMagnitude
if self._useMathText:
if sciNotStr != '':
sciNotStr = r'\times\mathdefault{%s}' % sciNotStr
s = ''.join(('$', sciNotStr,
r'\mathdefault{', offsetStr, '}$'))
elif self._usetex:
if sciNotStr != '':
sciNotStr = r'\times%s' % sciNotStr
s = ''.join(('$', sciNotStr, offsetStr, '$'))
else:
s = ''.join((sciNotStr, offsetStr))
return self.fix_minus(s)
def set_locs(self, locs):
'set the locations of the ticks'
self.locs = locs
if len(self.locs) > 0:
vmin, vmax = self.axis.get_view_interval()
d = abs(vmax - vmin)
if self._useOffset:
self._set_offset(d)
self._set_orderOfMagnitude(d)
self._set_format(vmin, vmax)
def _set_offset(self, range):
# offset of 20,001 is 20,000, for example
locs = self.locs
if locs is None or not len(locs) or range == 0:
self.offset = 0
return
ave_loc = np.mean(locs)
        if ave_loc:  # don't want to take log10(0)
ave_oom = math.floor(math.log10(np.mean(np.absolute(locs))))
range_oom = math.floor(math.log10(range))
if np.absolute(ave_oom - range_oom) >= 3: # four sig-figs
p10 = 10 ** range_oom
if ave_loc < 0:
self.offset = (np.ceil(np.max(locs) / p10) * p10)
else:
self.offset = (np.floor(np.min(locs) / p10) * p10)
else:
self.offset = 0
def _set_orderOfMagnitude(self, range):
# if scientific notation is to be used, find the appropriate exponent
        # if using a numerical offset, find the exponent after applying the
# offset
if not self._scientific:
self.orderOfMagnitude = 0
return
locs = np.absolute(self.locs)
if self.offset:
oom = math.floor(math.log10(range))
else:
if locs[0] > locs[-1]:
val = locs[0]
else:
val = locs[-1]
if val == 0:
oom = 0
else:
oom = math.floor(math.log10(val))
if oom <= self._powerlimits[0]:
self.orderOfMagnitude = oom
elif oom >= self._powerlimits[1]:
self.orderOfMagnitude = oom
else:
self.orderOfMagnitude = 0
def _set_format(self, vmin, vmax):
# set the format string to format all the ticklabels
if len(self.locs) < 2:
# Temporarily augment the locations with the axis end points.
_locs = list(self.locs) + [vmin, vmax]
else:
_locs = self.locs
locs = (np.asarray(_locs) - self.offset) / 10. ** self.orderOfMagnitude
loc_range = np.ptp(locs)
# Curvilinear coordinates can yield two identical points.
if loc_range == 0:
loc_range = np.max(np.abs(locs))
# Both points might be zero.
if loc_range == 0:
loc_range = 1
if len(self.locs) < 2:
# We needed the end points only for the loc_range calculation.
locs = locs[:-2]
loc_range_oom = int(math.floor(math.log10(loc_range)))
# first estimate:
sigfigs = max(0, 3 - loc_range_oom)
# refined estimate:
thresh = 1e-3 * 10 ** loc_range_oom
while sigfigs >= 0:
if np.abs(locs - np.round(locs, decimals=sigfigs)).max() < thresh:
sigfigs -= 1
else:
break
sigfigs += 1
self.format = '%1.' + str(sigfigs) + 'f'
if self._usetex:
self.format = '$%s$' % self.format
elif self._useMathText:
            self.format = r'$\mathdefault{%s}$' % self.format
def pprint_val(self, x):
xp = (x - self.offset) / (10. ** self.orderOfMagnitude)
if np.absolute(xp) < 1e-8:
xp = 0
if self._useLocale:
return locale.format_string(self.format, (xp,))
else:
return self.format % xp
def _formatSciNotation(self, s):
# transform 1e+004 into 1e4, for example
if self._useLocale:
decimal_point = locale.localeconv()['decimal_point']
positive_sign = locale.localeconv()['positive_sign']
else:
decimal_point = '.'
positive_sign = '+'
tup = s.split('e')
try:
significand = tup[0].rstrip('0').rstrip(decimal_point)
sign = tup[1][0].replace(positive_sign, '')
exponent = tup[1][1:].lstrip('0')
if self._useMathText or self._usetex:
if significand == '1' and exponent != '':
# reformat 1x10^y as 10^y
significand = ''
if exponent:
exponent = '10^{%s%s}' % (sign, exponent)
if significand and exponent:
return r'%s{\times}%s' % (significand, exponent)
else:
return r'%s%s' % (significand, exponent)
else:
s = ('%se%s%s' % (significand, sign, exponent)).rstrip('e')
return s
except IndexError:
return s
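# Illustrative usage sketch (not part of the original module): how a
# ScalarFormatter is typically attached to an axis. `ax` is assumed to be a
# matplotlib Axes created elsewhere; the power limits are arbitrary examples.
def _example_scalar_formatter_usage(ax):
    fmt = ScalarFormatter(useOffset=True, useMathText=True)
    # Use scientific notation for data below 1e-3 or at/above 1e4.
    fmt.set_powerlimits((-3, 4))
    ax.yaxis.set_major_formatter(fmt)
    return fmt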
class LogFormatter(Formatter):
"""
Format values for log axis;
"""
def __init__(self, base=10.0, labelOnlyBase=True):
"""
*base* is used to locate the decade tick,
which will be the only one to be labeled if *labelOnlyBase*
        is ``True``
"""
self._base = base + 0.0
self.labelOnlyBase = labelOnlyBase
def base(self, base):
"""change the *base* for labeling - warning: should always match the
base used for :class:`LogLocator`"""
self._base = base
def label_minor(self, labelOnlyBase):
'switch on/off minor ticks labeling'
self.labelOnlyBase = labelOnlyBase
def __call__(self, x, pos=None):
"""Return the format for tick val *x* at position *pos*"""
vmin, vmax = self.axis.get_view_interval()
d = abs(vmax - vmin)
b = self._base
if x == 0.0:
return '0'
sign = np.sign(x)
# only label the decades
fx = math.log(abs(x)) / math.log(b)
isDecade = is_close_to_int(fx)
if not isDecade and self.labelOnlyBase:
s = ''
elif x > 10000:
s = '%1.0e' % x
elif x < 1:
s = '%1.0e' % x
else:
s = self.pprint_val(x, d)
if sign == -1:
s = '-%s' % s
return self.fix_minus(s)
def format_data(self, value):
b = self.labelOnlyBase
self.labelOnlyBase = False
value = cbook.strip_math(self.__call__(value))
self.labelOnlyBase = b
return value
def format_data_short(self, value):
'return a short formatted string representation of a number'
return '%-12g' % value
def pprint_val(self, x, d):
        # if the number is not too big and it's an int, format it as an int
if abs(x) < 1e4 and x == int(x):
return '%d' % x
if d < 1e-2:
fmt = '%1.3e'
elif d < 1e-1:
fmt = '%1.3f'
elif d > 1e5:
fmt = '%1.1e'
elif d > 10:
fmt = '%1.1f'
elif d > 1:
fmt = '%1.2f'
else:
fmt = '%1.3f'
s = fmt % x
#print d, x, fmt, s
tup = s.split('e')
if len(tup) == 2:
mantissa = tup[0].rstrip('0').rstrip('.')
sign = tup[1][0].replace('+', '')
exponent = tup[1][1:].lstrip('0')
s = '%se%s%s' % (mantissa, sign, exponent)
else:
s = s.rstrip('0').rstrip('.')
return s
class LogFormatterExponent(LogFormatter):
"""
Format values for log axis; using ``exponent = log_base(value)``
"""
def __call__(self, x, pos=None):
"""Return the format for tick val *x* at position *pos*"""
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
d = abs(vmax - vmin)
b = self._base
if x == 0:
return '0'
sign = np.sign(x)
# only label the decades
fx = math.log(abs(x)) / math.log(b)
isDecade = is_close_to_int(fx)
if not isDecade and self.labelOnlyBase:
s = ''
elif abs(fx) > 10000:
s = '%1.0g' % fx
elif abs(fx) < 1:
s = '%1.0g' % fx
else:
s = self.pprint_val(fx, d)
if sign == -1:
s = '-%s' % s
return self.fix_minus(s)
class LogFormatterMathtext(LogFormatter):
"""
Format values for log axis; using ``exponent = log_base(value)``
"""
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
b = self._base
usetex = rcParams['text.usetex']
# only label the decades
if x == 0:
if usetex:
return '$0$'
else:
return '$\mathdefault{0}$'
fx = math.log(abs(x)) / math.log(b)
is_decade = is_close_to_int(fx)
sign_string = '-' if x < 0 else ''
# use string formatting of the base if it is not an integer
if b % 1 == 0.0:
base = '%d' % b
else:
base = '%s' % b
if not is_decade and self.labelOnlyBase:
return ''
elif not is_decade:
if usetex:
return (r'$%s%s^{%.2f}$') % \
(sign_string, base, fx)
else:
return ('$\mathdefault{%s%s^{%.2f}}$') % \
(sign_string, base, fx)
else:
if usetex:
return (r'$%s%s^{%d}$') % (sign_string,
base,
nearest_long(fx))
else:
return (r'$\mathdefault{%s%s^{%d}}$') % (sign_string,
base,
nearest_long(fx))
class LogitFormatter(Formatter):
'''Probability formatter (using Math text)'''
def __call__(self, x, pos=None):
s = ''
if 0.01 <= x <= 0.99:
s = '{:.2f}'.format(x)
elif x < 0.01:
if is_decade(x):
s = '$10^{{{:.0f}}}$'.format(np.log10(x))
else:
s = '${:.5f}$'.format(x)
else: # x > 0.99
if is_decade(1-x):
s = '$1-10^{{{:.0f}}}$'.format(np.log10(1-x))
else:
s = '$1-{:.5f}$'.format(1-x)
return s
def format_data_short(self, value):
'return a short formatted string representation of a number'
return '%-12g' % value
class EngFormatter(Formatter):
"""
Formats axis values using engineering prefixes to represent powers of 1000,
plus a specified unit, e.g., 10 MHz instead of 1e7.
"""
# the unicode for -6 is the greek letter mu
    # commented here due to bug in pep8
# (https://github.com/jcrocholl/pep8/issues/271)
# The SI engineering prefixes
ENG_PREFIXES = {
-24: "y",
-21: "z",
-18: "a",
-15: "f",
-12: "p",
-9: "n",
-6: "\u03bc",
-3: "m",
0: "",
3: "k",
6: "M",
9: "G",
12: "T",
15: "P",
18: "E",
21: "Z",
24: "Y"
}
def __init__(self, unit="", places=None):
self.unit = unit
self.places = places
def __call__(self, x, pos=None):
s = "%s%s" % (self.format_eng(x), self.unit)
return self.fix_minus(s)
def format_eng(self, num):
""" Formats a number in engineering notation, appending a letter
representing the power of 1000 of the original number. Some examples:
>>> format_eng(0) # for self.places = 0
'0'
>>> format_eng(1000000) # for self.places = 1
'1.0 M'
>>> format_eng("-1e-6") # for self.places = 2
u'-1.00 \u03bc'
@param num: the value to represent
@type num: either a numeric value or a string that can be converted to
a numeric value (as per decimal.Decimal constructor)
@return: engineering formatted string
"""
dnum = decimal.Decimal(str(num))
sign = 1
if dnum < 0:
sign = -1
dnum = -dnum
if dnum != 0:
pow10 = decimal.Decimal(int(math.floor(dnum.log10() / 3) * 3))
else:
pow10 = decimal.Decimal(0)
pow10 = pow10.min(max(self.ENG_PREFIXES.keys()))
pow10 = pow10.max(min(self.ENG_PREFIXES.keys()))
prefix = self.ENG_PREFIXES[int(pow10)]
mant = sign * dnum / (10 ** pow10)
if self.places is None:
format_str = "%g %s"
elif self.places == 0:
format_str = "%i %s"
elif self.places > 0:
format_str = ("%%.%if %%s" % self.places)
formatted = format_str % (mant, prefix)
return formatted.strip()
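# Illustrative usage sketch (not part of the original module): label an axis
# with engineering prefixes, e.g. 10000000 -> "10.0 MHz". `ax` is assumed to
# be a matplotlib Axes created elsewhere.
def _example_eng_formatter_usage(ax):
    formatter = EngFormatter(unit='Hz', places=1)
    ax.xaxis.set_major_formatter(formatter)
    # format_eng() can also be called directly:
    # EngFormatter(places=1).format_eng(1e7) -> '10.0 M'
    return formatter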
class Locator(TickHelper):
"""
Determine the tick locations;
Note, you should not use the same locator between different
:class:`~matplotlib.axis.Axis` because the locator stores references to
the Axis data and view limits
"""
# Some automatic tick locators can generate so many ticks they
# kill the machine when you try and render them.
# This parameter is set to cause locators to raise an error if too
# many ticks are generated.
MAXTICKS = 1000
def tick_values(self, vmin, vmax):
"""
Return the values of the located ticks given **vmin** and **vmax**.
.. note::
To get tick locations with the vmin and vmax values defined
automatically for the associated :attr:`axis` simply call
the Locator instance::
                >>> print(type(loc))
                <class 'Locator'>
                >>> print(loc())
                [1, 2, 3, 4]
"""
raise NotImplementedError('Derived must override')
def set_params(self, **kwargs):
"""
        Do nothing, and raise a warning. Any locator class not supporting the
set_params() function will call this.
"""
warnings.warn("'set_params()' not defined for locator of type " +
str(type(self)))
def __call__(self):
"""Return the locations of the ticks"""
# note: some locators return data limits, other return view limits,
# hence there is no *one* interface to call self.tick_values.
raise NotImplementedError('Derived must override')
def raise_if_exceeds(self, locs):
"""raise a RuntimeError if Locator attempts to create more than
MAXTICKS locs"""
if len(locs) >= self.MAXTICKS:
msg = ('Locator attempting to generate %d ticks from %s to %s: ' +
'exceeds Locator.MAXTICKS') % (len(locs), locs[0], locs[-1])
raise RuntimeError(msg)
return locs
def view_limits(self, vmin, vmax):
"""
select a scale for the range from vmin to vmax
Normally this method is overridden by subclasses to
change locator behaviour.
"""
return mtransforms.nonsingular(vmin, vmax)
def autoscale(self):
"""autoscale the view limits"""
return self.view_limits(*self.axis.get_view_interval())
def pan(self, numsteps):
"""Pan numticks (can be positive or negative)"""
ticks = self()
numticks = len(ticks)
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
if numticks > 2:
step = numsteps * abs(ticks[0] - ticks[1])
else:
d = abs(vmax - vmin)
step = numsteps * d / 6.
vmin += step
vmax += step
self.axis.set_view_interval(vmin, vmax, ignore=True)
def zoom(self, direction):
"Zoom in/out on axis; if direction is >0 zoom in, else zoom out"
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
interval = abs(vmax - vmin)
step = 0.1 * interval * direction
self.axis.set_view_interval(vmin + step, vmax - step, ignore=True)
def refresh(self):
"""refresh internal information based on current lim"""
pass
class IndexLocator(Locator):
"""
Place a tick on every multiple of some base number of points
plotted, e.g., on every 5th point. It is assumed that you are doing
index plotting; i.e., the axis is 0, len(data). This is mainly
useful for x ticks.
"""
def __init__(self, base, offset):
'place ticks on the i-th data points where (i-offset)%base==0'
self._base = base
self.offset = offset
def set_params(self, base=None, offset=None):
"""Set parameters within this locator"""
if base is not None:
self._base = base
if offset is not None:
self.offset = offset
def __call__(self):
"""Return the locations of the ticks"""
dmin, dmax = self.axis.get_data_interval()
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
return self.raise_if_exceeds(
np.arange(vmin + self.offset, vmax + 1, self._base))
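# Illustrative usage sketch (not part of the original module): with index
# plotting (x values 0..len(data)-1), place a tick on every 5th data point
# starting at 0. `ax` is assumed to be a matplotlib Axes created elsewhere.
def _example_index_locator_usage(ax):
    ax.xaxis.set_major_locator(IndexLocator(base=5, offset=0))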
class FixedLocator(Locator):
"""
Tick locations are fixed. If nbins is not None,
the array of possible positions will be subsampled to
keep the number of ticks <= nbins +1.
The subsampling will be done so as to include the smallest
absolute value; for example, if zero is included in the
array of possibilities, then it is guaranteed to be one of
the chosen ticks.
"""
def __init__(self, locs, nbins=None):
self.locs = np.asarray(locs)
self.nbins = nbins
if self.nbins is not None:
self.nbins = max(self.nbins, 2)
def set_params(self, nbins=None):
"""Set parameters within this locator."""
if nbins is not None:
self.nbins = nbins
def __call__(self):
return self.tick_values(None, None)
def tick_values(self, vmin, vmax):
""""
Return the locations of the ticks.
.. note::
Because the values are fixed, vmin and vmax are not used in this
method.
"""
if self.nbins is None:
return self.locs
step = max(int(0.99 + len(self.locs) / float(self.nbins)), 1)
ticks = self.locs[::step]
for i in range(1, step):
ticks1 = self.locs[i::step]
if np.absolute(ticks1).min() < np.absolute(ticks).min():
ticks = ticks1
return self.raise_if_exceeds(ticks)
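# Illustrative usage sketch (not part of the original module): ticks limited
# to a fixed set of positions, subsampled to at most nbins + 1 ticks. `ax` is
# assumed to be a matplotlib Axes created elsewhere.
def _example_fixed_locator_usage(ax):
    # With nbins=3, the positions are subsampled to [0, 5, 50]; the subsample
    # containing the smallest absolute value (here 0) is always the one kept.
    ax.xaxis.set_major_locator(FixedLocator([0, 1, 2, 5, 10, 20, 50], nbins=3))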
class NullLocator(Locator):
"""
No ticks
"""
def __call__(self):
return self.tick_values(None, None)
def tick_values(self, vmin, vmax):
""""
Return the locations of the ticks.
.. note::
Because the values are Null, vmin and vmax are not used in this
method.
"""
return []
class LinearLocator(Locator):
"""
Determine the tick locations
The first time this function is called it will try to set the
number of ticks to make a nice tick partitioning. Thereafter the
number of ticks will be fixed so that interactive navigation will
be nice
"""
def __init__(self, numticks=None, presets=None):
"""
        Use presets to set locs based on the view limits: a dict mapping
        (vmin, vmax) -> locs.
"""
self.numticks = numticks
if presets is None:
self.presets = {}
else:
self.presets = presets
def set_params(self, numticks=None, presets=None):
"""Set parameters within this locator."""
if presets is not None:
self.presets = presets
if numticks is not None:
self.numticks = numticks
def __call__(self):
'Return the locations of the ticks'
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
if vmax < vmin:
vmin, vmax = vmax, vmin
if (vmin, vmax) in self.presets:
return self.presets[(vmin, vmax)]
if self.numticks is None:
self._set_numticks()
if self.numticks == 0:
return []
ticklocs = np.linspace(vmin, vmax, self.numticks)
return self.raise_if_exceeds(ticklocs)
def _set_numticks(self):
self.numticks = 11 # todo; be smart here; this is just for dev
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
if vmax < vmin:
vmin, vmax = vmax, vmin
if vmin == vmax:
vmin -= 1
vmax += 1
exponent, remainder = divmod(math.log10(vmax - vmin), 1)
if remainder < 0.5:
exponent -= 1
scale = 10 ** (-exponent)
vmin = math.floor(scale * vmin) / scale
vmax = math.ceil(scale * vmax) / scale
return mtransforms.nonsingular(vmin, vmax)
def closeto(x, y):
if abs(x - y) < 1e-10:
return True
else:
return False
class Base(object):
'this solution has some hacks to deal with floating point inaccuracies'
def __init__(self, base):
if base <= 0:
raise ValueError("'base' must be positive")
self._base = base
def lt(self, x):
'return the largest multiple of base < x'
d, m = divmod(x, self._base)
if closeto(m, 0) and not closeto(m / self._base, 1):
return (d - 1) * self._base
return d * self._base
def le(self, x):
'return the largest multiple of base <= x'
d, m = divmod(x, self._base)
if closeto(m / self._base, 1): # was closeto(m, self._base)
#looks like floating point error
return (d + 1) * self._base
return d * self._base
def gt(self, x):
'return the smallest multiple of base > x'
d, m = divmod(x, self._base)
if closeto(m / self._base, 1):
#looks like floating point error
return (d + 2) * self._base
return (d + 1) * self._base
def ge(self, x):
'return the smallest multiple of base >= x'
d, m = divmod(x, self._base)
if closeto(m, 0) and not closeto(m / self._base, 1):
return d * self._base
return (d + 1) * self._base
def get_base(self):
return self._base
class MultipleLocator(Locator):
"""
Set a tick on every integer that is multiple of base in the
view interval
"""
def __init__(self, base=1.0):
self._base = Base(base)
def set_params(self, base):
"""Set parameters within this locator."""
if base is not None:
self._base = base
def __call__(self):
'Return the locations of the ticks'
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
if vmax < vmin:
vmin, vmax = vmax, vmin
vmin = self._base.ge(vmin)
base = self._base.get_base()
n = (vmax - vmin + 0.001 * base) // base
locs = vmin - base + np.arange(n + 3) * base
return self.raise_if_exceeds(locs)
def view_limits(self, dmin, dmax):
"""
Set the view limits to the nearest multiples of base that
contain the data
"""
vmin = self._base.le(dmin)
vmax = self._base.ge(dmax)
if vmin == vmax:
vmin -= 1
vmax += 1
return mtransforms.nonsingular(vmin, vmax)
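# Illustrative usage sketch (not part of the original module): place major
# ticks on every multiple of 0.5 within the view interval. `ax` is assumed to
# be a matplotlib Axes created elsewhere.
def _example_multiple_locator_usage(ax):
    ax.yaxis.set_major_locator(MultipleLocator(base=0.5))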
def scale_range(vmin, vmax, n=1, threshold=100):
dv = abs(vmax - vmin)
if dv == 0: # maxabsv == 0 is a special case of this.
return 1.0, 0.0
# Note: this should never occur because
# vmin, vmax should have been checked by nonsingular(),
# and spread apart if necessary.
meanv = 0.5 * (vmax + vmin)
if abs(meanv) / dv < threshold:
offset = 0
elif meanv > 0:
ex = divmod(math.log10(meanv), 1)[0]
offset = 10 ** ex
else:
ex = divmod(math.log10(-meanv), 1)[0]
offset = -10 ** ex
ex = divmod(math.log10(dv / n), 1)[0]
scale = 10 ** ex
return scale, offset
class MaxNLocator(Locator):
"""
Select no more than N intervals at nice locations.
"""
default_params = dict(nbins=10,
steps=None,
trim=True,
integer=False,
symmetric=False,
prune=None)
def __init__(self, *args, **kwargs):
"""
Keyword args:
*nbins*
Maximum number of intervals; one less than max number of ticks.
*steps*
Sequence of nice numbers starting with 1 and ending with 10;
e.g., [1, 2, 4, 5, 10]
*integer*
If True, ticks will take only integer values.
*symmetric*
If True, autoscaling will result in a range symmetric
about zero.
*prune*
['lower' | 'upper' | 'both' | None]
Remove edge ticks -- useful for stacked or ganged plots
where the upper tick of one axes overlaps with the lower
tick of the axes above it.
If prune=='lower', the smallest tick will
be removed. If prune=='upper', the largest tick will be
removed. If prune=='both', the largest and smallest ticks
will be removed. If prune==None, no ticks will be removed.
"""
# I left "trim" out; it defaults to True, and it is not
# clear that there is any use case for False, so we may
# want to remove that kwarg. EF 2010/04/18
if args:
kwargs['nbins'] = args[0]
if len(args) > 1:
raise ValueError(
"Keywords are required for all arguments except 'nbins'")
self.set_params(**self.default_params)
self.set_params(**kwargs)
def set_params(self, **kwargs):
"""Set parameters within this locator."""
if 'nbins' in kwargs:
self._nbins = int(kwargs['nbins'])
if 'trim' in kwargs:
self._trim = kwargs['trim']
if 'integer' in kwargs:
self._integer = kwargs['integer']
if 'symmetric' in kwargs:
self._symmetric = kwargs['symmetric']
if 'prune' in kwargs:
prune = kwargs['prune']
if prune is not None and prune not in ['upper', 'lower', 'both']:
raise ValueError(
"prune must be 'upper', 'lower', 'both', or None")
self._prune = prune
if 'steps' in kwargs:
steps = kwargs['steps']
if steps is None:
self._steps = [1, 1.5, 2, 2.5, 3, 4, 5, 6, 8, 10]
else:
if int(steps[-1]) != 10:
steps = list(steps)
steps.append(10)
self._steps = steps
if 'integer' in kwargs:
self._integer = kwargs['integer']
if self._integer:
self._steps = [n for n in self._steps if divmod(n, 1)[1] < 0.001]
def bin_boundaries(self, vmin, vmax):
nbins = self._nbins
scale, offset = scale_range(vmin, vmax, nbins)
if self._integer:
scale = max(1, scale)
vmin = vmin - offset
vmax = vmax - offset
raw_step = (vmax - vmin) / nbins
scaled_raw_step = raw_step / scale
best_vmax = vmax
best_vmin = vmin
for step in self._steps:
if step < scaled_raw_step:
continue
step *= scale
best_vmin = step * divmod(vmin, step)[0]
best_vmax = best_vmin + step * nbins
if (best_vmax >= vmax):
break
if self._trim:
extra_bins = int(divmod((best_vmax - vmax), step)[0])
nbins -= extra_bins
return (np.arange(nbins + 1) * step + best_vmin + offset)
def __call__(self):
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=1e-13,
tiny=1e-14)
locs = self.bin_boundaries(vmin, vmax)
prune = self._prune
if prune == 'lower':
locs = locs[1:]
elif prune == 'upper':
locs = locs[:-1]
elif prune == 'both':
locs = locs[1:-1]
return self.raise_if_exceeds(locs)
def view_limits(self, dmin, dmax):
if self._symmetric:
maxabs = max(abs(dmin), abs(dmax))
dmin = -maxabs
dmax = maxabs
dmin, dmax = mtransforms.nonsingular(dmin, dmax, expander=1e-12,
tiny=1.e-13)
return np.take(self.bin_boundaries(dmin, dmax), [0, -1])
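# Illustrative usage sketch (not part of the original module): at most 4
# intervals, at "nice" multiples of 1, 2, 5 or 10, with both edge ticks
# pruned (useful for stacked subplots). `ax` is assumed to be a matplotlib
# Axes created elsewhere.
def _example_max_n_locator_usage(ax):
    ax.yaxis.set_major_locator(
        MaxNLocator(nbins=4, steps=[1, 2, 5, 10], prune='both'))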
def decade_down(x, base=10):
'floor x to the nearest lower decade'
if x == 0.0:
return -base
lx = np.floor(np.log(x) / np.log(base))
return base ** lx
def decade_up(x, base=10):
'ceil x to the nearest higher decade'
if x == 0.0:
return base
lx = np.ceil(np.log(x) / np.log(base))
return base ** lx
def nearest_long(x):
if x == 0:
return long(0)
elif x > 0:
return long(x + 0.5)
else:
return long(x - 0.5)
def is_decade(x, base=10):
if not np.isfinite(x):
return False
if x == 0.0:
return True
lx = np.log(np.abs(x)) / np.log(base)
return is_close_to_int(lx)
def is_close_to_int(x):
if not np.isfinite(x):
return False
return abs(x - nearest_long(x)) < 1e-10
class LogLocator(Locator):
"""
Determine the tick locations for log axes
"""
def __init__(self, base=10.0, subs=[1.0], numdecs=4, numticks=15):
"""
        place ticks on the locations: base**i * subs[j]
"""
self.base(base)
self.subs(subs)
# this needs to be validated > 1 with traitlets
self.numticks = numticks
self.numdecs = numdecs
def set_params(self, base=None, subs=None, numdecs=None, numticks=None):
"""Set parameters within this locator."""
if base is not None:
self.base = base
if subs is not None:
self.subs = subs
if numdecs is not None:
self.numdecs = numdecs
if numticks is not None:
self.numticks = numticks
def base(self, base):
"""
set the base of the log scaling (major tick every base**i, i integer)
"""
self._base = base + 0.0
def subs(self, subs):
"""
        set the minor ticks for the log scaling every base**i*subs[j]
"""
if subs is None:
self._subs = None # autosub
else:
self._subs = np.asarray(subs) + 0.0
def __call__(self):
'Return the locations of the ticks'
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
b = self._base
# dummy axis has no axes attribute
if hasattr(self.axis, 'axes') and self.axis.axes.name == 'polar':
vmax = math.ceil(math.log(vmax) / math.log(b))
decades = np.arange(vmax - self.numdecs, vmax)
ticklocs = b ** decades
return ticklocs
if vmin <= 0.0:
if self.axis is not None:
vmin = self.axis.get_minpos()
if vmin <= 0.0 or not np.isfinite(vmin):
raise ValueError(
"Data has no positive values, and therefore can not be "
"log-scaled.")
vmin = math.log(vmin) / math.log(b)
vmax = math.log(vmax) / math.log(b)
if vmax < vmin:
vmin, vmax = vmax, vmin
numdec = math.floor(vmax) - math.ceil(vmin)
if self._subs is None: # autosub
if numdec > 10:
subs = np.array([1.0])
elif numdec > 6:
subs = np.arange(2.0, b, 2.0)
else:
subs = np.arange(2.0, b)
else:
subs = self._subs
stride = 1
if not self.numticks > 1:
raise RuntimeError('The number of ticks must be greater than 1 '
'for LogLocator.')
while numdec / stride + 1 > self.numticks:
stride += 1
decades = np.arange(math.floor(vmin) - stride,
math.ceil(vmax) + 2 * stride, stride)
if hasattr(self, '_transform'):
ticklocs = self._transform.inverted().transform(decades)
            if len(subs) > 1 or (len(subs) == 1 and subs[0] != 1.0):
ticklocs = np.ravel(np.outer(subs, ticklocs))
else:
            if len(subs) > 1 or (len(subs) == 1 and subs[0] != 1.0):
ticklocs = []
for decadeStart in b ** decades:
ticklocs.extend(subs * decadeStart)
else:
ticklocs = b ** decades
return self.raise_if_exceeds(np.asarray(ticklocs))
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
b = self._base
if vmax < vmin:
vmin, vmax = vmax, vmin
if self.axis.axes.name == 'polar':
vmax = math.ceil(math.log(vmax) / math.log(b))
vmin = b ** (vmax - self.numdecs)
return vmin, vmax
minpos = self.axis.get_minpos()
if minpos <= 0 or not np.isfinite(minpos):
raise ValueError(
"Data has no positive values, and therefore can not be "
"log-scaled.")
if vmin <= minpos:
vmin = minpos
if not is_decade(vmin, self._base):
vmin = decade_down(vmin, self._base)
if not is_decade(vmax, self._base):
vmax = decade_up(vmax, self._base)
if vmin == vmax:
vmin = decade_down(vmin, self._base)
vmax = decade_up(vmax, self._base)
result = mtransforms.nonsingular(vmin, vmax)
return result
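# Illustrative usage sketch (not part of the original module): major ticks on
# every decade, plus minor ticks at 2x and 5x of each decade. `ax` is assumed
# to be a matplotlib Axes created elsewhere, typically with a log-scaled axis.
def _example_log_locator_usage(ax):
    ax.yaxis.set_major_locator(LogLocator(base=10.0, subs=[1.0]))
    ax.yaxis.set_minor_locator(LogLocator(base=10.0, subs=[2.0, 5.0]))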
class SymmetricalLogLocator(Locator):
"""
Determine the tick locations for log axes
"""
def __init__(self, transform, subs=None):
"""
        place ticks on the locations: base**i * subs[j]
"""
self._transform = transform
if subs is None:
self._subs = [1.0]
else:
self._subs = subs
self.numticks = 15
def set_params(self, subs=None, numticks=None):
"""Set parameters within this locator."""
if numticks is not None:
self.numticks = numticks
if subs is not None:
self._subs = subs
def __call__(self):
'Return the locations of the ticks'
# Note, these are untransformed coordinates
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
b = self._transform.base
t = self._transform.linthresh
if vmax < vmin:
vmin, vmax = vmax, vmin
# The domain is divided into three sections, only some of
# which may actually be present.
#
# <======== -t ==0== t ========>
# aaaaaaaaa bbbbb ccccccccc
#
# a) and c) will have ticks at integral log positions. The
# number of ticks needs to be reduced if there are more
# than self.numticks of them.
#
# b) has a tick at 0 and only 0 (we assume t is a small
# number, and the linear segment is just an implementation
# detail and not interesting.)
#
# We could also add ticks at t, but that seems to usually be
# uninteresting.
#
# "simple" mode is when the range falls entirely within (-t,
# t) -- it should just display (vmin, 0, vmax)
has_a = has_b = has_c = False
if vmin < -t:
has_a = True
if vmax > -t:
has_b = True
if vmax > t:
has_c = True
elif vmin < 0:
if vmax > 0:
has_b = True
if vmax > t:
has_c = True
else:
return [vmin, vmax]
elif vmin < t:
if vmax > t:
has_b = True
has_c = True
else:
return [vmin, vmax]
else:
has_c = True
def get_log_range(lo, hi):
lo = np.floor(np.log(lo) / np.log(b))
hi = np.ceil(np.log(hi) / np.log(b))
return lo, hi
# First, calculate all the ranges, so we can determine striding
if has_a:
if has_b:
a_range = get_log_range(t, -vmin + 1)
else:
a_range = get_log_range(-vmax, -vmin + 1)
else:
a_range = (0, 0)
if has_c:
if has_b:
c_range = get_log_range(t, vmax + 1)
else:
c_range = get_log_range(vmin, vmax + 1)
else:
c_range = (0, 0)
total_ticks = (a_range[1] - a_range[0]) + (c_range[1] - c_range[0])
if has_b:
total_ticks += 1
stride = max(np.floor(float(total_ticks) / (self.numticks - 1)), 1)
decades = []
if has_a:
decades.extend(-1 * (b ** (np.arange(a_range[0], a_range[1],
stride)[::-1])))
if has_b:
decades.append(0.0)
if has_c:
decades.extend(b ** (np.arange(c_range[0], c_range[1], stride)))
# Add the subticks if requested
if self._subs is None:
subs = np.arange(2.0, b)
else:
subs = np.asarray(self._subs)
if len(subs) > 1 or subs[0] != 1.0:
ticklocs = []
for decade in decades:
ticklocs.extend(subs * decade)
else:
ticklocs = decades
return self.raise_if_exceeds(np.array(ticklocs))
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
b = self._transform.base
if vmax < vmin:
vmin, vmax = vmax, vmin
if not is_decade(abs(vmin), b):
if vmin < 0:
vmin = -decade_up(-vmin, b)
else:
vmin = decade_down(vmin, b)
if not is_decade(abs(vmax), b):
if vmax < 0:
vmax = -decade_down(-vmax, b)
else:
vmax = decade_up(vmax, b)
if vmin == vmax:
if vmin < 0:
vmin = -decade_up(-vmin, b)
vmax = -decade_down(-vmax, b)
else:
vmin = decade_down(vmin, b)
vmax = decade_up(vmax, b)
result = mtransforms.nonsingular(vmin, vmax)
return result
class LogitLocator(Locator):
"""
Determine the tick locations for logit axes
"""
def __init__(self, minor=False):
"""
place ticks on the logit locations
"""
self.minor = minor
def set_params(self, minor=None):
"""Set parameters within this locator."""
if minor is not None:
self.minor = minor
def __call__(self):
'Return the locations of the ticks'
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
# dummy axis has no axes attribute
if hasattr(self.axis, 'axes') and self.axis.axes.name == 'polar':
raise NotImplementedError('Polar axis cannot be logit scaled yet')
# what to do if a window beyond ]0, 1[ is chosen
if vmin <= 0.0:
if self.axis is not None:
vmin = self.axis.get_minpos()
if (vmin <= 0.0) or (not np.isfinite(vmin)):
raise ValueError(
"Data has no values in ]0, 1[ and therefore can not be "
"logit-scaled.")
# NOTE: for vmax, we should query a property similar to get_minpos, but
# related to the maximal, less-than-one data point. Unfortunately,
# get_minpos is defined very deep in the BBox and updated with data,
# so for now we use the trick below.
if vmax >= 1.0:
if self.axis is not None:
vmax = 1 - self.axis.get_minpos()
if (vmax >= 1.0) or (not np.isfinite(vmax)):
raise ValueError(
"Data has no values in ]0, 1[ and therefore can not be "
"logit-scaled.")
if vmax < vmin:
vmin, vmax = vmax, vmin
vmin = np.log10(vmin / (1 - vmin))
vmax = np.log10(vmax / (1 - vmax))
decade_min = np.floor(vmin)
decade_max = np.ceil(vmax)
# major ticks
if not self.minor:
ticklocs = []
if (decade_min <= -1):
expo = np.arange(decade_min, min(0, decade_max + 1))
ticklocs.extend(list(10**expo))
if (decade_min <= 0) and (decade_max >= 0):
ticklocs.append(0.5)
if (decade_max >= 1):
expo = -np.arange(max(1, decade_min), decade_max + 1)
ticklocs.extend(list(1 - 10**expo))
# minor ticks
else:
ticklocs = []
if (decade_min <= -2):
expo = np.arange(decade_min, min(-1, decade_max))
newticks = np.outer(np.arange(2, 10), 10**expo).ravel()
ticklocs.extend(list(newticks))
if (decade_min <= 0) and (decade_max >= 0):
ticklocs.extend([0.2, 0.3, 0.4, 0.6, 0.7, 0.8])
if (decade_max >= 2):
expo = -np.arange(max(2, decade_min), decade_max + 1)
newticks = 1 - np.outer(np.arange(2, 10), 10**expo).ravel()
ticklocs.extend(list(newticks))
return self.raise_if_exceeds(np.array(ticklocs))
class AutoLocator(MaxNLocator):
def __init__(self):
MaxNLocator.__init__(self, nbins=9, steps=[1, 2, 5, 10])
class AutoMinorLocator(Locator):
"""
Dynamically find minor tick positions based on the positions of
major ticks. Assumes the scale is linear and major ticks are
evenly spaced.
"""
def __init__(self, n=None):
"""
*n* is the number of subdivisions of the interval between
major ticks; e.g., n=2 will place a single minor tick midway
between major ticks.
If *n* is omitted or None, it will be set to 5 or 4.
"""
self.ndivs = n
def __call__(self):
'Return the locations of the ticks'
majorlocs = self.axis.get_majorticklocs()
try:
majorstep = majorlocs[1] - majorlocs[0]
except IndexError:
# Need at least two major ticks to find minor tick locations
# TODO: Figure out a way to still be able to display minor
# ticks without two major ticks visible. For now, just display
# no ticks at all.
majorstep = 0
if self.ndivs is None:
if majorstep == 0:
# TODO: Need a better way to figure out ndivs
ndivs = 1
else:
x = int(round(10 ** (np.log10(majorstep) % 1)))
if x in [1, 5, 10]:
ndivs = 5
else:
ndivs = 4
else:
ndivs = self.ndivs
minorstep = majorstep / ndivs
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
if len(majorlocs) > 0:
t0 = majorlocs[0]
tmin = ((vmin - t0) // minorstep + 1) * minorstep
tmax = ((vmax - t0) // minorstep + 1) * minorstep
locs = np.arange(tmin, tmax, minorstep) + t0
cond = np.abs((locs - t0) % majorstep) > minorstep / 10.0
locs = locs.compress(cond)
else:
locs = []
return self.raise_if_exceeds(np.array(locs))
def tick_values(self, vmin, vmax):
raise NotImplementedError('Cannot get tick locations for a '
'%s type.' % type(self))
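# Illustrative usage sketch (not part of the original module): two
# subdivisions per major interval, i.e. a single minor tick midway between
# major ticks. `ax` is assumed to be a matplotlib Axes with a linear scale.
def _example_auto_minor_locator_usage(ax):
    ax.xaxis.set_minor_locator(AutoMinorLocator(2))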
class OldAutoLocator(Locator):
"""
On autoscale this class picks the best MultipleLocator to set the
view limits and the tick locs.
"""
def __init__(self):
self._locator = LinearLocator()
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self.raise_if_exceeds(self._locator())
def tick_values(self, vmin, vmax):
raise NotImplementedError('Cannot get tick locations for a '
'%s type.' % type(self))
def refresh(self):
'refresh internal information based on current lim'
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
d = abs(vmax - vmin)
self._locator = self.get_locator(d)
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
d = abs(vmax - vmin)
self._locator = self.get_locator(d)
return self._locator.view_limits(vmin, vmax)
def get_locator(self, d):
'pick the best locator based on a distance'
d = abs(d)
if d <= 0:
locator = MultipleLocator(0.2)
else:
try:
ld = math.log10(d)
except OverflowError:
raise RuntimeError('AutoLocator illegal data interval range')
fld = math.floor(ld)
base = 10 ** fld
#if ld==fld: base = 10**(fld-1)
#else: base = 10**fld
if d >= 5 * base:
ticksize = base
elif d >= 2 * base:
ticksize = base / 2.0
else:
ticksize = base / 5.0
locator = MultipleLocator(ticksize)
return locator
__all__ = ('TickHelper', 'Formatter', 'FixedFormatter',
'NullFormatter', 'FuncFormatter', 'FormatStrFormatter',
'StrMethodFormatter', 'ScalarFormatter', 'LogFormatter',
'LogFormatterExponent', 'LogFormatterMathtext', 'Locator',
'IndexLocator', 'FixedLocator', 'NullLocator',
'LinearLocator', 'LogLocator', 'AutoLocator',
'MultipleLocator', 'MaxNLocator', 'AutoMinorLocator',
'SymmetricalLogLocator')
| mit |
SANDAG/spandex | spandex/targets/tests/test_scaling.py | 2 | 5985 | import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest
from spandex.targets import scaling as scl
@pytest.fixture(scope='module')
def col():
return pd.Series([1, 2, 3, 4, 5])
@pytest.fixture(scope='module')
def target_col():
return 'target_col'
@pytest.fixture(scope='module')
def df(target_col):
# a b a b a b a b a b
l = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
return pd.DataFrame(
{target_col: l,
'geo_id': ['a', 'b'] * 5,
'filter_col': [x + 100 for x in l]})
@pytest.mark.parametrize('metric', ['mean', 'median'])
def test_scale_col_to_target_mean_median(col, metric):
target = 600
expected = pd.Series([200, 400, 600, 800, 1000])
result = scl.scale_col_to_target(col, target, metric=metric)
assert getattr(result, metric)() == target
pdt.assert_series_equal(result, expected, check_dtype=False)
def test_scale_col_to_target_sum(col):
target = 16
expected = col * target / col.sum()
result = scl.scale_col_to_target(col, target, metric='sum')
assert result.sum() == target
pdt.assert_series_equal(result, expected)
def test_scale_col_to_target_clip(col):
target = 600
clip_low = 450
clip_high = 999
expected = pd.Series([450, 450, 600, 800, 999])
result = scl.scale_col_to_target(
col, target, metric='mean', clip_low=clip_low, clip_high=clip_high)
pdt.assert_series_equal(result, expected, check_dtype=False)
def test_scale_col_to_target_round(col):
target = 16
result = scl.scale_col_to_target(
col, target, metric='sum', int_result=True)
pdt.assert_series_equal(result, col)
def test_scale_to_targets(df, target_col):
targets = [100, 1000]
filters = [['geo_id == "a"', 'filter_col < 106'], 'geo_id == "b"']
metric = 'sum'
result = scl.scale_to_targets(df, target_col, targets, metric, filters)
pdt.assert_index_equal(result.columns, df.columns)
pdt.assert_series_equal(
result[target_col],
pd.Series(
[11.11111111, 66.66666667, 33.33333333, 133.33333333, 55.55555556,
200, 7, 266.66666667, 9, 333.33333333]),
check_dtype=False)
def test_scale_to_targets_no_segments(df, target_col):
target = [1000]
metric = 'mean'
result = scl.scale_to_targets(df, target_col, target, metric=metric)
pdt.assert_index_equal(result.columns, df.columns)
pdt.assert_series_equal(
result[target_col],
pd.Series(
[181.81818182, 363.63636364, 545.45454545, 727.27272727,
909.09090909, 1090.90909091, 1272.72727273, 1454.54545455,
1636.36363636, 1818.18181818]),
check_dtype=False)
def test_scale_to_targets_clip_int(df, target_col):
target = [1000]
metric = 'mean'
clip_low = 400
clip_high = 999.99
int_result = True
result = scl.scale_to_targets(
df, target_col, target, metric, clip_low=clip_low, clip_high=clip_high,
int_result=int_result)
pdt.assert_index_equal(result.columns, df.columns)
pdt.assert_series_equal(
result[target_col],
pd.Series([400, 400, 545, 727, 909, 1000, 1000, 1000, 1000, 1000]))
def test_scale_to_targets_from_table(df, target_col):
targets = pd.DataFrame(
{'column_name': [target_col, target_col],
'target_value': [100, 1000],
'target_metric': ['sum', 'sum'],
'filters': ['geo_id == "a",filter_col < 106', 'geo_id == "b"'],
'clip_low': [np.nan, np.nan],
'clip_high': [np.nan, np.nan],
'int_result': [np.nan, np.nan]})
result = scl.scale_to_targets_from_table(df, targets)
pdt.assert_index_equal(result.columns, df.columns)
pdt.assert_series_equal(
result[target_col],
pd.Series(
[11.11111111, 66.66666667, 33.33333333, 133.33333333, 55.55555556,
200, 7, 266.66666667, 9, 333.33333333]),
check_dtype=False)
def test_scale_to_targets_from_table_clip_int(df, target_col):
targets = pd.DataFrame(
{'column_name': [target_col],
'target_value': [1000],
'target_metric': ['mean'],
'filters': [np.nan],
'clip_low': [400],
'clip_high': [999.99],
'int_result': [True]})
result = scl.scale_to_targets_from_table(df, targets)
pdt.assert_index_equal(result.columns, df.columns)
pdt.assert_series_equal(
result[target_col],
pd.Series([400, 400, 545, 727, 909, 1000, 1000, 1000, 1000, 1000]))
def test_targets_row_to_params():
column = 'income'
target = 100000
metric = 'mean'
filters = 'geo_id == "a",filter_col < 106'
clip_low = 400
clip_high = 999.9
int_result = True
row = pd.Series(
[column, target, metric, filters, clip_low, clip_high, int_result],
index=[
'column_name', 'target_value', 'target_metric', 'filters',
'clip_low', 'clip_high', 'int_result'])
r = scl._targets_row_to_params(row)
assert r.column == column
assert r.target == target
assert r.metric == metric
assert r.filters == ['geo_id == "a"', 'filter_col < 106']
assert r.clip_low == clip_low
assert r.clip_high == clip_high
assert r.int_result == int_result
def test_targets_row_to_params_defaults():
column = 'income'
target = 100000
metric = 'mean'
filters = np.nan
clip_low = np.nan
clip_high = np.nan
int_result = np.nan
row = pd.Series(
[column, target, metric, filters, clip_low, clip_high, int_result],
index=[
'column_name', 'target_value', 'target_metric', 'filters',
'clip_low', 'clip_high', 'int_result'])
r = scl._targets_row_to_params(row)
assert r.column == column
assert r.target == target
assert r.metric == metric
assert r.filters is None
assert r.clip_low is None
assert r.clip_high is None
assert r.int_result is False
| bsd-3-clause |
credp/lisa | lisa/tests/scheduler/load_tracking.py | 1 | 46840 | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2016, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import os
import itertools
from statistics import mean
import pandas as pd
from lisa.tests.base import (
TestMetric, Result, ResultBundle, AggregatedResultBundle, TestBundleBase,
TestBundle, RTATestBundle
)
from lisa.target import Target
from lisa.utils import ArtifactPath, groupby, ExekallTaggable, add, memoized, kwargs_forwarded_to
from lisa.datautils import series_mean, df_window, df_filter_task_ids, series_refit_index, df_split_signals, df_refit_index, series_dereference
from lisa.wlgen.rta import RTA, RTAPhase, PeriodicWload
from lisa.trace import requires_events, may_use_events, MissingTraceEventError
from lisa.analysis.load_tracking import LoadTrackingAnalysis
from lisa.analysis.tasks import TasksAnalysis
from lisa.analysis.rta import RTAEventsAnalysis
from lisa.analysis.frequency import FrequencyAnalysis
from lisa.pelt import PELT_SCALE, simulate_pelt, pelt_settling_time, kernel_util_mean
UTIL_SCALE = PELT_SCALE
UTIL_CONVERGENCE_TIME_S = pelt_settling_time(1, init=0, final=1024)
"""
Time in seconds for util_avg to converge (i.e. ignored time)
"""
class LoadTrackingHelpers:
"""
Common bunch of helpers for load tracking tests.
"""
MAX_RTAPP_CALIB_DEVIATION = 3 / 100
"""
    Blacklist CPUs that have an rt-app calibration value that deviates too much
from the average calib value in their capacity class.
"""
@classmethod
def _get_blacklisted_cpus(cls, plat_info):
"""
:meta public:
Consider some CPUs as blacklisted when the load would not be
        proportional to utilization on them.
        That happens for CPUs that are busy executing other code than the test
        workload, like handling interrupts. We detect that by looking at the
        rt-app calibration value and blacklist outliers.
"""
rtapp_calib = plat_info['rtapp']['calib']
blacklisted = set()
# For each class of CPUs, get the average rtapp calibration value
# and blacklist the ones that are deviating too much from that
for cpu_class in plat_info['capacity-classes']:
calib_mean = mean(rtapp_calib[cpu] for cpu in cpu_class)
calib_max = (1 + cls.MAX_RTAPP_CALIB_DEVIATION) * calib_mean
blacklisted.update(
cpu
for cpu in cpu_class
                # exclude outliers that are too slow (i.e. calib value too large)
if rtapp_calib[cpu] > calib_max
)
return sorted(blacklisted)
@classmethod
def filter_capacity_classes(cls, plat_info):
"""
        Filter the ``capacity-classes`` key of ``plat_info`` to remove the
        blacklisted CPUs provided by :meth:`_get_blacklisted_cpus`.
"""
blacklisted_cpus = set(cls._get_blacklisted_cpus(plat_info))
return [
sorted(set(cpu_class) - blacklisted_cpus)
for cpu_class in plat_info['capacity-classes']
]
@classmethod
def correct_expected_pelt(cls, plat_info, cpu, signal_value):
"""
Correct an expected PELT signal from ``rt-app`` based on the calibration
values.
Since the instruction mix of ``rt-app`` might not be the same as the
benchmark that was used to establish CPU capacities, the duty cycle of
``rt-app`` will only be accurate on big CPUs. When we know on which CPU
the task actually executed, we can correct the expected value based on
the ratio of calibration values and CPU capacities.
"""
calib = plat_info['rtapp']['calib']
rtapp_capacities = plat_info['cpu-capacities']['rtapp']
orig_capacities = plat_info['cpu-capacities']['orig']
# Correct the signal mean to what it should have been if rt-app
# workload was exactly the same as the one used to establish CPU
# capacities
return signal_value * orig_capacities[cpu] / rtapp_capacities[cpu]
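# Illustrative sketch (not part of the original module), with made-up numbers:
# how the calibration-based blacklisting and the PELT correction above behave.
# The plat_info layout mirrors what the helpers expect, but every value here
# is hypothetical.
def _example_load_tracking_helpers():
    plat_info = {
        'rtapp': {'calib': {0: 361, 1: 362, 2: 500, 3: 100, 4: 101}},
        'capacity-classes': [[0, 1, 2], [3, 4]],
        'cpu-capacities': {
            'orig': {0: 446, 1: 446, 2: 446, 3: 1024, 4: 1024},
            'rtapp': {0: 400, 1: 400, 2: 400, 3: 1024, 4: 1024},
        },
    }
    # CPU2's calib (500) is more than 3% above its class mean (~408), so it is
    # blacklisted and dropped from its capacity class.
    print(LoadTrackingHelpers._get_blacklisted_cpus(plat_info))    # [2]
    print(LoadTrackingHelpers.filter_capacity_classes(plat_info))  # [[0, 1], [3, 4]]
    # A signal value of 300 measured on CPU0 is rescaled by the ratio of
    # original to rt-app capacity: 300 * 446 / 400 = 334.5
    print(LoadTrackingHelpers.correct_expected_pelt(plat_info, cpu=0,
                                                    signal_value=300))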
class LoadTrackingBase(RTATestBundle, LoadTrackingHelpers, TestBundle):
"""
Base class for shared functionality of load tracking tests
"""
cpufreq_conf = {
"governor": "performance"
}
"""
The cpufreq configuration used while the synthetic workload is being run.
Items are arguments to
:meth:`devlib.module.cpufreq.CpufreqModule.use_governor`.
"""
@classmethod
def _from_target(cls, target: Target, *, res_dir: ArtifactPath = None, collector=None) -> 'LoadTrackingBase':
plat_info = target.plat_info
rtapp_profile = cls.get_rtapp_profile(plat_info)
# After a bit of experimenting, it turns out that on some platforms
# misprediction of the idle time (which leads to a shallow idle state,
# a wakeup and another idle nap) can mess up the duty cycle of the
# rt-app task we're running. In our case, a 50% duty cycle, 16ms period
# task would always be active for 8ms, but it would sometimes sleep for
# only 5 or 6 ms.
# This is fine to do this here, as we only care about the proper
# behaviour of the signal on running/not-running tasks.
with target.disable_idle_states():
with target.cpufreq.use_governor(**cls.cpufreq_conf):
cls.run_rtapp(
target=target,
res_dir=res_dir,
profile=rtapp_profile,
collector=collector
)
return cls(res_dir, plat_info)
@staticmethod
def is_almost_equal(target, value, allowed_delta_pct):
"""
        Verify that ``value`` is reasonably close to ``target``
:returns: A tuple (bool, delta_pct)
"""
delta = value - target
delta_pct = delta / target * 100
equal = abs(delta_pct) <= allowed_delta_pct
return (equal, delta_pct)
class InvarianceItem(LoadTrackingBase, ExekallTaggable):
"""
Basic check for CPU and frequency invariant load and utilization tracking
**Expected Behaviour:**
Load tracking signals are scaled so that the workload results in
roughly the same util & load values regardless of compute power of the
CPU used and its frequency.
"""
task_prefix = 'invar'
cpufreq_conf = {
"governor": "userspace"
}
def __init__(self, res_dir, plat_info, cpu, freq, freq_list):
super().__init__(res_dir, plat_info)
self.freq = freq
self.freq_list = freq_list
self.cpu = cpu
@property
def rtapp_profile(self):
return self.get_rtapp_profile(self.plat_info, cpu=self.cpu, freq=self.freq)
@property
def task_name(self):
"""
The name of the only task this test uses
"""
tasks = self.rtapp_tasks
assert len(tasks) == 1
return tasks[0]
@property
def wlgen_task(self):
"""
The :class:`lisa.wlgen.rta.RTATask` description of the only rt-app
task, as specified in the profile.
"""
tasks = list(self.rtapp_profile.values())
assert len(tasks) == 1
return tasks[0]
def get_tags(self):
return {'cpu': f'{self.cpu}@{self.freq}'}
@classmethod
def _get_rtapp_profile(cls, plat_info, cpu, freq):
"""
:meta public:
        Get a specification for an rt-app workload with the specified duty
        cycle, pinned to the given CPU.
"""
freq_capa = cls._get_freq_capa(cpu, freq, plat_info)
duty_cycle_pct = freq_capa / UTIL_SCALE * 100
# Use half of the capacity at that OPP, so we are sure that the
# task will fit even at the lowest OPP
duty_cycle_pct //= 2
return {
f"{cls.task_prefix}{cpu}": RTAPhase(
prop_wload=PeriodicWload(
duty_cycle_pct=duty_cycle_pct,
duration=2,
period=cls.TASK_PERIOD,
),
prop_cpus=[cpu],
)
}
@classmethod
def _from_target(cls, target: Target, *, cpu: int, freq: int, freq_list=None, res_dir: ArtifactPath = None, collector=None) -> 'InvarianceItem':
"""
:meta public:
:param cpu: CPU to use, or ``None`` to automatically choose an
appropriate set of CPUs.
:type cpu: int or None
:param freq: Frequency to run at in kHz. It is only relevant in
combination with ``cpu``.
:type freq: int or None
"""
plat_info = target.plat_info
rtapp_profile = cls.get_rtapp_profile(plat_info, cpu=cpu, freq=freq)
logger = cls.get_logger()
with target.cpufreq.use_governor(**cls.cpufreq_conf):
target.cpufreq.set_frequency(cpu, freq)
logger.debug(f'CPU{cpu} frequency: {target.cpufreq.get_frequency(cpu)}')
cls.run_rtapp(
target=target,
res_dir=res_dir,
profile=rtapp_profile,
collector=collector
)
freq_list = freq_list or [freq]
return cls(res_dir, plat_info, cpu, freq, freq_list)
@staticmethod
def _get_freq_capa(cpu, freq, plat_info):
capacity = plat_info['cpu-capacities']['rtapp'][cpu]
# Scale the capacity linearly according to the frequency
max_freq = max(plat_info['freqs'][cpu])
capacity *= freq / max_freq
return capacity
@LoadTrackingAnalysis.df_task_signal.used_events
@LoadTrackingAnalysis.df_cpus_signal.used_events
@TasksAnalysis.df_task_activation.used_events
def get_simulated_pelt(self, task, signal_name):
"""
Simulate a PELT signal for a given task.
:param task: task to look for in the trace.
:type task: int or str or tuple(int, str)
:param signal_name: Name of the PELT signal to simulate.
:type signal_name: str
:return: A :class:`pandas.DataFrame` with a ``simulated`` column
containing the simulated signal, along with the column of the
signal as found in the trace.
"""
logger = self.get_logger()
trace = self.trace
task = trace.get_task_id(task)
cpus = trace.analysis.tasks.cpus_of_tasks([task])
df_activation = trace.analysis.tasks.df_task_activation(
task,
# Util only takes into account times where the task is actually
# executing
preempted_value=0,
)
df = trace.analysis.load_tracking.df_task_signal(task, signal_name)
df = df.copy(deep=False)
# Ignore the first activation, as its signals are incorrect
df_activation = df_activation.iloc[2:]
# Make sure the activation df does not start before the dataframe of
# signal values, otherwise we cannot provide a sensible init value
df_activation = df_activation[df.index[0]:]
# Get the initial signal value matching the first activation we will care about
init_iloc = df.index.get_loc(df_activation.index[0], method='ffill')
init = df[signal_name].iloc[init_iloc]
try:
# PELT clock in nanoseconds
clock = df['update_time'] * 1e-9
except KeyError:
if any(
self.plat_info['cpu-capacities']['rtapp'][cpu] != UTIL_SCALE
for phase in self.wlgen_task.phases
for cpu in phase['cpus']
):
ResultBundle.raise_skip('PELT time scaling can only be simulated when the PELT clock is available from the trace')
logger.warning('PELT clock is not available, ftrace timestamp will be used at the expense of accuracy')
clock = None
try:
capacity = trace.analysis.load_tracking.df_cpus_signal('capacity', cpus)
except MissingTraceEventError:
capacity = None
else:
# Reshape the capacity dataframe so that we get one column per CPU
capacity = capacity.pivot(columns=['cpu'])
capacity.columns = capacity.columns.droplevel(0)
capacity.ffill(inplace=True)
capacity = df_refit_index(
capacity,
window=(df_activation.index[0], df_activation.index[-1])
)
# Make sure we end up with the timestamp at which the capacity
# changes, rather than the timestamps at which the task is enqueued
# or dequeued.
activation_cpu = df_activation['cpu'].reindex(capacity.index, method='ffill')
capacity = series_dereference(activation_cpu, capacity)
df['simulated'] = simulate_pelt(
df_activation['active'],
index=df.index,
init=init,
clock=clock,
capacity=capacity,
)
# Since load is now CPU invariant in recent kernel versions, we don't
        # rescale it back. To match the old behavior, this line would be
        # needed:
# df['simulated'] /= self.plat_info['cpu-capacities']['rtapp'][cpu] / UTIL_SCALE
kernel_version = self.plat_info['kernel']['version']
if (
signal_name == 'load'
and kernel_version.parts[:2] < (5, 1)
):
            logger.warning(f'Load signal is assumed to be CPU invariant, which is true for recent mainline kernels, but may be wrong for {kernel_version}')
df['error'] = df[signal_name] - df['simulated']
df = df.dropna()
return df
def _plot_pelt(self, task, signal_name, simulated, test_name):
trace = self.trace
axis = trace.analysis.load_tracking.plot_task_signals(task, signals=[signal_name])
simulated.plot(ax=axis, drawstyle='steps-post', label=f'simulated {signal_name}')
activation_axis = axis.twinx()
trace.analysis.tasks.plot_task_activation(task, alpha=0.2, axis=activation_axis, duration=True)
axis.legend()
path = ArtifactPath.join(self.res_dir, f'{test_name}_{signal_name}.png')
trace.analysis.load_tracking.save_plot(axis.get_figure(), filepath=path)
def _add_cpu_metric(self, res_bundle):
freq_str = f'@{self.freq}' if self.freq is not None else ''
res_bundle.add_metric("cpu", f'{self.cpu}{freq_str}')
return res_bundle
@memoized
@get_simulated_pelt.used_events
def _test_behaviour(self, signal_name, error_margin_pct):
task = self.task_name
phase = self.wlgen_task.phases[0]
df = self.get_simulated_pelt(task, signal_name)
cpus = sorted(phase['cpus'])
assert len(cpus) == 1
cpu = cpus[0]
expected_duty_cycle_pct = phase['wload'].unscaled_duty_cycle_pct(self.plat_info)
expected_final_util = expected_duty_cycle_pct / 100 * UTIL_SCALE
settling_time = pelt_settling_time(10, init=0, final=expected_final_util)
settling_time += df.index[0]
df = df[settling_time:]
# Instead of taking the mean, take the average between the min and max
# values of the settled signal. This avoids the bias introduced by the
# fact that the util signal stays high while the task sleeps
settled_signal_mean = kernel_util_mean(df[signal_name], plat_info=self.plat_info)
expected_signal_mean = expected_final_util
signal_mean_error_pct = abs(expected_signal_mean - settled_signal_mean) / UTIL_SCALE * 100
res = ResultBundle.from_bool(signal_mean_error_pct < error_margin_pct)
res.add_metric('expected mean', expected_signal_mean)
res.add_metric('settled mean', settled_signal_mean)
res.add_metric('settled mean error', signal_mean_error_pct, '%')
self._plot_pelt(task, signal_name, df['simulated'], 'behaviour')
res = self._add_cpu_metric(res)
return res
@memoized
@get_simulated_pelt.used_events
def _test_correctness(self, signal_name, mean_error_margin_pct, max_error_margin_pct):
task = self.task_name
df = self.get_simulated_pelt(task, signal_name)
abs_error = df['error'].abs()
mean_error_pct = series_mean(abs_error) / UTIL_SCALE * 100
max_error_pct = abs_error.max() / UTIL_SCALE * 100
mean_ok = mean_error_pct <= mean_error_margin_pct
max_ok = max_error_pct <= max_error_margin_pct
res = ResultBundle.from_bool(mean_ok and max_ok)
res.add_metric('actual mean', series_mean(df[signal_name]))
res.add_metric('simulated mean', series_mean(df['simulated']))
res.add_metric('mean error', mean_error_pct, '%')
res.add_metric('actual max', df[signal_name].max())
res.add_metric('simulated max', df['simulated'].max())
res.add_metric('max error', max_error_pct, '%')
self._plot_pelt(task, signal_name, df['simulated'], 'correctness')
res = self._add_cpu_metric(res)
return res
@memoized
@_test_correctness.used_events
def test_util_correctness(self, mean_error_margin_pct=2, max_error_margin_pct=5) -> ResultBundle:
"""
Check that the utilization signal is as expected.
:param mean_error_margin_pct: Maximum allowed difference in the mean of
the actual signal and the simulated one, as a percentage of utilization
scale.
:type mean_error_margin_pct: float
:param max_error_margin_pct: Maximum allowed difference between samples
of the actual signal and the simulated one, as a percentage of
utilization scale.
:type max_error_margin_pct: float
"""
return self._test_correctness(
signal_name='util',
mean_error_margin_pct=mean_error_margin_pct,
max_error_margin_pct=max_error_margin_pct,
)
@memoized
@_test_correctness.used_events
def test_load_correctness(self, mean_error_margin_pct=2, max_error_margin_pct=5) -> ResultBundle:
"""
Same as :meth:`test_util_correctness` but checking the load.
"""
return self._test_correctness(
signal_name='load',
mean_error_margin_pct=mean_error_margin_pct,
max_error_margin_pct=max_error_margin_pct,
)
@memoized
@_test_behaviour.used_events
@RTATestBundle.test_noisy_tasks.undecided_filter(noise_threshold_pct=1)
def test_util_behaviour(self, error_margin_pct=5) -> ResultBundle:
"""
Check the utilization mean is linked to the task duty cycle.
.. note:: That is not really the case, as the util of a task is not
updated when the task is sleeping, but is fairly close to reality
as long as the task period is small enough.
:param error_margin_pct: Allowed difference in percentage of
utilization scale.
:type error_margin_pct: float
"""
return self._test_behaviour('util', error_margin_pct)
@memoized
@_test_behaviour.used_events
@RTATestBundle.test_noisy_tasks.undecided_filter(noise_threshold_pct=1)
def test_load_behaviour(self, error_margin_pct=5) -> ResultBundle:
"""
Same as :meth:`test_util_behaviour` but checking the load.
"""
return self._test_behaviour('load', error_margin_pct)
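# A minimal usage sketch of the per-item tests above (assuming a connected
# lisa Target named `target`; the cpu/freq values and res_dir below are
# hypothetical):
#
#   item = InvarianceItem.from_target(
#       target, cpu=0, freq=500000, freq_list=[500000, 1000000],
#       res_dir='...',
#   )
#   print(item.test_util_behaviour(error_margin_pct=5))
#   print(item.test_load_correctness())
#
# Inspecting each ResultBundle directly preserves more information than the
# aggregated tests provided by the Invariance class below.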
class Invariance(TestBundleBase, LoadTrackingHelpers):
"""
Basic check for frequency invariant load and utilization tracking
This test runs the same workload on one CPU of each capacity available in
the system at a cross section of available frequencies.
This class is mostly a wrapper around :class:`InvarianceItem`,
providing a way to build a list of those for a few frequencies, and
providing aggregated versions of the tests. Calling the test methods on
the items directly is recommended to avoid the inevitable loss of
information when aggregating the
:class:`~lisa.tests.base.Result` of each item.
`invariance_items` instance attribute is a list of instances of
:class:`InvarianceItem`.
"""
NR_FREQUENCIES = 8
"""
Maximum number of tested frequencies.
"""
def __init__(self, res_dir, plat_info, invariance_items):
super().__init__(res_dir, plat_info)
self.invariance_items = invariance_items
@classmethod
def _build_invariance_items(cls, target, res_dir, **kwargs):
"""
Yield a :class:`InvarianceItem` for a subset of target's
frequencies, for one CPU of each capacity class.
This is a generator function.
:Variable keyword arguments: Forwarded to :meth:`InvarianceItem.from_target`
:rtype: Iterator[:class:`InvarianceItem`]
"""
plat_info = target.plat_info
def pick_cpu(filtered_class, cpu_class):
try:
return filtered_class[0]
except IndexError:
raise RuntimeError(f'All CPUs of one capacity class have been blacklisted: {cpu_class}')
# pick one CPU per class of capacity
cpus = [
pick_cpu(filtered_class, cpu_class)
for cpu_class, filtered_class
in zip(
plat_info['capacity-classes'],
cls.filter_capacity_classes(plat_info)
)
]
def select_freqs(cpu):
all_freqs = plat_info['freqs'][cpu]
def interpolate(start, stop, nr):
step = (stop - start) / (nr - 1)
return [start + i * step for i in range(nr)]
# Select the highest freq no matter what
selected_freqs = {max(all_freqs)}
available_freqs = set(all_freqs) - selected_freqs
nr_freqs = cls.NR_FREQUENCIES - len(selected_freqs)
for ideal_freq in interpolate(min(all_freqs), max(all_freqs), nr_freqs):
if not available_freqs:
break
# Select the freq closest to ideal
selected_freq = min(available_freqs, key=lambda freq: abs(freq - ideal_freq))
available_freqs.discard(selected_freq)
selected_freqs.add(selected_freq)
return all_freqs, sorted(selected_freqs)
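# Illustrative walk-through of select_freqs (hypothetical OPP list, not from
# any real target): with all_freqs = [500, 1000, 1500, 2000, 2500, 3000] and
# NR_FREQUENCIES = 8, the max freq (3000) is always kept, then the remaining
# slots are filled by snapping evenly spaced "ideal" frequencies between min
# and max onto the closest still-available OPP. Here that simply selects all
# six frequencies, since there are fewer OPPs than slots.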
cpu_freqs = {
cpu: select_freqs(cpu)
for cpu in cpus
}
logger = cls.get_logger()
logger.info('Will run on: {}'.format(
', '.join(
f'CPU{cpu}@{freq}'
for cpu, (all_freqs, freq_list) in sorted(cpu_freqs.items())
for freq in freq_list
)
))
for cpu, (all_freqs, freq_list) in sorted(cpu_freqs.items()):
for freq in freq_list:
item_dir = ArtifactPath.join(res_dir, f"{InvarianceItem.task_prefix}_{cpu}@{freq}")
os.makedirs(item_dir)
logger.info(f'Running experiment for CPU {cpu}@{freq}')
yield InvarianceItem.from_target(
target,
cpu=cpu,
freq=freq,
freq_list=all_freqs,
res_dir=item_dir,
**kwargs,
)
def iter_invariance_items(self) -> InvarianceItem:
yield from self.invariance_items
@classmethod
@kwargs_forwarded_to(
InvarianceItem._from_target,
ignore=[
'cpu',
'freq',
'freq_list',
]
)
def _from_target(cls, target: Target, *, res_dir: ArtifactPath = None, collector=None, **kwargs) -> 'Invariance':
return cls(res_dir, target.plat_info,
list(cls._build_invariance_items(target, res_dir, **kwargs))
)
def get_item(self, cpu, freq):
"""
:returns: The
:class:`~lisa.tests.scheduler.load_tracking.InvarianceItem`
generated when running at a given frequency
"""
for item in self.invariance_items:
if item.cpu == cpu and item.freq == freq:
return item
raise ValueError(f'No invariance item matching {cpu}@{freq}')
# Combined version of some other tests, applied on all available
# InvarianceItem with the result merged.
@InvarianceItem.test_util_correctness.used_events
def test_util_correctness(self, mean_error_margin_pct=2, max_error_margin_pct=5) -> AggregatedResultBundle:
"""
Aggregated version of :meth:`InvarianceItem.test_util_correctness`
"""
def item_test(test_item):
return test_item.test_util_correctness(
mean_error_margin_pct=mean_error_margin_pct,
max_error_margin_pct=max_error_margin_pct,
)
return self._test_all_items(item_test)
@InvarianceItem.test_load_correctness.used_events
def test_load_correctness(self, mean_error_margin_pct=2, max_error_margin_pct=5) -> AggregatedResultBundle:
"""
Aggregated version of :meth:`InvarianceItem.test_load_correctness`
"""
def item_test(test_item):
return test_item.test_load_correctness(
mean_error_margin_pct=mean_error_margin_pct,
max_error_margin_pct=max_error_margin_pct,
)
return self._test_all_items(item_test)
@InvarianceItem.test_util_behaviour.used_events
def test_util_behaviour(self, error_margin_pct=5) -> AggregatedResultBundle:
"""
Aggregated version of :meth:`InvarianceItem.test_util_behaviour`
"""
def item_test(test_item):
return test_item.test_util_behaviour(
error_margin_pct=error_margin_pct,
)
return self._test_all_items(item_test)
@InvarianceItem.test_load_behaviour.used_events
def test_load_behaviour(self, error_margin_pct=5) -> AggregatedResultBundle:
"""
Aggregated version of :meth:`InvarianceItem.test_load_behaviour`
"""
def item_test(test_item):
return test_item.test_load_behaviour(
error_margin_pct=error_margin_pct,
)
return self._test_all_items(item_test)
def _test_all_items(self, item_test):
"""
Apply the `item_test` function on all instances of
:class:`InvarianceItem` and aggregate the returned
:class:`~lisa.tests.base.ResultBundle` into one.
:attr:`~lisa.tests.base.Result.UNDECIDED` is ignored.
"""
item_res_bundles = [
item_test(item)
for item in self.invariance_items
]
return AggregatedResultBundle(item_res_bundles, 'cpu')
@InvarianceItem.test_util_behaviour.used_events
def test_cpu_invariance(self) -> AggregatedResultBundle:
"""
Check that the items using the max freq on each CPU pass the util avg test.
There could be false positives, but they are expected to be relatively
rare.
.. seealso:: :class:`InvarianceItem.test_util_behaviour`
"""
res_list = []
for cpu, item_group in groupby(self.invariance_items, key=lambda x: x.cpu):
item_group = list(item_group)
# combine all frequencies of that CPU class, although they should
# all be the same
max_freq = max(itertools.chain.from_iterable(
x.freq_list for x in item_group
))
max_freq_items = [
item
for item in item_group
if item.freq == max_freq
]
for item in max_freq_items:
# Only test util, as it should be more robust
res = item.test_util_behaviour()
res_list.append(res)
return AggregatedResultBundle(res_list, 'cpu')
@InvarianceItem.test_util_behaviour.used_events
def test_freq_invariance(self) -> ResultBundle:
"""
Check that at least one CPU has items passing for all tested frequencies.
.. seealso:: :class:`InvarianceItem.test_util_behaviour`
"""
logger = self.get_logger()
def make_group_bundle(cpu, item_group):
bundle = AggregatedResultBundle(
[
# Only test util, as it should be more robust
item.test_util_behaviour()
for item in item_group
],
# each item's "cpu" metric also contains the frequency
name_metric='cpu',
)
# At that level, we only report the CPU, since nested bundles cover
# different frequencies
bundle.add_metric('cpu', cpu)
logger.info(f'Util avg invariance {bundle.result.lower_name} for CPU {cpu}')
return bundle
group_result_bundles = [
make_group_bundle(cpu, item_group)
for cpu, item_group in groupby(self.invariance_items, key=lambda x: x.cpu)
]
# The combination differs from the AggregatedResultBundle default one:
# we consider as passed as long as at least one of the group has
# passed, instead of forcing all of them to pass.
if any(result_bundle.result is Result.PASSED for result_bundle in group_result_bundles):
overall_result = Result.PASSED
elif all(result_bundle.result is Result.UNDECIDED for result_bundle in group_result_bundles):
overall_result = Result.UNDECIDED
else:
overall_result = Result.FAILED
return AggregatedResultBundle(
group_result_bundles,
name_metric='cpu',
result=overall_result
)
class CPUMigrationBase(LoadTrackingBase):
"""
Base class for migration-related load tracking tests
The idea here is to run several rt-app tasks and to have them pinned to
a single CPU for a single phase. They can change CPUs in a new phase,
and we can then inspect the CPU utilization - it should match the
sum of the utilization of all the tasks running on it.
**Design notes:**
Since we sum up the utilization of each task, make sure not to overload the
CPU - IOW, there should always be some idle cycles.
The code assumes all tasks have the same number of phases, and that those
phases are all aligned.
"""
PHASE_DURATION = 3 * UTIL_CONVERGENCE_TIME_S
"""
The duration of a single phase
"""
TASK_PERIOD = 16e-3
"""
The average value of the runqueue PELT signals is very dependent on the
task period, so it's important to set it to a known validated value in that
class.
"""
@abc.abstractmethod
def get_nr_required_cpu(cls, plat_info):
"""
The number of CPUs of same capacity involved in the test
"""
pass
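# Background for the TASK_PERIOD value above (a sketch assuming the kernel's
# standard PELT parameters, which this class does not control): PELT sums
# runnable time over 1024us segments and decays older segments by a factor y
# per segment with y**32 == 0.5, i.e. a 32ms half-life. The util of a
# periodic task therefore ripples around its duty cycle, and the ripple
# grows with the period, which is why TASK_PERIOD is pinned to a small,
# validated 16ms.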
@classmethod
def run_rtapp(cls, *, profile, **kwargs):
# Just do some validation on the profile
for name, task in profile.items():
for phase in task.phases:
if len(phase['cpus']) != 1:
raise RuntimeError(f"Each phase must be tied to a single CPU. Task \"{name}\" violates this")
super().run_rtapp(profile=profile, **kwargs)
@property
def cpus(self):
"""
All CPUs used by RTapp workload.
"""
return set(itertools.chain.from_iterable(
phase['cpus']
for task in self.rtapp_profile.values()
for phase in task.phases
))
@classmethod
def check_from_target(cls, target):
super().check_from_target(target)
try:
target.plat_info["cpu-capacities"]['rtapp']
except KeyError as e:
ResultBundle.raise_skip(str(e), from_=e)
# Check that there are enough CPUs of the same capacity
cls.get_migration_cpus(target.plat_info)
@classmethod
def get_migration_cpus(cls, plat_info):
"""
:returns: N CPUs of same capacity, with N set by :meth:`get_nr_required_cpu`.
"""
# Iterate over descending CPU capacity groups
nr_required_cpu = cls.get_nr_required_cpu(plat_info)
cpu_classes = plat_info["capacity-classes"]
# If the CPU capacities are writeable, it's better to give priority to
# LITTLE cores as they will be less prone to thermal capping.
# Otherwise, it's better to pick big cores as they will not be affected
# by CPU invariance issues.
if not plat_info['cpu-capacities']['writeable']:
cpu_classes = reversed(cpu_classes)
for cpus in cpu_classes:
if len(cpus) >= nr_required_cpu:
return cpus[:nr_required_cpu]
ResultBundle.raise_skip(
f"This workload requires {nr_required_cpu} CPUs of identical capacity")
# Don't strictly check for cpu_frequency, since there might be no occurrence
# of the event.
@may_use_events(FrequencyAnalysis.df_cpus_frequency.used_events)
@TasksAnalysis.df_task_activation.used_events
@RTAEventsAnalysis.df_phases.used_events
def get_expected_cpu_util(self):
"""
Get the per-phase average CPU utilization expected from the duty cycle
of the tasks found in the trace.
:returns: A dict of the shape {cpu : {phase_id : expected_util}}
.. note:: This is more robust than just looking at the duty cycle in
the task profile, since rtapp might not accurately reproduce the
duty cycle it was asked for.
"""
cpu_capacities = self.plat_info['cpu-capacities']['rtapp']
cpu_util = {}
cpu_freqs = self.plat_info['freqs']
try:
freq_df = self.trace.analysis.frequency.df_cpus_frequency()
except MissingTraceEventError:
cpus_rel_freq = None
else:
cpus_rel_freq = {
# Frequency, normalized according to max frequency on that CPU
cols['cpu']: df['frequency'] / max(cpu_freqs[cols['cpu']])
for cols, df in df_split_signals(freq_df, ['cpu'])
}
for task in self.rtapp_task_ids:
df = self.trace.analysis.tasks.df_task_activation(task)
for row in self.trace.analysis.rta.df_phases(task, wlgen_profile=self.rtapp_profile).itertuples():
if not row.properties['meta']['from_test']:
continue
phase = row.phase
duration = row.duration
start = row.Index
end = start + duration
# The first quarter of the util signal of each phase is impacted by the
# phase change (rtapp does some bookkeeping at the beginning of phases),
# so it could be skipped by shifting the start as below; this is currently
# left disabled and the full window is used:
# start += duration / 4
# readjust the duration to take into account any modification of start
duration = end - start
window = (start, end)
phase_df = df_window(df, window, clip_window=True)
for cpu in self.cpus:
if cpus_rel_freq is None:
rel_freq_mean = 1
else:
phase_freq_series = df_window(cpus_rel_freq[cpu], window=window, clip_window=True)
# We might not have frequency data at the beginning of the
# trace, or if no frequency transition happened at all.
if phase_freq_series.empty:
rel_freq_mean = 1
else:
# If we lack freq data at the beginning of the
# window, assume the frequency was right.
if phase_freq_series.index[0] > start:
phase_freq_series = pd.concat([pd.Series([1.0], index=[start]), phase_freq_series])
# Extend the frequency to the right so that the mean
# takes into account all the data we have
freq_window = (phase_freq_series.index[0], end)
rel_freq_mean = series_mean(series_refit_index(phase_freq_series, window=freq_window))
cpu_phase_df = phase_df[phase_df['cpu'] == cpu].dropna()
if cpu_phase_df.empty:
duty_cycle = 0
cpu_residency = 0
else:
duty_cycle = series_mean(df_refit_index(cpu_phase_df['duty_cycle'], window=window))
cpu_residency = end - max(cpu_phase_df.index[0], start)
phase_util = UTIL_SCALE * duty_cycle * (cpu_capacities[cpu] / UTIL_SCALE)
# Pro-rata with the time spent on that CPU, so we get
# the correct average.
phase_util *= cpu_residency / duration
# We might not have run at max freq, e.g. because of
# thermal capping, so take that into account
phase_util *= rel_freq_mean
cpu_util.setdefault(cpu, {}).setdefault(phase, 0)
cpu_util[cpu][phase] += phase_util
return cpu_util
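# Worked example of the arithmetic above (hypothetical numbers, assuming
# UTIL_SCALE == 1024): a task with a 25% duty cycle on a CPU of rtapp
# capacity 512 that stayed on that CPU for the whole phase at max frequency
# contributes 1024 * 0.25 * (512 / 1024) * 1.0 * 1.0 = 128 to that CPU's
# expected utilization for that phase.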
@LoadTrackingAnalysis.df_cpus_signal.used_events
def get_trace_cpu_util(self):
"""
Get the per-phase average CPU utilization read from the trace
:returns: A dict of the shape {cpu : {phase_id : trace_util}}
"""
df = self.trace.analysis.load_tracking.df_cpus_signal('util')
tasks = self.rtapp_task_ids_map.keys()
task = sorted(task for task in tasks if task.startswith('migr'))[0]
task = self.rtapp_task_ids_map[task][0]
cpu_util = {}
for row in self.trace.analysis.rta.df_phases(task, wlgen_profile=self.rtapp_profile).itertuples():
if not row.properties['meta']['from_test']:
continue
phase = row.phase
duration = row.duration
start = row.Index
end = start + duration
# Ignore the first quarter of the util signal of each phase, since
# it's impacted by the phase change, and util can be affected
# (rtapp does some bookkeeping at the beginning of phases)
start += duration / 4
phase_df = df_window(df, (start, end), method='pre', clip_window=True)
for cpu in self.cpus:
util = phase_df[phase_df['cpu'] == cpu]['util']
cpu_util.setdefault(cpu, {})[phase] = kernel_util_mean(util, plat_info=self.plat_info)
return cpu_util
@LoadTrackingAnalysis.plot_task_signals.used_events
def _plot_util(self):
trace = self.trace
analysis = trace.analysis.load_tracking
fig, axes = analysis.setup_plot(nrows=len(self.rtapp_tasks))
for task, axis in zip(self.rtapp_tasks, axes):
analysis.plot_task_signals(task, signals=['util'], axis=axis)
trace.analysis.rta.plot_phases(task, axis=axis, wlgen_profile=self.rtapp_profile)
activation_axis = axis.twinx()
trace.analysis.tasks.plot_task_activation(task, duty_cycle=True, overlay=True, alpha=0.2, axis=activation_axis)
df_activations = trace.analysis.tasks.df_task_activation(task)
df_util = analysis.df_task_signal(task, 'util')
def compute_means(row):
start = row.name
end = start + row['duration']
phase_activations = df_window(df_activations, (start, end))
phase_util = df_window(df_util, (start, end))
series = pd.Series({
'Phase duty cycle average': series_mean(phase_activations['duty_cycle']),
'Phase util tunnel average': kernel_util_mean(
phase_util['util'],
plat_info=self.plat_info,
),
})
return series
df_means = trace.analysis.rta.df_phases(task).apply(compute_means, axis=1)
df_means = series_refit_index(df_means, window=trace.window)
df_means['Phase duty cycle average'].plot(drawstyle='steps-post', ax=activation_axis)
df_means['Phase util tunnel average'].plot(drawstyle='steps-post', ax=axis)
activation_axis.legend()
axis.legend()
filepath = ArtifactPath.join(self.res_dir, 'tasks_util.png')
analysis.save_plot(fig, filepath=filepath)
filepath = ArtifactPath.join(self.res_dir, 'cpus_util.png')
cpus = sorted(self.cpus)
analysis.plot_cpus_signals(cpus, signals=['util'], filepath=filepath)
@get_trace_cpu_util.used_events
@get_expected_cpu_util.used_events
@_plot_util.used_events
@RTATestBundle.test_noisy_tasks.undecided_filter(noise_threshold_pct=1)
def test_util_task_migration(self, allowed_error_pct=3) -> ResultBundle:
"""
Test that a migrated task properly propagates its utilization at the CPU level
:param allowed_error_pct: How much the trace averages can stray from the
expected values
:type allowed_error_pct: float
"""
expected_util = self.get_expected_cpu_util()
trace_util = self.get_trace_cpu_util()
passed = True
expected_metrics = {}
trace_metrics = {}
deltas = {}
for cpu in self.cpus:
expected_cpu_util = expected_util[cpu]
trace_cpu_util = trace_util[cpu]
cpu_str = f"cpu{cpu}"
expected_metrics[cpu_str] = TestMetric({})
trace_metrics[cpu_str] = TestMetric({})
deltas[cpu_str] = TestMetric({})
for phase in sorted(trace_cpu_util.keys() & expected_cpu_util.keys()):
expected_phase_util = expected_cpu_util[phase]
trace_phase_util = trace_cpu_util[phase]
is_equal, delta = self.is_almost_equal(
expected_phase_util,
trace_phase_util,
allowed_error_pct)
if not is_equal:
passed = False
# Just some verbose metric collection...
phase_str = f"phase{phase}"
expected_metrics[cpu_str].data[phase] = TestMetric(expected_phase_util)
trace_metrics[cpu_str].data[phase] = TestMetric(trace_phase_util)
deltas[cpu_str].data[phase] = TestMetric(delta, "%")
res = ResultBundle.from_bool(passed)
res.add_metric("Expected utilization", expected_metrics)
res.add_metric("Trace utilization", trace_metrics)
res.add_metric("Utilization deltas", deltas)
self._plot_util()
return res
class OneTaskCPUMigration(CPUMigrationBase):
"""
Some tasks on two big CPUs, one of them migrates in its second phase.
"""
@classmethod
def get_nr_required_cpu(cls, plat_info):
return 2
@classmethod
def _get_rtapp_profile(cls, plat_info):
cpus = cls.get_migration_cpus(plat_info)
nr_cpus = len(cpus)
periodic_settings = dict(
duration=cls.PHASE_DURATION,
period=cls.TASK_PERIOD,
)
return {
# A task that will migrate to another CPU
'migr': add(
RTAPhase(
prop_wload=PeriodicWload(
duty_cycle_pct=20,
scale_for_cpu=cpu,
**periodic_settings,
),
prop_cpus=[cpu],
)
for cpu in cpus
),
**{
# Just some tasks that won't move, to provide some background utilization
f"static{i}": nr_cpus * RTAPhase(
prop_wload=PeriodicWload(
duty_cycle_pct=30,
scale_for_cpu=cpus[i],
**periodic_settings,
),
prop_cpus=[cpus[i]]
)
for i in range(min(2, nr_cpus))
}
}
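# Shape of the resulting profile (sketch, with the two selected CPUs called
# c0 and c1): 'migr' runs one 20% phase pinned to c0 then one pinned to c1,
# which is the migration under test, while 'static0' and 'static1' repeat
# the same pinned 30% phase nr_cpus times, providing a constant background
# load on their own CPU while 'migr' moves.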
class NTasksCPUMigrationBase(CPUMigrationBase):
"""
N tasks on N CPUs, with all the migration permutations.
"""
@classmethod
def _get_rtapp_profile(cls, plat_info):
cpus = cls.get_migration_cpus(plat_info)
def make_name(i):
return f'migr{i}'
nr_tasks = len(cpus)
# Define one task per CPU, and create all the possible migrations by
# shuffling around these tasks
profile = {}
for cpus_combi in itertools.permutations(cpus, r=nr_tasks):
for i, cpu in enumerate(cpus_combi):
task_name = make_name(i)
task = profile.setdefault(task_name, RTAPhase())
profile[task_name] = task + RTAPhase(
prop_wload=PeriodicWload(
duty_cycle_pct=50,
scale_for_cpu=cpu,
duration=cls.PHASE_DURATION,
period=cls.TASK_PERIOD,
),
prop_cpus=[cpu],
)
return profile
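# Sketch of what the loop builds for 2 CPUs (c0, c1): the permutations are
# (c0, c1) and (c1, c0), so 'migr0' gets a phase on c0 then one on c1 while
# 'migr1' gets a phase on c1 then one on c0, i.e. the tasks swap CPUs from
# one phase to the next. With N CPUs there are N! phases per task, covering
# every task-to-CPU assignment.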
class TwoTasksCPUMigration(NTasksCPUMigrationBase):
"""
Two tasks on two big CPUs, swap their CPU in the second phase
"""
@classmethod
def get_nr_required_cpu(cls, plat_info):
return 2
class NTasksCPUMigration(NTasksCPUMigrationBase):
"""
N tasks on N CPUs, and try all permutations of tasks and CPUs.
"""
@classmethod
def get_nr_required_cpu(cls, plat_info):
"""
Select the maximum number of CPUs the tests can handle.
"""
return max(len(cpus) for cpus in plat_info["capacity-classes"])
def test_util_task_migration(self, allowed_error_pct=8) -> ResultBundle:
"""
Relax the margins compared to the super-class version.
"""
return super().test_util_task_migration(
allowed_error_pct=allowed_error_pct,
)
# vim :set tabstop=4 shiftwidth=4 textwidth=80 expandtab
| apache-2.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tests/frame/test_misc_api.py | 7 | 16059 | # -*- coding: utf-8 -*-
from __future__ import print_function
# pylint: disable-msg=W0612,E1101
from copy import deepcopy
import sys
import nose
from distutils.version import LooseVersion
from pandas.compat import range, lrange
from pandas import compat
from numpy.random import randn
import numpy as np
from pandas import DataFrame, Series
import pandas as pd
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class SharedWithSparse(object):
_multiprocess_can_split_ = True
def test_copy_index_name_checking(self):
# don't want to be able to modify the index stored elsewhere after
# making a copy
for attr in ('index', 'columns'):
ind = getattr(self.frame, attr)
ind.name = None
cp = self.frame.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.frame, attr).name)
def test_getitem_pop_assign_name(self):
s = self.frame['A']
self.assertEqual(s.name, 'A')
s = self.frame.pop('A')
self.assertEqual(s.name, 'A')
s = self.frame.ix[:, 'B']
self.assertEqual(s.name, 'B')
s2 = s.ix[:]
self.assertEqual(s2.name, 'B')
def test_get_value(self):
for idx in self.frame.index:
for col in self.frame.columns:
result = self.frame.get_value(idx, col)
expected = self.frame[col][idx]
tm.assert_almost_equal(result, expected)
def test_join_index(self):
# left / right
f = self.frame.reindex(columns=['A', 'B'])[:10]
f2 = self.frame.reindex(columns=['C', 'D'])
joined = f.join(f2)
self.assert_index_equal(f.index, joined.index)
self.assertEqual(len(joined.columns), 4)
joined = f.join(f2, how='left')
self.assert_index_equal(joined.index, f.index)
self.assertEqual(len(joined.columns), 4)
joined = f.join(f2, how='right')
self.assert_index_equal(joined.index, f2.index)
self.assertEqual(len(joined.columns), 4)
# inner
f = self.frame.reindex(columns=['A', 'B'])[:10]
f2 = self.frame.reindex(columns=['C', 'D'])
joined = f.join(f2, how='inner')
self.assert_index_equal(joined.index, f.index.intersection(f2.index))
self.assertEqual(len(joined.columns), 4)
# outer
f = self.frame.reindex(columns=['A', 'B'])[:10]
f2 = self.frame.reindex(columns=['C', 'D'])
joined = f.join(f2, how='outer')
self.assertTrue(tm.equalContents(self.frame.index, joined.index))
self.assertEqual(len(joined.columns), 4)
assertRaisesRegexp(ValueError, 'join method', f.join, f2, how='foo')
# corner case - overlapping columns
for how in ('outer', 'left', 'inner'):
with assertRaisesRegexp(ValueError, 'columns overlap but '
'no suffix'):
self.frame.join(self.frame, how=how)
def test_join_index_more(self):
af = self.frame.ix[:, ['A', 'B']]
bf = self.frame.ix[::2, ['C', 'D']]
expected = af.copy()
expected['C'] = self.frame['C'][::2]
expected['D'] = self.frame['D'][::2]
result = af.join(bf)
assert_frame_equal(result, expected)
result = af.join(bf, how='right')
assert_frame_equal(result, expected[::2])
result = bf.join(af, how='right')
assert_frame_equal(result, expected.ix[:, result.columns])
def test_join_index_series(self):
df = self.frame.copy()
s = df.pop(self.frame.columns[-1])
joined = df.join(s)
# TODO should this check_names ?
assert_frame_equal(joined, self.frame, check_names=False)
s.name = None
assertRaisesRegexp(ValueError, 'must have a name', df.join, s)
def test_join_overlap(self):
df1 = self.frame.ix[:, ['A', 'B', 'C']]
df2 = self.frame.ix[:, ['B', 'C', 'D']]
joined = df1.join(df2, lsuffix='_df1', rsuffix='_df2')
df1_suf = df1.ix[:, ['B', 'C']].add_suffix('_df1')
df2_suf = df2.ix[:, ['B', 'C']].add_suffix('_df2')
no_overlap = self.frame.ix[:, ['A', 'D']]
expected = df1_suf.join(df2_suf).join(no_overlap)
# column order not necessarily sorted
assert_frame_equal(joined, expected.ix[:, joined.columns])
def test_add_prefix_suffix(self):
with_prefix = self.frame.add_prefix('foo#')
expected = pd.Index(['foo#%s' % c for c in self.frame.columns])
self.assert_index_equal(with_prefix.columns, expected)
with_suffix = self.frame.add_suffix('#foo')
expected = pd.Index(['%s#foo' % c for c in self.frame.columns])
self.assert_index_equal(with_suffix.columns, expected)
class TestDataFrameMisc(tm.TestCase, SharedWithSparse, TestData):
klass = DataFrame
_multiprocess_can_split_ = True
def test_get_axis(self):
f = self.frame
self.assertEqual(f._get_axis_number(0), 0)
self.assertEqual(f._get_axis_number(1), 1)
self.assertEqual(f._get_axis_number('index'), 0)
self.assertEqual(f._get_axis_number('rows'), 0)
self.assertEqual(f._get_axis_number('columns'), 1)
self.assertEqual(f._get_axis_name(0), 'index')
self.assertEqual(f._get_axis_name(1), 'columns')
self.assertEqual(f._get_axis_name('index'), 'index')
self.assertEqual(f._get_axis_name('rows'), 'index')
self.assertEqual(f._get_axis_name('columns'), 'columns')
self.assertIs(f._get_axis(0), f.index)
self.assertIs(f._get_axis(1), f.columns)
assertRaisesRegexp(ValueError, 'No axis named', f._get_axis_number, 2)
assertRaisesRegexp(ValueError, 'No axis.*foo', f._get_axis_name, 'foo')
assertRaisesRegexp(ValueError, 'No axis.*None', f._get_axis_name, None)
assertRaisesRegexp(ValueError, 'No axis named', f._get_axis_number,
None)
def test_keys(self):
getkeys = self.frame.keys
self.assertIs(getkeys(), self.frame.columns)
def test_column_contains_typeerror(self):
try:
self.frame.columns in self.frame
except TypeError:
pass
def test_not_hashable(self):
df = pd.DataFrame([1])
self.assertRaises(TypeError, hash, df)
self.assertRaises(TypeError, hash, self.empty)
def test_new_empty_index(self):
df1 = DataFrame(randn(0, 3))
df2 = DataFrame(randn(0, 3))
df1.index.name = 'foo'
self.assertIsNone(df2.index.name)
def test_array_interface(self):
with np.errstate(all='ignore'):
result = np.sqrt(self.frame)
tm.assertIsInstance(result, type(self.frame))
self.assertIs(result.index, self.frame.index)
self.assertIs(result.columns, self.frame.columns)
assert_frame_equal(result, self.frame.apply(np.sqrt))
def test_get_agg_axis(self):
cols = self.frame._get_agg_axis(0)
self.assertIs(cols, self.frame.columns)
idx = self.frame._get_agg_axis(1)
self.assertIs(idx, self.frame.index)
self.assertRaises(ValueError, self.frame._get_agg_axis, 2)
def test_nonzero(self):
self.assertTrue(self.empty.empty)
self.assertFalse(self.frame.empty)
self.assertFalse(self.mixed_frame.empty)
# corner case
df = DataFrame({'A': [1., 2., 3.],
'B': ['a', 'b', 'c']},
index=np.arange(3))
del df['A']
self.assertFalse(df.empty)
def test_iteritems(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b'])
for k, v in compat.iteritems(df):
self.assertEqual(type(v), Series)
def test_iter(self):
self.assertTrue(tm.equalContents(list(self.frame), self.frame.columns))
def test_iterrows(self):
for i, (k, v) in enumerate(self.frame.iterrows()):
exp = self.frame.xs(self.frame.index[i])
assert_series_equal(v, exp)
for i, (k, v) in enumerate(self.mixed_frame.iterrows()):
exp = self.mixed_frame.xs(self.mixed_frame.index[i])
assert_series_equal(v, exp)
def test_itertuples(self):
for i, tup in enumerate(self.frame.itertuples()):
s = Series(tup[1:])
s.name = tup[0]
expected = self.frame.ix[i, :].reset_index(drop=True)
assert_series_equal(s, expected)
df = DataFrame({'floats': np.random.randn(5),
'ints': lrange(5)}, columns=['floats', 'ints'])
for tup in df.itertuples(index=False):
tm.assertIsInstance(tup[1], np.integer)
df = DataFrame(data={"a": [1, 2, 3], "b": [4, 5, 6]})
dfaa = df[['a', 'a']]
self.assertEqual(list(dfaa.itertuples()), [
(0, 1, 1), (1, 2, 2), (2, 3, 3)])
self.assertEqual(repr(list(df.itertuples(name=None))),
'[(0, 1, 4), (1, 2, 5), (2, 3, 6)]')
tup = next(df.itertuples(name='TestName'))
# no support for field renaming in Python 2.6, regular tuples are
# returned
if sys.version >= LooseVersion('2.7'):
self.assertEqual(tup._fields, ('Index', 'a', 'b'))
self.assertEqual((tup.Index, tup.a, tup.b), tup)
self.assertEqual(type(tup).__name__, 'TestName')
df.columns = ['def', 'return']
tup2 = next(df.itertuples(name='TestName'))
self.assertEqual(tup2, (0, 1, 4))
if sys.version >= LooseVersion('2.7'):
self.assertEqual(tup2._fields, ('Index', '_1', '_2'))
df3 = DataFrame(dict(('f' + str(i), [i]) for i in range(1024)))
# will raise SyntaxError if trying to create namedtuple
tup3 = next(df3.itertuples())
self.assertFalse(hasattr(tup3, '_fields'))
self.assertIsInstance(tup3, tuple)
def test_len(self):
self.assertEqual(len(self.frame), len(self.frame.index))
def test_as_matrix(self):
frame = self.frame
mat = frame.as_matrix()
frameCols = frame.columns
for i, row in enumerate(mat):
for j, value in enumerate(row):
col = frameCols[j]
if np.isnan(value):
self.assertTrue(np.isnan(frame[col][i]))
else:
self.assertEqual(value, frame[col][i])
# mixed type
mat = self.mixed_frame.as_matrix(['foo', 'A'])
self.assertEqual(mat[0, 0], 'bar')
df = DataFrame({'real': [1, 2, 3], 'complex': [1j, 2j, 3j]})
mat = df.as_matrix()
self.assertEqual(mat[0, 0], 1j)
# single block corner case
mat = self.frame.as_matrix(['A', 'B'])
expected = self.frame.reindex(columns=['A', 'B']).values
assert_almost_equal(mat, expected)
def test_values(self):
self.frame.values[:, 0] = 5.
self.assertTrue((self.frame.values[:, 0] == 5).all())
def test_deepcopy(self):
cp = deepcopy(self.frame)
series = cp['A']
series[:] = 10
for idx, value in compat.iteritems(series):
self.assertNotEqual(self.frame['A'][idx], value)
# ---------------------------------------------------------------------
# Transposing
def test_transpose(self):
frame = self.frame
dft = frame.T
for idx, series in compat.iteritems(dft):
for col, value in compat.iteritems(series):
if np.isnan(value):
self.assertTrue(np.isnan(frame[col][idx]))
else:
self.assertEqual(value, frame[col][idx])
# mixed type
index, data = tm.getMixedTypeDict()
mixed = DataFrame(data, index=index)
mixed_T = mixed.T
for col, s in compat.iteritems(mixed_T):
self.assertEqual(s.dtype, np.object_)
def test_transpose_get_view(self):
dft = self.frame.T
dft.values[:, 5:10] = 5
self.assertTrue((self.frame.values[5:10] == 5).all())
def test_swapaxes(self):
df = DataFrame(np.random.randn(10, 5))
assert_frame_equal(df.T, df.swapaxes(0, 1))
assert_frame_equal(df.T, df.swapaxes(1, 0))
assert_frame_equal(df, df.swapaxes(0, 0))
self.assertRaises(ValueError, df.swapaxes, 2, 5)
def test_axis_aliases(self):
f = self.frame
# reg name
expected = f.sum(axis=0)
result = f.sum(axis='index')
assert_series_equal(result, expected)
expected = f.sum(axis=1)
result = f.sum(axis='columns')
assert_series_equal(result, expected)
def test_more_asMatrix(self):
values = self.mixed_frame.as_matrix()
self.assertEqual(values.shape[1], len(self.mixed_frame.columns))
def test_repr_with_mi_nat(self):
df = DataFrame({'X': [1, 2]},
index=[[pd.NaT, pd.Timestamp('20130101')], ['a', 'b']])
res = repr(df)
exp = ' X\nNaT a 1\n2013-01-01 b 2'
self.assertEqual(res, exp)
def test_iterkv_deprecation(self):
with tm.assert_produces_warning(FutureWarning):
self.mixed_float.iterkv()
def test_iterkv_names(self):
for k, v in compat.iteritems(self.mixed_frame):
self.assertEqual(v.name, k)
def test_series_put_names(self):
series = self.mixed_frame._series
for k, v in compat.iteritems(series):
self.assertEqual(v.name, k)
def test_empty_nonzero(self):
df = DataFrame([1, 2, 3])
self.assertFalse(df.empty)
df = DataFrame(index=['a', 'b'], columns=['c', 'd']).dropna()
self.assertTrue(df.empty)
self.assertTrue(df.T.empty)
def test_inplace_return_self(self):
# re #1893
data = DataFrame({'a': ['foo', 'bar', 'baz', 'qux'],
'b': [0, 0, 1, 1],
'c': [1, 2, 3, 4]})
def _check_f(base, f):
result = f(base)
self.assertTrue(result is None)
# -----DataFrame-----
# set_index
f = lambda x: x.set_index('a', inplace=True)
_check_f(data.copy(), f)
# reset_index
f = lambda x: x.reset_index(inplace=True)
_check_f(data.set_index('a'), f)
# drop_duplicates
f = lambda x: x.drop_duplicates(inplace=True)
_check_f(data.copy(), f)
# sort
f = lambda x: x.sort_values('b', inplace=True)
_check_f(data.copy(), f)
# sort_index
f = lambda x: x.sort_index(inplace=True)
_check_f(data.copy(), f)
# sortlevel
f = lambda x: x.sortlevel(0, inplace=True)
_check_f(data.set_index(['a', 'b']), f)
# fillna
f = lambda x: x.fillna(0, inplace=True)
_check_f(data.copy(), f)
# replace
f = lambda x: x.replace(1, 0, inplace=True)
_check_f(data.copy(), f)
# rename
f = lambda x: x.rename({1: 'foo'}, inplace=True)
_check_f(data.copy(), f)
# -----Series-----
d = data.copy()['c']
# reset_index
f = lambda x: x.reset_index(inplace=True, drop=True)
_check_f(data.set_index('a')['c'], f)
# fillna
f = lambda x: x.fillna(0, inplace=True)
_check_f(d.copy(), f)
# replace
f = lambda x: x.replace(1, 0, inplace=True)
_check_f(d.copy(), f)
# rename
f = lambda x: x.rename({1: 'foo'}, inplace=True)
_check_f(d.copy(), f)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
seckcoder/lang-learn | python/sklearn/sklearn/svm/classes.py | 1 | 26657 | from .base import BaseLibLinear, BaseSVC, BaseLibSVM
from ..base import RegressorMixin
from ..linear_model.base import LinearClassifierMixin
from ..feature_selection.selector_mixin import SelectorMixin
class LinearSVC(BaseLibLinear, LinearClassifierMixin, SelectorMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better (to large numbers of
samples).
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'l1' or 'l2' (default='l2')
Specifies the loss function. 'l1' is the hinge loss (standard SVM)
while 'l2' is the squared hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria
multi_class: string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice, rarely leads to
better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
like all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : int, default: 0
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
Attributes
----------
`coef_` : array, shape = [n_features] if n_classes == 2 \
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `raw_coef_` that \
follows the internal memory layout of liblinear.
`intercept_` : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using a one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. Furthermore
SGDClassifier is scalable to large number of samples as it uses
a Stochastic Gradient Descent optimizer.
Finally SGDClassifier can fit both dense and sparse data without
memory copy if the input is C-contiguous or CSR.
"""
# all the implementation is provided by the mixins
pass
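# A minimal usage sketch for LinearSVC (hypothetical data, shown as a comment
# rather than a doctest because the printed repr depends on this version's
# estimator defaults):
#
#   import numpy as np
#   from sklearn.svm import LinearSVC
#   X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
#   y = np.array([1, 1, 2, 2])
#   clf = LinearSVC(C=1.0).fit(X, y)
#   clf.predict([[-0.8, -1]])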
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples, which makes it hard
to scale to datasets with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each,
see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
.. The narrative documentation is available at http://scikit-learn.org/
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of kernel function.
It is significant only in 'poly' and 'sigmoid'.
gamma : float, optional (default=0.0)
Kernel coefficient for 'rbf' and 'poly'.
If gamma is 0.0 then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability: boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling predict_proba.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
`support_` : array-like, shape = [n_SV]
Index of support vectors.
`support_vectors_` : array-like, shape = [n_SV, n_features]
Support vectors.
`n_support_` : array-like, dtype=int32, shape = [n_class]
number of support vector for each class.
`dual_coef_` : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function. \
For multiclass, coefficient for all 1-vs-1 classifiers. \
The layout of the coefficients in the multiclass case is somewhat \
non-trivial. See the section about multi-class classification in the \
SVM section of the User Guide for details.
`coef_` : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
`intercept_` : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, degree=3,
gamma=0.0, kernel='rbf', max_iter=-1, probability=False,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[ 1.]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison elements.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma=0.0,
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1):
super(SVC, self).__init__('c_svc', kernel, degree, gamma, coef0, tol,
C, 0., 0., shrinking, probability, cache_size, "auto",
class_weight, verbose, max_iter)
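# Quick arithmetic for the one-vs-one scheme documented above: with
# n_class = 3 classes, SVC trains n_class * (n_class - 1) / 2 = 3 binary
# classifiers, so `intercept_` holds 3 constants while `dual_coef_` has
# n_class - 1 = 2 rows (one coefficient per support vector for each of the
# other classes).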
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
degree of kernel function
is significant only in poly, rbf, sigmoid
gamma : float, optional (default=0.0)
kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
will be taken.
coef0 : float, optional (default=0.0)
independent term in kernel function. It is only significant
in poly/sigmoid.
probability: boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling predict_proba.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
`support_` : array-like, shape = [n_SV]
Index of support vectors.
`support_vectors_` : array-like, shape = [n_SV, n_features]
Support vectors.
`n_support_` : array-like, dtype=int32, shape = [n_class]
number of support vector for each class.
`dual_coef_` : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function. \
For multiclass, coefficient for all 1-vs-1 classifiers. \
The layout of the coefficients in the multiclass case is somewhat \
non-trivial. See the section about multi-class classification in \
the SVM section of the User Guide for details.
`coef_` : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
`intercept_` : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, coef0=0.0, degree=3, gamma=0.0, kernel='rbf',
max_iter=-1, nu=0.5, probability=False, shrinking=True, tol=0.001,
verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[ 1.]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma=0.0,
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, verbose=False, max_iter=-1):
super(NuSVC, self).__init__('nu_svc', kernel, degree, gamma, coef0,
tol, 0., nu, 0., shrinking, probability, cache_size,
"auto", None, verbose, max_iter)
class SVR(BaseLibSVM, RegressorMixin):
"""epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Parameters
----------
C : float, optional (default=1.0)
penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
degree of kernel function
is significant only in poly, rbf, sigmoid
gamma : float, optional (default=0.0)
kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
will be taken.
coef0 : float, optional (default=0.0)
independent term in kernel function. It is only significant
in poly/sigmoid.
probability: boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling predict_proba.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
`support_` : array-like, shape = [n_SV]
Index of support vectors.
`support_vectors_` : array-like, shape = [nSV, n_features]
Support vectors.
`dual_coef_` : array, shape = [n_classes-1, n_SV]
Coefficients of the support vector in the decision function.
`coef_` : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
`intercept_` : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma=0.0,
kernel='rbf', max_iter=-1, probability=False, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
"""
def __init__(self, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, tol=1e-3,
C=1.0, epsilon=0.1, shrinking=True, probability=False,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__('epsilon_svr', kernel, degree, gamma, coef0,
tol, C, 0., epsilon, shrinking, probability, cache_size,
"auto", None, verbose, max_iter)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of SVR.
The implementation is based on libsvm.
Parameters
----------
C : float, optional (default=1.0)
penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken. Only available if impl='nu_svc'.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
degree of kernel function
is significant only in poly, rbf, sigmoid
gamma : float, optional (default=0.0)
kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
will be taken.
coef0 : float, optional (default=0.0)
independent term in kernel function. It is only significant
in poly/sigmoid.
probability: boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling predict_proba.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
`support_` : array-like, shape = [n_SV]
Index of support vectors.
`support_vectors_` : array-like, shape = [nSV, n_features]
Support vectors.
`dual_coef_` : array, shape = [n_classes-1, n_SV]
Coefficients of the support vector in the decision function.
`coef_` : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
`intercept_` : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma=0.0, kernel='rbf',
max_iter=-1, nu=0.1, probability=False, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma=0.0, coef0=0.0, shrinking=True,
probability=False, tol=1e-3, cache_size=200,
verbose=False, max_iter=-1):
super(NuSVR, self).__init__('nu_svr', kernel, degree, gamma, coef0,
tol, C, nu, 0., shrinking, probability, cache_size,
"auto", None, verbose, max_iter)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outliers Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional
Degree of kernel function. Significant only in poly, rbf, sigmoid.
gamma : float, optional (default=0.0)
kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
will be taken.
coef0 : float, optional
Independent term in kernel function. It is only significant in
poly/sigmoid.
tol : float, optional
Tolerance for stopping criterion.
shrinking: boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
`support_` : array-like, shape = [n_SV]
Index of support vectors.
`support_vectors_` : array-like, shape = [nSV, n_features]
Support vectors.
`dual_coef_` : array, shape = [n_classes-1, n_SV]
Coefficient of the support vector in the decision function.
`coef_` : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
`intercept_` : array, shape = [n_classes-1]
Constants in decision function.
"""
def __init__(self, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, tol=1e-3,
nu=0.5, shrinking=True, cache_size=200, verbose=False,
max_iter=-1):
super(OneClassSVM, self).__init__('one_class', kernel, degree, gamma,
coef0, tol, 0., nu, 0., shrinking, False, cache_size,
"auto", None, verbose, max_iter)
def fit(self, X, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, [], sample_weight=sample_weight,
**params)
return self
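# A minimal usage sketch for OneClassSVM (hypothetical data, shown as a
# comment rather than a doctest): fit on a sample assumed to be mostly
# "normal", then `predict` labels points as inliers (+1) or outliers (-1).
# As documented above, nu bounds the fractions involved: with nu=0.1 on 100
# training points, roughly at most 10 of them may fall outside the learned
# boundary and at least about 10 become support vectors.
#
#   import numpy as np
#   from sklearn.svm import OneClassSVM
#   X = np.random.randn(100, 2)
#   clf = OneClassSVM(nu=0.1, kernel='rbf', gamma=0.1).fit(X)
#   labels = clf.predict(X)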
| unlicense |
aurelieladier/openturns | python/doc/pyplots/WelchFactory.py | 2 | 2562 | import openturns as ot
from math import exp
from matplotlib import pyplot as plt
from openturns.viewer import View
# Create the time grid
# In the context of the spectral estimate or Fourier transform use,
# we use data blocs with size of form 2^p
tMin = 0.
timeStep = 0.1
size = pow(2, 12)
myTimeGrid = ot.RegularGrid(tMin, timeStep, size)
# We fix the parameters of the Cauchy model
amplitude = [5]
scale = [3]
model = ot.ExponentialCauchy(scale, amplitude)
myNormalProcess = ot.SpectralNormalProcess(model, myTimeGrid)
# Get a time series or a sample of time series
#myTimeSeries = myNormalProcess.getRealization()
mySample = myNormalProcess.getSample(1000)
mySegmentNumber = 10
myOverlapSize = 0.3
# Build a spectral model factory
myFactory = ot.WelchFactory(ot.Hanning(), mySegmentNumber, myOverlapSize)
# Estimation on a TimeSeries or on a ProcessSample
#myEstimatedModel_TS = myFactory.build(myTimeSeries)
myEstimatedModel_PS = myFactory.build(mySample)
# Change the filtering window
myFactory.setFilteringWindows(ot.Hamming())
# Get the FFT algorithm
myFFT = myFactory.getFFTAlgorithm()
# Get the frequencyGrid
frequencyGrid = myEstimatedModel_PS.getFrequencyGrid()
# With the model, we want to compare values
# We compare the estimated values with the theoretical values
plotSample = ot.NumericalSample(frequencyGrid.getN(), 3)
# Loop of comparison ==> data are saved in plotSample
for k in range(frequencyGrid.getN()):
freq = frequencyGrid.getStart() + k * frequencyGrid.getStep()
plotSample[k, 0] = freq
plotSample[k, 1] = abs(myEstimatedModel_PS(freq)[0, 0])
plotSample[k, 2] = abs(model.computeSpectralDensity(freq)[0, 0])
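# --- Illustrative aside (not part of the original script) ---
# The loop above compares the Welch estimate against the theoretical density.
# For readers without OpenTURNS, the Welch averaging step itself can be
# sketched with scipy.signal.welch (a swapped-in library); the white-noise
# signal below is a placeholder, not the Cauchy-model process used above.
def _scipy_welch_sketch():
    import numpy as np
    from scipy import signal
    fs = 1.0 / timeStep                        # sampling frequency of the grid above
    x = np.random.randn(size)                  # placeholder signal of the same length
    nperseg = size // mySegmentNumber          # segment (block) size
    noverlap = int(myOverlapSize * nperseg)    # overlap between consecutive blocks
    freqs, psd = signal.welch(x, fs=fs, window='hann', nperseg=nperseg, noverlap=noverlap)
    return freqs, psd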
# Graph section
# We build 2 curves
# each one is a function of the frequency values
ind = ot.Indices(2)
ind.fill()
# Some cosmetics : labels, legend position, ...
graph = ot.Graph("Estimated spectral function - Validation", "Frequency",
"Spectral density function", True, "topright", 1.0, ot.GraphImplementation.LOGY)
# The first curve is the estimated density as a function of frequency
curve1 = ot.Curve(plotSample.getMarginal(ind))
curve1.setColor('blue')
curve1.setLegend('estimate model')
# The second curve is the theoretical density as a function of frequency
ind[1] = 2
curve2 = ot.Curve(plotSample.getMarginal(ind))
curve2.setColor('red')
curve2.setLegend('Cauchy model')
graph.add(curve1)
graph.add(curve2)
fig = plt.figure(figsize=(10, 4))
plt.suptitle('Spectral model estimation')
graph_axis = fig.add_subplot(111)
view = View(graph, figure=fig, axes=[graph_axis], add_legend=False)
| lgpl-3.0 |
annahs/atmos_research | WHI_long_term_correlation_GC_SP2_for_NPac_subclusters.py | 1 | 2811 | import matplotlib.pyplot as plt
from matplotlib import dates
import numpy as np
import os
import sys
from pprint import pprint
from datetime import datetime
from datetime import timedelta
import copy
import calendar
import mysql.connector
timezone = -8
#database connection
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
#select data (spikes and fire times already removed)
SP2_data_query = ('SELECT UNIX_UTC_6h_midtime, meas_mean_mass_conc, meas_rel_err, GC_v10_default, GC_default_rel_err, cluster,cluster_number FROM whi_gc_and_sp2_6h_mass_concs WHERE RH_threshold = 90')
cursor.execute(SP2_data_query)
raw_data = cursor.fetchall()
correlation_plot1 = []
correlation_plot3 = []
correlation_plot5 = []
correlation_plot10 = []
for row in raw_data:
UTC_ts = row[0]
PST_date_time = datetime.utcfromtimestamp(UTC_ts) + timedelta(hours = timezone)
meas_mass_conc = float(row[1])
meas_rel_err = float(row[2])
meas_abs_err = meas_rel_err*meas_mass_conc
GC_mass_conc = row[3]
GC_rel_err = 0#row[4]
GC_abs_err = GC_rel_err*GC_mass_conc
cluster = row[5]
ratio = GC_mass_conc/meas_mass_conc
ratio_abs_err = (meas_rel_err + GC_rel_err)*ratio
cluster_number = row[6]
if cluster_number == 1:
correlation_plot1.append([meas_mass_conc,GC_mass_conc])
if cluster_number == 3:
correlation_plot3.append([meas_mass_conc,GC_mass_conc])
if cluster_number == 5:
correlation_plot5.append([meas_mass_conc,GC_mass_conc])
if cluster_number == 10:
correlation_plot10.append([meas_mass_conc,GC_mass_conc])
#if cluster == 'Cont':
# correlation_plot.append([meas_mass_conc,GC_mass_conc])
meas1 = [row[0] for row in correlation_plot1]
GC1 = [row[1] for row in correlation_plot1]
meas3 = [row[0] for row in correlation_plot3]
GC3 = [row[1] for row in correlation_plot3]
meas5 = [row[0] for row in correlation_plot5]
GC5 = [row[1] for row in correlation_plot5]
meas10 = [row[0] for row in correlation_plot10]
GC10 = [row[1] for row in correlation_plot10]
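# --- Illustrative aside (not part of the original script) ---
# A hedged sketch of how the GEOS-Chem vs SP2 agreement for each cluster could
# be quantified before plotting; it only uses the lists built above.
def _cluster_agreement_stats():
    stats = {}
    for name, (meas, GC) in {'cluster 1': (meas1, GC1), 'cluster 3': (meas3, GC3),
                             'cluster 5': (meas5, GC5), 'cluster 10': (meas10, GC10)}.items():
        if len(meas) < 2:
            continue
        r = np.corrcoef(meas, GC)[0, 1]              # Pearson correlation
        slope, intercept = np.polyfit(meas, GC, 1)   # least-squares fit: GC ~ SP2
        stats[name] = (r, slope, intercept)
    return stats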
fig = plt.figure(figsize=(10,8))
ax1 = fig.add_subplot(224)
ax1.scatter(meas1, GC1,color='m')
ax1.set_ylim(0,300)
ax1.set_xlim(0,300)
ax1.set_ylabel('GEOS-Chem 6h mass conc')
ax1.set_xlabel('SP2 6h mass conc')
ax2 = fig.add_subplot(222)
ax2.scatter(meas3, GC3,color='g')
ax2.set_ylim(0,300)
ax2.set_xlim(0,300)
ax2.set_ylabel('GEOS-Chem 6h mass conc')
ax2.set_xlabel('SP2 6h mass conc')
ax3 = fig.add_subplot(223)
ax3.scatter(meas5, GC5,color='b')
ax3.set_ylim(0,300)
ax3.set_xlim(0,300)
ax3.set_ylabel('GEOS-Chem 6h mass conc')
ax3.set_xlabel('SP2 6h mass conc')
ax4 = fig.add_subplot(221)
ax4.scatter(meas10, GC10, color='c')
ax4.set_ylim(0,300)
ax4.set_xlim(0,300)
ax4.set_ylabel('GEOS-Chem 6h mass conc')
ax4.set_xlabel('SP2 6h mass conc')
plt.show() | mit |
ShibataLabPrivate/GPyWorkshop | Experiments/interactive.py | 1 | 1357 | # interactive.py: demo to interact with latent space learnt by BGPLVM
# Author: Nishanth Koganti
# Date: 2017/4/1
# Source: Gaussian Process Summer School, 2015
# import libraries
import matplotlib
import GPy
import numpy as np
import cPickle as pickle
from matplotlib import pyplot as plt
# choose subset of digits to work on
which = [0,1,2,6,7,9]
data = np.load('digits.npy')
data = data[which,:,:,:]
num_classes, num_samples, height, width = data.shape
# get the digits data and corresponding labels
Y = data.reshape((data.shape[0]*data.shape[1],data.shape[2]*data.shape[3]))
lbls = np.array([[l]*num_samples for l in which]).reshape(Y.shape[0], 1)
labels = np.array([[str(l)]*num_samples for l in which])
# load the pickle file
with open('digits.p', 'rb') as f:
m = pickle.load(f)
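# --- Illustrative aside (not part of the original demo) ---
# 'digits.p' above is assumed to hold a pre-trained Bayesian GPLVM. A hedged
# sketch of how such a model might be fitted with GPy is shown below; the
# latent dimensionality and the number of inducing points are arbitrary.
def _train_bgplvm_sketch(Y_data):
    model = GPy.models.BayesianGPLVM(Y_data, input_dim=5, num_inducing=25)
    model.optimize(messages=True, max_iters=1000)   # may take a while on real data
    return model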
# create interactive visualizer
fig = plt.figure('Latent Space', figsize=(16,6))
ax_latent = fig.add_subplot(121)
ax_scales = fig.add_subplot(122)
fig_out = plt.figure('Output', figsize=(1,1))
ax_image = fig_out.add_subplot(111)
fig_out.tight_layout(pad=0)
data_show = GPy.plotting.matplot_dep.visualize.image_show(m.Y[0:1, :], dimensions=(16, 16), transpose=0, invert=0, scale=False, axes=ax_image)
lvm_visualizer = GPy.plotting.matplot_dep.visualize.lvm_dimselect(m.X.mean.copy(), m, data_show, ax_latent, ax_scales, labels=labels.flatten())
plt.show()
| mit |
GMDSP-Linked-Data/RDF-work-in-progress | Fire/FireStatsDataCube2.py | 1 | 7444 | __author__ = 'danielkershaw'
import datetime, os, sys, re, time
from rdflib import ConjunctiveGraph, Namespace, Literal
from rdflib.store import NO_STORE, VALID_STORE
import pandas
from tempfile import mktemp
try:
import imdb
except ImportError:
imdb = None
from rdflib import BNode, Graph, URIRef, Literal, Namespace, RDF
from rdflib.namespace import FOAF, DC
from rdflib.namespace import XSD
storefn = os.path.dirname(os.path.realpath(__file__)) + '/Output/Fire2.rdf'
storen3 = os.path.dirname(os.path.realpath(__file__)) + '/Output/Fire2.ttl'
#storefn = '/home/simon/codes/film.dev/movies.n3'
storeuri = 'file://'+storefn
storeun3 = 'file://'+storen3
title = 'Movies viewed by %s'
r_who = re.compile('^(.*?) <([a-z0-9_-]+(\.[a-z0-9_-]+)*@[a-z0-9_-]+(\.[a-z0-9_-]+)+)>$')
SPACIAL = Namespace('http://data.ordnancesurvey.co.uk/ontology/spatialrelations/')
POST = Namespace('http://data.ordnancesurvey.co.uk/ontology/postcode/')
ADMINGEO = Namespace('http://data.ordnancesurvey.co.uk/ontology/admingeo/')
RDFS = Namespace('http://www.w3.org/2000/01/rdf-schema#')
GEO = Namespace('http://www.w3.org/2003/01/geo/wgs84_pos#')
VCARD = Namespace('http://www.w3.org/2006/vcard/ns#')
SCHEME = Namespace('http://schema.org/')
SDMX = Namespace("http://purl.org/linked-data/sdmx#")
SDMXCONCEPT = Namespace("http://purl.org/linked-data/sdmx/2009/concept#")
SDMXDIMENSION = Namespace("http://purl.org/linked-data/sdmx/2009/dimension#")
SDMXATTRIBUTE = Namespace("http://purl.org/linked-data/sdmx/2009/attribute#")
SDMXMEASURE= Namespace("http://purl.org/linked-data/sdmx/2009/measure#")
qb = Namespace("http://purl.org/linked-data/cube#")
INTERVAL = Namespace("http://www.w3.org/2006/time#")
COUNCILTAX = Namespace('http://data.gmdsp.org.uk/data/manchester/council-tax/')
DATEREF = Namespace('http://reference.data.gov.uk/id/day/')
COUNCILBAND = Namespace('http://data.gmdsp.org.uk/def/council/counciltax/council-tax-bands/')
class Store:
def __init__(self):
self.graph = Graph()
rt = self.graph.open(storeuri, create=False)
if rt == None:
# There is no underlying Sleepycat infrastructure, create it
self.graph.open(storeuri, create=True)
else:
assert rt == VALID_STORE, 'The underlying store is corrupt'
self.graph.bind('os', POST)
self.graph.bind('rdfs', RDFS)
self.graph.bind('geo', GEO)
self.graph.bind('vcard', VCARD)
self.graph.bind('scheme', SCHEME)
self.graph.bind('counciltax', COUNCILTAX)
self.graph.bind('qb', qb)
self.graph.bind('admingeo',ADMINGEO)
self.graph.bind('sdmx-attribute', SDMXATTRIBUTE)
self.graph.bind('interval', INTERVAL)
self.graph.bind('day', DATEREF)
self.graph.bind('councilband', COUNCILBAND)
def save(self):
self.graph.serialize(storeuri, format='pretty-xml')
self.graph.serialize(storeun3, format='n3')
def new_postcode(self, postcode):
pc = COUNCILTAX
def refArea(self):
d = COUNCILTAX["refArea"]
self.graph.add((d, RDF.type, qb["Property"]))
self.graph.add((d, RDF.type, qb["DimensionProperty"]))
self.graph.add((d, RDFS["label"], Literal("reference area")))
self.graph.add((d, RDFS["subPropertyOf"], SDMXDIMENSION["refArea"]))
self.graph.add((d, RDFS["range"], POST["PostcodeArea"]))
self.graph.add((d, qb["concept"], SDMXCONCEPT["refArea"]))
def refPeriod(self):
d = COUNCILTAX["refPeriod"]
self.graph.add((d, RDF.type, qb["Property"]))
self.graph.add((d, RDF.type, qb["DimensionProperty"]))
self.graph.add((d, RDFS["label"], Literal("reference period")))
self.graph.add((d, RDFS["subPropertyOf"], SDMXDIMENSION["refPeriod"]))
self.graph.add((d, RDFS["range"], INTERVAL["Interval"]))
self.graph.add((d, qb["concept"], SDMXCONCEPT["refPeriod"]))
def refBand(self):
d = COUNCILTAX["refBand"]
self.graph.add((d, RDF.type, qb["Property"]))
self.graph.add((d, RDF.type, qb["DimensionProperty"]))
self.graph.add((d, RDFS["label"], Literal("reference band")))
self.graph.add((d, RDFS["domain"], URIRef("http://data.gmdsp.org.uk/def/council/counciltax/councilTaxBand")))
def countDef(self):
d = COUNCILTAX["countDef"]
self.graph.add((d, RDF.type, RDF["Property"]))
self.graph.add((d, RDF.type, qb["MeasureProperty"]))
self.graph.add((d, RDFS["label"], Literal("Council tax band count")))
self.graph.add((d, RDFS["subPropertyOf"], SDMXMEASURE["obsValue"]))
self.graph.add((d, RDFS["range"], XSD.decimal))
def new_DSD(self):
dsd = COUNCILTAX["DSD"]
self.graph.add((dsd, RDF.type, qb["DataStructureDefinition"]))
self.graph.add((dsd, qb["dimension"], COUNCILTAX["refArea"]))
self.graph.add((dsd, qb["dimension"], COUNCILTAX["refPeriod"]))
self.graph.add((dsd, qb["dimension"], COUNCILTAX["refBand"]))
self.graph.add((dsd, qb["measure"], COUNCILTAX["countDef"]))
def new_dataset(self):
ds = COUNCILTAX["dataset-le1"]
self.graph.add((ds, RDF.type, qb["DataSet"]))
self.graph.add((ds, RDFS["label"], Literal("Tax Banding")))
self.graph.add((ds, RDFS["comment"], Literal("xxxxx")))
self.graph.add((ds, qb["structure"], COUNCILTAX['data']))
def new_observation(self, HSC, LSOA_CODE, date, count):
observation = COUNCILTAX[LSOA_CODE.replace(" ", "-").lower()+HSC.replace(" ", "-").lower()]
self.graph.add((observation, RDF.type, qb['Observation']))
self.graph.add((observation, qb["dataSet"], URIRef('http://data.gmdsp.org.uk/data/manchester/council-tax')))
self.graph.add((observation, COUNCILTAX['refArea'], URIRef("http://data.ordnancesurvey.co.uk/id/postcodeunit/"+LSOA_CODE.replace(" ",""))))
self.graph.add((observation, COUNCILTAX['countDef'], Literal(count, datatype=XSD.integer)))
        #reference this to the list in the data set which Ian is making.
self.graph.add((observation, COUNCILTAX['refBand'], COUNCILBAND[LSOA_CODE]))
self.graph.add((observation, COUNCILTAX['refPeriod'], DATEREF[time.strftime('%Y-%m-%d',date)]))
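# --- Illustrative aside (not part of the original script) ---
# new_observation() above attaches one qb:Observation to the shared graph; below
# is a hedged, self-contained sketch of the same triple pattern on a throwaway
# graph (the observation URI and count value are placeholders for illustration).
def _observation_sketch():
    g = Graph()
    obs = COUNCILTAX["example-observation"]
    g.add((obs, RDF.type, qb['Observation']))
    g.add((obs, COUNCILTAX['countDef'], Literal(42, datatype=XSD.integer)))
    return g.serialize(format='turtle')   # Turtle text for quick inspection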
def keyfn(x):
return x['Postcode']
def keyfnp(x):
return x['Band']
def main(argv=None):
s = Store()
s.refPeriod()
s.refArea()
s.refBand()
s.countDef()
s.new_dataset()
#s.new_DSD()
count = 0
a = pandas.DataFrame.from_csv('./Data/HSCDatabyWard.csv')
print a
for i in a.index.tolist():
for j in list(a.columns.values):
print "--------"
print j
if isNaN(i) == False:
print "HERE"
print(i, j, a.ix[i,j])
try:
print time.strptime(j.split()[2].split("/")[0], "%Y")
s.new_observation(j.split()[0]+" "+j.split()[1] , i, time.strptime(j.split()[2].split("/")[0], "%Y"), a.ix[i,j])
except:
print "Unexpected error:", sys.exc_info()[0]
#reader = csv.DictReader(open('./Data/HSCDatabyLSOA.csv', mode='rU'))
#for r in reader:
#
# s.new_observation(b, k, time.strptime("01/01/0001", "%d/%m/%Y"), len(n))
# count = count + 1
print "-- Saving --"
s.save()
def isNaN(num):
    return num != num
if __name__ == '__main__':
main()
| mit |
Phyks/replot | replot/adaptive_sampling.py | 1 | 5133 | """
Sample a 1D function to given tolerance by adaptive subdivision.
The result of sampling is a set of points that, if plotted, produces a smooth
curve with also sharp features of the function resolved.
This routine is useful in computing functions that are expensive to compute,
and have sharp features — it makes more sense to adaptively dedicate more
sampling points for the sharp features than the smooth parts.
Source: http://central.scipy.org/item/53/1/adaptive-sampling-of-1d-functions
License: Creative Commons Zero (almost public domain) http://scpyce.org/cc0
(Slightly) modified by Phyks (Lucas Verney).
"""
import numpy as np
def sample_function(func, points, tol=0.05, min_points=16, max_level=16,
sample_transform=None):
"""
Sample a 1D function to given tolerance by adaptive subdivision.
The result of sampling is a set of points that, if plotted,
produces a smooth curve with also sharp features of the function
resolved.
Parameters
----------
func : callable
Function func(x) of a single argument. It is assumed to be vectorized.
points : array-like, 1D
Initial points to sample, sorted in ascending order.
These will determine also the bounds of sampling.
tol : float, optional
Tolerance to sample to. The condition is roughly that the total
length of the curve on the (x, y) plane is computed up to this
tolerance.
    min_points : int, optional
Minimum number of points to sample.
max_level : int, optional
Maximum subdivision depth.
sample_transform : callable, optional
Function w = g(x, y). The x-samples are generated so that w
is sampled.
Returns
-------
x : ndarray
X-coordinates
y : ndarray
Corresponding values of func(x)
Notes
-----
This routine is useful in computing functions that are expensive
to compute, and have sharp features --- it makes more sense to
adaptively dedicate more sampling points for the sharp features
than the smooth parts.
Examples
--------
>>> def func(x):
... '''Function with a sharp peak on a smooth background'''
... a = 0.001
... return x + a**2/(a**2 + x**2)
...
>>> x, y = sample_function(func, [-1, 1], tol=1e-3)
>>> import matplotlib.pyplot as plt
>>> xx = np.linspace(-1, 1, 12000)
>>> plt.plot(xx, func(xx), '-', x, y, '.')
>>> plt.show()
"""
x, y = _sample_function(func, points, values=None, mask=None, depth=0,
tol=tol,
min_points=min_points, max_level=max_level,
sample_transform=sample_transform)
return (x, y[0])
def _sample_function(func, points, values=None, mask=None, tol=0.05,
depth=0, min_points=16, max_level=16,
sample_transform=None):
points = np.asarray(points)
if values is None:
values = np.atleast_2d(func(points))
if mask is None:
mask = Ellipsis
if depth > max_level:
# recursion limit
return points, values
x_a = points[:-1][mask]
x_b = points[1:][mask]
x_c = .5*(x_a + x_b)
y_c = np.atleast_2d(func(x_c))
x_2 = np.r_[points, x_c]
y_2 = np.r_['-1', values, y_c]
j = np.argsort(x_2)
x_2 = x_2[..., j]
y_2 = y_2[..., j]
# -- Determine the intervals at which refinement is necessary
if len(x_2) < min_points:
mask = np.ones([len(x_2)-1], dtype=bool)
else:
# represent the data as a path in N dimensions (scaled to unit box)
if sample_transform is not None:
y_2_val = sample_transform(x_2, y_2)
else:
y_2_val = y_2
p = np.r_['0',
x_2[None, :],
y_2_val.real.reshape(-1, y_2_val.shape[-1]),
y_2_val.imag.reshape(-1, y_2_val.shape[-1])]
sz = (p.shape[0]-1) // 2
xscale = x_2.ptp(axis=-1)
yscale = abs(y_2_val.ptp(axis=-1)).ravel()
p[0] /= xscale
p[1:sz+1] /= yscale[:, None]
p[sz+1:] /= yscale[:, None]
# compute the length of each line segment in the path
dp = np.diff(p, axis=-1)
s = np.sqrt((dp**2).sum(axis=0))
s_tot = s.sum()
# compute the angle between consecutive line segments
dp /= s
dcos = np.arccos(np.clip((dp[:, 1:] * dp[:, :-1]).sum(axis=0), -1, 1))
# determine where to subdivide: the condition is roughly that
# the total length of the path (in the scaled data) is computed
# to accuracy `tol`
dp_piece = dcos * .5*(s[1:] + s[:-1])
mask = (dp_piece > tol * s_tot)
mask = np.r_[mask, False]
mask[1:] |= mask[:-1].copy()
# -- Refine, if necessary
if mask.any():
return _sample_function(func, x_2, y_2, mask, tol=tol, depth=depth+1,
min_points=min_points, max_level=max_level,
sample_transform=sample_transform)
else:
return x_2, y_2
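# --- Illustrative aside (not part of the original module) ---
# A hedged demo of sample_function() on a narrow Lorentzian-like peak: the
# adaptive sampler should concentrate points near x = 0 while using far fewer
# samples than a comparable uniform grid. The tolerance value is arbitrary.
def _adaptive_sampling_demo():
    def peak(x):
        a = 0.01
        return x + a**2 / (a**2 + x**2)
    x, y = sample_function(peak, [-1, 1], tol=1e-3)
    # fraction of samples that landed in the central 2% of the interval
    frac_near_peak = np.mean(np.abs(x) < 0.02)
    return len(x), frac_near_peak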
| mit |
cosurgi/trunk | examples/simple-scene/simple-scene-plot.py | 2 | 2065 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import matplotlib
matplotlib.use('TkAgg')
O.engines=[
ForceResetter(),
InsertionSortCollider([Bo1_Sphere_Aabb(),Bo1_Box_Aabb()]),
InteractionLoop(
[Ig2_Sphere_Sphere_ScGeom(),Ig2_Box_Sphere_ScGeom()],
[Ip2_FrictMat_FrictMat_FrictPhys()],
[Law2_ScGeom_FrictPhys_CundallStrack()]
),
NewtonIntegrator(damping=.2,gravity=(0,0,-9.81)),
###
### NOTE this extra engine:
###
	### You want a snapshot to be taken every 1 sec (realTimeLim) or every 50 iterations (iterLim),
	### whichever comes sooner. The virtTimeLim attribute is unset, hence the virtual time period is not taken into account.
PyRunner(iterPeriod=20,command='myAddPlotData()')
]
O.bodies.append(box(center=[0,0,0],extents=[.5,.5,.5],fixed=True,color=[1,0,0]))
O.bodies.append(sphere([0,0,2],1,color=[0,1,0]))
O.dt=.002*PWaveTimeStep()
############################################
##### now the part pertaining to plots #####
############################################
from yade import plot
## we will have 2 plots:
## 1. t as function of i (joke test function)
## 2. i as function of t on left y-axis ('|||' makes the separation) and z_sph, v_sph (as green circles connected with line) and z_sph_half again as function of t
plot.plots={'i':('t'),'t':('z_sph',None,('v_sph','go-'),'z_sph_half')}
## this function is called by plotDataCollector
## it should add data with the labels that we will plot
## if a datum is not specified (but exists), it will be NaN and will not be plotted
def myAddPlotData():
sph=O.bodies[1]
## store some numbers under some labels
plot.addData(t=O.time,i=O.iter,z_sph=sph.state.pos[2],z_sph_half=.5*sph.state.pos[2],v_sph=sph.state.vel.norm())
print("Now calling plot.plot() to show the figures. The timestep is artificially low so that you can watch graphs being updated live.")
plot.liveInterval=.2
plot.plot(subPlots=False)
O.run(int(2./O.dt));
#plot.saveGnuplot('/tmp/a')
## you can also access the data in plot.data['i'], plot.data['t'] etc, under the labels they were saved.
| gpl-2.0 |
LucasGandel/TubeTK | Base/Python/pyfsa/core/fsa.py | 8 | 14937 | """fsa.py
This modules implements fine-structure analysis of undirected
graphs with (numeric) vertex attributes. It further contains
functionality to estimate the feature distribution using Gaussian
mixture models, or to build a Bag-of-Words representation from
a collection of feature vectors.
The idea of fine-structure analysis was recently proposed in
[1] Macindoe, O. and W. Richards, "Graph Comparison Using Fine
Structure Analysis". In: Social Computing '10
Note: We do not implement the LBG features of [1]; Our graph
features include a subset of the features proposed in [2]
[2] Li. G. et al., "Graph Classification via Topological and Label
Attributes". In: MLG '11
as well as some additional generic features available in networkx.
"""
__license__ = "Apache License, Version 2.0 (see TubeTK)"
__author__ = "Roland Kwitt, Kitware Inc., 2013"
__email__ = "E-Mail: [email protected]"
__status__ = "Development"
# Graph handling
import networkx as nx
from networkx.algorithms import bipartite
# Machine learning
import sklearn.mixture.gmm as gm
from sklearn.cluster import KMeans
from collections import defaultdict
# Misc.
import logging
import numpy as np
import scipy.sparse
import time
import sys
import os
attr_list = [ #Average degree
lambda g : np.mean([e for e in g.degree().values()]),
# Average eccentricity
lambda g : np.mean([i for i in nx.eccentricity(g).values()]),
# Average closeness centrality
lambda g : np.mean([e for e in nx.closeness_centrality(g).values()]),
# Percentage of isolated points (i.e., degree(v) = 1)
lambda g : float(len(np.where(np.array(nx.degree(g).values())==1)[0]))/g.order(),
# Spectral radius (i.e., largest AM eigenvalue)
              lambda g : np.max(np.abs(nx.adjacency_spectrum(g))),
# Spectral trace (i.e., sum of abs. eigenvalues)
lambda g : np.sum(np.abs(nx.adjacency_spectrum(g))),
# Label entropy, as defined in [2]
lambda g : label_entropy([e[1]['type'] for e in g.nodes(data=True)]),
# Mixing coefficient of attributes
lambda g : np.linalg.det(nx.attribute_mixing_matrix(g,'type')),
              # Fraction of vertices with eccentricity == radius (i.e., central points)
lambda g : np.mean(float(len(nx.center(g)))/g.order()),
# Link impurity, as defined in [2]
lambda g : link_impurity(g),
# Diameter := max(eccentricity)
lambda g : nx.diameter(g),
# Radius := min(eccentricity)
lambda g : nx.radius(g)]
def link_impurity(g):
"""Compute link impurity of vertex-labeled graph.
Parameters
----------
g : networkx Graph
Input graph with vertex attribute stored as 'type'.
Returns
-------
impurity : float
Link impurity, see [2]
"""
if len(g.nodes()) == 1:
return 0
edges = g.edges()
u = np.array([g.node[a]['type'] for (a,b) in edges])
v = np.array([g.node[b]['type'] for (a,b) in edges])
return float(len(np.nonzero(u - v)[0]))/len(edges)
def label_entropy(labels):
"""Compute entropy of label vector.
Parameters
----------
labels : numpy array, shape (L,)
The input labels.
Returns
-------
entropy : float
Entropy of the label vector, see [2]
"""
H = np.bincount(labels)
p = H[np.nonzero(H)].astype(float)/np.sum(H)
return np.abs(-np.sum(p * np.log(p)))
def graph_from_file(graph_file, label_file=None, n_skip=0):
"""Load graph from an ASCII file containing adjacency information.
Parameters
----------
graph_file : string
        Filename of the file containing all the adjacency information. Format of
        the adjacency matrix file is as follows:
[Header, optional]
0 1 1
1 0 0
0 1 0
        Interpretation: 3x3 adjacency matrix, e.g., with an edge between vertices
(0,1) and (0,2), etc.
label_file : string
Filename of the label information file. Here is an example:
[Header, optional]
5
2
1
Interpretation: 3 labels, v_0 label: 5, v_1 label: 2 and v_2 label: 1.
n_skip : int (default: 0)
Skip n header lines.
Returns
-------
G : networkx Graph
"""
logger = logging.getLogger()
if not os.path.exists(graph_file):
raise Exception("Graph file %s not found!" % graph_file)
# Load adjacency information and ensure (0,1) weights
adj_info = np.genfromtxt(graph_file, skip_header=n_skip)
adj_info[np.where(adj_info >= 1)] = 1
G = nx.Graph(adj_info)
if not label_file is None:
if not os.path.exists(label_file):
raise Exception("Label file %d not found!" % label_file)
labels = np.genfromtxt(label_file, skip_header=n_skip)
logger.debug("Loaded labelfile %s!" % label_file)
if len(labels) != len(G):
raise Exception("Size mismatch for labels!")
for idx,l in enumerate(labels):
G.node[idx]['type'] = int(l)
logger.debug("Built graph from %s with %d vertices." %
(graph_file, len(G)))
return G
def compute_graph_features(g, radius=2, sps=None, omit_degenerate=False):
"""Compute graph feature vector(s).
Parameters
----------
g : networkx input graph with N vertices
The input graph on which we need to compute graph features.
radius: int (default: 2)
Compute graph features from local neighborhoods of vertices,
where the notion of neighborhood is defined by the number of
hops to the neighbor, i.e., the radius. This assumes that the
initial edges weights when computing the shortest-paths are 1.
sps: numpy matrix, shape (N, N) (default : None)
Matrix of shortest-path information for the graph g.
omit_degenerate : boolean (default: False)
Currently, degenerate cases are subgraphs with just a single
vertex. If 'omit_degenerate' is 'True', these subgraphs are
not considered. Otherwise, the feature vector for such a sub-
graph is just a vector of zeros.
Returns
-------
v_mat : numpy matrix, shape (N, D)
A D-dimensional feature matrix with one feature vector for
each vertex. Features are computed for the given radius.
"""
logger = logging.getLogger()
# Recompute shortest paths if neccessary
if sps is None:
sps = nx.floyd_warshall_numpy(g)
# Feature matrix representation of graph
v_mat = np.zeros([len(g),len(attr_list)])
# Iterate over all nodes
degenerates = []
for n in g.nodes():
# Get n-th row of shortest path matrix
nth_row = np.array(sps[n,:]).ravel()
# Find elements within a certain radius
within_radius = np.where(nth_row <= radius)
# Build a subgraph from those nodes
sg = g.subgraph(within_radius[0])
# Single vertex sg is considered degenerate
if len(sg.nodes()) == 1:
# Keep track of degenerates
degenerates.append(n)
if omit_degenerate:
continue
# Feature vector is 0-vector
v = np.zeros((len(attr_list),))
else:
v = [attr_fun(sg) for attr_fun in attr_list]
v_mat[n,:] = np.asarray(v)
logger.info("Found %d generate cases!" % len(degenerates))
if len(degenerates):
logger.info("Pruning %d degenerate cases ..." % len(degenerates))
v_mat = np.delete(v_mat, degenerates, axis=0)
logger.debug("Computed (%d x %d) feature matrix." %
(v_mat.shape[0], v_mat.shape[1]))
return v_mat
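# --- Illustrative aside (not part of the original module) ---
# A hedged sketch of calling compute_graph_features() on a toy graph; the vertex
# 'type' labels assigned here are arbitrary and only serve to satisfy the
# label-dependent features (label entropy, link impurity, attribute mixing).
def _graph_features_sketch():
    g = nx.karate_club_graph()
    for n in g.nodes():
        g.node[n]['type'] = n % 3                # dummy 3-class vertex labels
    return compute_graph_features(g, radius=2)   # shape: (n_vertices, len(attr_list))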
def run_fsa(data, radii=None, recompute=True, out=None, skip=0,
omit_degenerate=False):
"""Run (f)ine-(s)tructure (a)nalysis.
    Parameters
    ----------
    data : list of N 3-tuples of (graph file, label file, class index).
        We iterate over this list and compute fine-structure
        topological features for each graph.
radii : list of 'int'
The desired neighborhood radii.
recompute: bool (default : True)
        Recompute features, otherwise try to load them from disk.
In case we try to load from disk, filenames are constructed
based on the value of the 'out' parameter.
out : string (default : None)
Base file name for the generated data files, e.g.,
'/tmp/data'. Two files will be written to disk:
/tmp/data.mat
/tmp/data.idx
where 'data.mat' contains the feature matrix, i.e., one
feature vector per vertex; 'data.idx' contains the indices
that identify which graph each feature vector belongs to;
skip : int (default : 0)
Skip N header entries when loading graphs.
omit_degenerate : boolean (default: False)
Currently, degenerate cases are subgraphs with just a single
vertex. If 'omit_degenerate' is 'True', these subgraphs are
not considered. Otherwise, the feature vector for such a sub-
graph is just a vector of zeros.
    Returns
    -------
    result : dict
        Dictionary with two entries: 'data_mat', a numpy matrix of shape
        (#vertices, len(radii)*D) holding one feature vector per vertex,
        where D is the number of features computed per radius setting, and
        'data_idx', a numpy array of shape (#vertices,) identifying which
        graph each feature vector belongs to.
"""
logger = logging.getLogger()
if radii is None:
raise Exception("No radii given!")
if not out is None:
mat_file = "%s.mat" % out
idx_file = "%s.idx" % out
if not recompute:
if (os.path.exists(mat_file) and
os.path.exists(idx_file)):
logger.info("Loading data from file(s).")
data_mat = np.genfromtxt(mat_file)
data_idx = np.genfromtxt(idx_file)
return {'data_mat' : data_mat,
'data_idx' : data_idx}
data_mat = []
data_idx = []
for idx, (cf, lf, lab) in enumerate(data):
logger.info("Processing %d-th graph ..." % idx)
T, x = graph_from_file(cf, lf, skip), []
for r in radii:
x.append(compute_graph_features(T, r, None, omit_degenerate))
xs = np.hstack(tuple(x))
data_mat.append(xs)
data_idx.append(np.ones((xs.shape[0], 1))*idx)
data_mat = np.vstack(tuple(data_mat))
data_idx = np.vstack(tuple(data_idx))
if not out is None:
np.savetxt(mat_file, data_mat, delimiter=' ')
np.savetxt(idx_file, data_idx, delimiter=' ',fmt="%d")
return {'data_mat' : data_mat,
'data_idx' : data_idx}
def estimate_gm(X,components=3,seed=None):
"""Estimate a Gaussian mixture model.
Note: Uses diagonal covariance matrices.
Parameters
----------
X : numpy matrix, shape (N,D)
Matrix of data samples (i-th row is i-th sample vector).
c : int (default : 3)
Number of desired mixture components.
seed : int (default : None)
Seed for the random number generator.
Returns
-------
gm_obj : sklearn.mixture.gmm object
Estimated GMM.
"""
logger = logging.getLogger()
n, d = X.shape
logger.info("Estimating %d-comp. GMM from (%d x %d) ..." %
(components, n, d))
gm_obj = gm.GMM (n_components=components,
covariance_type='diag',
random_state=seed)
gm_obj.fit(X)
return gm_obj
def learn_codebook(X, codebook_size=200, seed=None):
"""Learn a codebook.
Run K-Means clustering to compute a codebook. K-Means
    is initialized with K-Means++, uses a maximum of 500 iterations
    and 10 re-initializations.
    Parameters
    ----------
X : numpy matrix, shape (N,D)
Input data.
codebook_size : int (default : 200)
Desired number of codewords.
seed : int (default : None)
Seed for random number generator.
Returns
-------
cb : sklearn.cluster.KMeans object
KMeans object after fitting.
"""
logger = logging.getLogger()
logger.info("Learning codebook with %d words ..." % codebook_size)
# Run vector-quantization
cb = KMeans(codebook_size,
init="k-means++",
n_init=10,
max_iter=500,
random_state=seed)
cb.fit(X)
return cb
def bow(X, cb):
"""Compute a (normalized) BoW histogram.
Parameters
----------
X : numpy matrix, shape (N, D)
Input data.
cb : sklearn.cluster.KMeans
Already estimated codebook with C codewords.
Returns
-------
H : numpy array, shape (C,)
Normalized (l2-norm) BoW histogram.
"""
# Get nr. codewords
n,d = cb.cluster_centers_.shape
if d != X.shape[1]:
raise Exception("Dimensionality mismatch!")
# Compute closest cluster centers
assignments = cb.predict(X)
# Compute (normalized) BoW histogram
B = range(0,n+1)
return np.histogram(assignments,bins=B,density=True)[0]
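# --- Illustrative aside (not part of the original module) ---
# A hedged end-to-end sketch of the codebook / Bag-of-Words pipeline defined
# above, using random feature vectors in place of real graph features.
def _bow_pipeline_sketch():
    rng = np.random.RandomState(0)
    train_feats = rng.rand(500, 12)     # e.g. 12-D vertex feature vectors
    cb = learn_codebook(train_feats, codebook_size=20, seed=0)
    graph_feats = rng.rand(40, 12)      # feature vectors of one "graph"
    return bow(graph_feats, cb)         # normalized 20-bin BoW histogram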
def pp_gmm(X, models, argmax=True):
"""Compute the posterior probability of X under a set of GMM models.
Parameters
----------
X : numpy matrix, shape (N,D)
Data samples.
models : list of sklearn.mixture.gmm objects
List of C estimated GMMs.
argmax : boolean (default : True)
If 'True', the index of the class (represented by
        its model) with the highest a-posteriori probability
is computed. If 'False', the a-posteriori probability
        of each class (represented by the model) is computed for
each row in X. Note: We assume equal prior probabilities
for each class.
Returns
-------
maxp : numpy.int64, or np.array with shape (N, C)
Depending on whether 'argmax' is 'True' or
'False', the index of the class with the highest
a-posteriori probability is returned, or the
a-posteriori probabilities under each model (for
each feature vector in X).
"""
n,d = X.shape
n_models = len(models)
ll = np.zeros((n,n_models),dtype="float32")
for i, model in enumerate(models):
ll[:,i] = np.asarray(model.score(X)).ravel()
if argmax:
# Column-wise sum
sump = np.sum(ll,axis=0)
# LogSumExp to compute MAP
t0 = np.max(sump)
t1 = np.exp(sump - (np.log(np.sum(np.exp(sump - t0))) + t0))
max_idx = np.argmax(t1)
return max_idx
else:
# LogSumExp to compute row-wise MAP
t0 = np.asmatrix(np.max(ll,axis=1)).transpose()
t1 = np.log(np.sum(np.exp(ll - np.tile(t0,(1,n_models))),axis=1)) + t0
prob = np.exp(np.asmatrix(ll) - t1)
return prob
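# --- Illustrative aside (not part of the original module) ---
# A hedged sketch of two-class scoring with pp_gmm(): one GMM is fitted per
# class and a query sample set is assigned to the class whose model yields the
# highest aggregate a-posteriori probability (equal class priors are assumed).
def _pp_gmm_sketch():
    rng = np.random.RandomState(0)
    X_a = rng.randn(200, 3)                 # class A samples
    X_b = rng.randn(200, 3) + 4.0           # class B samples, shifted mean
    models = [estimate_gm(X_a, components=2, seed=0),
              estimate_gm(X_b, components=2, seed=0)]
    X_query = rng.randn(30, 3) + 4.0        # should look like class B
    return pp_gmm(X_query, models, argmax=True)   # expected result: index 1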
| apache-2.0 |
MartinDelzant/scikit-learn | sklearn/gaussian_process/gaussian_process.py | 78 | 34552 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <[email protected]>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
MACHINE_EPSILON = np.finfo(np.double).eps
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
        distances in D: D[k] = np.abs(X[ij[k, 0]] - X[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
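# --- Illustrative aside (not part of the original module) ---
# A small worked example of l1_cross_distances() for three 1-D inputs; the
# expected values in the comments follow directly from the definition above.
def _l1_cross_distances_example():
    X = np.array([[0.], [1.], [3.]])
    D, ij = l1_cross_distances(X)
    # D  -> [[1.], [3.], [2.]]   (|0-1|, |0-3|, |1-3|)
    # ij -> [[0, 1], [0, 2], [1, 2]]
    return D, ij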
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
The presentation implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
            Default assumes eval_MSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
pass
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = (np.log10(self.thetaL)
+ self.random_state.rand(*self.theta0.shape)
* np.log10(self.thetaU / self.thetaL))
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0).ravel(), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
            # Back up the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
            # Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = np.atleast_2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = np.atleast_2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = np.atleast_2d(self.thetaL)
self.thetaU = np.atleast_2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
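# Illustrative sketch of the parameter checks above (hypothetical values):
#   theta0=[1e-1, 1e-1], thetaL=[1e-3, 1e-3], thetaU=[1.0, 1.0]  -> accepted
#   theta0=[1e-1], thetaL=[1e-3, 1e-3], thetaU=[1.0, 1.0]        -> ValueError (length mismatch)
#   theta0=[1e-1], thetaL=[1e-3], thetaU=None                    -> ValueError (bounds must be both or neither)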
| bsd-3-clause |
drivendata/countable-care-3rd-place | src/train_predict_xg_bagging.py | 1 | 2957 | #!/usr/bin/env python
from __future__ import division
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_svmlight_file
from sklearn.metrics import log_loss
from sklearn.ensemble import BaggingClassifier
from xgbc import *
import argparse
import logging
import numpy as np
import time
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.DEBUG)
def train_predict(train_file, test_file, predict_valid_file, predict_test_file,
n_est=100, depth=4, lrate=.1, n_fold=5):
logging.info('Loading training and test data...')
X, y = load_svmlight_file(train_file)
X_tst, _ = load_svmlight_file(test_file)
X = X.todense()
X_tst = X_tst.todense()
xg = XGBoostClassifier(n_estimators=n_est,
eta=lrate,
max_depth=depth,
n_jobs=8)
clf = BaggingClassifier(base_estimator=xg,
n_estimators=5,
max_samples=0.9,
max_features=0.9,
random_state=42)
cv = StratifiedKFold(y, n_folds=n_fold, shuffle=True, random_state=2015)
logging.info('Cross validation...')
p_val = np.zeros_like(y)
lloss = 0.
for i_trn, i_val in cv:
clf.fit(X[i_trn], y[i_trn])
p_val[i_val] = clf.predict_proba(X[i_val])[:, 1]
lloss += log_loss(y[i_val], p_val[i_val])
logging.info('Log Loss = {:.4f}'.format(lloss / n_fold))
logging.info('Retraining with 100% data...')
clf.fit(X, y)
p_tst = clf.predict_proba(X_tst)[:, 1]
logging.info('Saving predictions...')
np.savetxt(predict_valid_file, p_val, fmt='%.6f')
np.savetxt(predict_test_file, p_tst, fmt='%.6f')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--train-file', required=True, dest='train_file')
parser.add_argument('--test-file', required=True, dest='test_file')
parser.add_argument('--predict-valid-file', required=True,
dest='predict_valid_file')
parser.add_argument('--predict-test-file', required=True,
dest='predict_test_file')
parser.add_argument('--n-est', type=int, dest='n_est')
parser.add_argument('--depth', type=int, dest='depth')
parser.add_argument('--lrate', type=float, dest='lrate')
args = parser.parse_args()
start = time.time()
train_predict(train_file=args.train_file,
test_file=args.test_file,
predict_valid_file=args.predict_valid_file,
predict_test_file=args.predict_test_file,
n_est=args.n_est,
depth=args.depth,
lrate=args.lrate)
logging.info('finished ({:.2f} min elapsed)'.format((time.time() - start) /
60))
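# Example invocation (hypothetical file names; the script only assumes
# svmlight-formatted train/test files and writes plain-text predictions):
#   python train_predict_xg_bagging.py --train-file train.sps --test-file test.sps \
#       --predict-valid-file xg_bag.val.txt --predict-test-file xg_bag.tst.txt \
#       --n-est 400 --depth 4 --lrate 0.05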
| mit |
ryfeus/lambda-packs | Tensorflow_Pandas_Numpy/source3.6/pandas/plotting/_misc.py | 2 | 21130 | # being a bit too dynamic
# pylint: disable=E1101
from __future__ import division
import numpy as np
from pandas.util._decorators import deprecate_kwarg
from pandas.core.dtypes.missing import notna
from pandas.compat import range, lrange, lmap, zip
from pandas.io.formats.printing import pprint_thing
from pandas.plotting._style import _get_standard_colors
from pandas.plotting._tools import _subplots, _set_ticks_props
def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
diagonal='hist', marker='.', density_kwds=None,
hist_kwds=None, range_padding=0.05, **kwds):
"""
Draw a matrix of scatter plots.
Parameters
----------
frame : DataFrame
alpha : float, optional
amount of transparency applied
figsize : (float,float), optional
a tuple (width, height) in inches
ax : Matplotlib axis object, optional
grid : bool, optional
setting this to True will show the grid
diagonal : {'hist', 'kde'}
pick between 'kde' and 'hist' for
either Kernel Density Estimation or Histogram
plot in the diagonal
marker : str, optional
Matplotlib marker type, default '.'
hist_kwds : other plotting keyword arguments
To be passed to hist function
density_kwds : other plotting keyword arguments
To be passed to kernel density estimate plot
range_padding : float, optional
relative extension of axis range in x and y
with respect to (x_max - x_min) or (y_max - y_min),
default 0.05
kwds : other plotting keyword arguments
To be passed to scatter function
Examples
--------
>>> df = DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
>>> scatter_matrix(df, alpha=0.2)
"""
df = frame._get_numeric_data()
n = df.columns.size
naxes = n * n
fig, axes = _subplots(naxes=naxes, figsize=figsize, ax=ax,
squeeze=False)
# no gaps between subplots
fig.subplots_adjust(wspace=0, hspace=0)
mask = notna(df)
marker = _get_marker_compat(marker)
hist_kwds = hist_kwds or {}
density_kwds = density_kwds or {}
# GH 14855
kwds.setdefault('edgecolors', 'none')
boundaries_list = []
for a in df.columns:
values = df[a].values[mask[a].values]
rmin_, rmax_ = np.min(values), np.max(values)
rdelta_ext = (rmax_ - rmin_) * range_padding / 2.
boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext))
for i, a in zip(lrange(n), df.columns):
for j, b in zip(lrange(n), df.columns):
ax = axes[i, j]
if i == j:
values = df[a].values[mask[a].values]
# Deal with the diagonal by drawing a histogram there.
if diagonal == 'hist':
ax.hist(values, **hist_kwds)
elif diagonal in ('kde', 'density'):
from scipy.stats import gaussian_kde
y = values
gkde = gaussian_kde(y)
ind = np.linspace(y.min(), y.max(), 1000)
ax.plot(ind, gkde.evaluate(ind), **density_kwds)
ax.set_xlim(boundaries_list[i])
else:
common = (mask[a] & mask[b]).values
ax.scatter(df[b][common], df[a][common],
marker=marker, alpha=alpha, **kwds)
ax.set_xlim(boundaries_list[j])
ax.set_ylim(boundaries_list[i])
ax.set_xlabel(b)
ax.set_ylabel(a)
if j != 0:
ax.yaxis.set_visible(False)
if i != n - 1:
ax.xaxis.set_visible(False)
if len(df.columns) > 1:
lim1 = boundaries_list[0]
locs = axes[0][1].yaxis.get_majorticklocs()
locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])]
adj = (locs - lim1[0]) / (lim1[1] - lim1[0])
lim0 = axes[0][0].get_ylim()
adj = adj * (lim0[1] - lim0[0]) + lim0[0]
axes[0][0].yaxis.set_ticks(adj)
if np.all(locs == locs.astype(int)):
# if all ticks are int
locs = locs.astype(int)
axes[0][0].yaxis.set_ticklabels(locs)
_set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
return axes
def _get_marker_compat(marker):
import matplotlib.lines as mlines
import matplotlib as mpl
if mpl.__version__ < '1.1.0' and marker == '.':
return 'o'
if marker not in mlines.lineMarkers:
return 'o'
return marker
def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds):
"""
Plot a multidimensional dataset in 2D.
Each Series in the DataFrame is represented as an evenly distributed
slice on a circle. Each data point is rendered in the circle according to
the value on each Series. Highly correlated `Series` in the `DataFrame`
are placed closer on the unit circle.
RadViz allows projecting an N-dimensional data set into a 2D space where the
influence of each dimension can be interpreted as a balance between the
influence of all dimensions.
More info available at the `original article
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.889>`_
describing RadViz.
Parameters
----------
frame : `DataFrame`
Pandas object holding the data.
class_column : str
Column name containing the name of the data point category.
ax : :class:`matplotlib.axes.Axes`, optional
A plot instance to which to add the information.
color : list[str] or tuple[str], optional
Assign a color to each category. Example: ['blue', 'green'].
colormap : str or :class:`matplotlib.colors.Colormap`, default None
Colormap to select colors from. If string, load colormap with that
name from matplotlib.
kwds : optional
Options to pass to matplotlib scatter plotting method.
Returns
-------
axes : :class:`matplotlib.axes.Axes`
See Also
--------
pandas.plotting.andrews_curves : Plot clustering visualization
Examples
--------
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'SepalLength': [6.5, 7.7, 5.1, 5.8, 7.6, 5.0, 5.4, 4.6,
... 6.7, 4.6],
... 'SepalWidth': [3.0, 3.8, 3.8, 2.7, 3.0, 2.3, 3.0, 3.2,
... 3.3, 3.6],
... 'PetalLength': [5.5, 6.7, 1.9, 5.1, 6.6, 3.3, 4.5, 1.4,
... 5.7, 1.0],
... 'PetalWidth': [1.8, 2.2, 0.4, 1.9, 2.1, 1.0, 1.5, 0.2,
... 2.1, 0.2],
... 'Category': ['virginica', 'virginica', 'setosa',
... 'virginica', 'virginica', 'versicolor',
... 'versicolor', 'setosa', 'virginica',
... 'setosa']
... })
>>> rad_viz = pd.plotting.radviz(df, 'Category')
"""
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def normalize(series):
a = min(series)
b = max(series)
return (series - a) / (b - a)
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
df = frame.drop(class_column, axis=1).apply(normalize)
if ax is None:
ax = plt.gca(xlim=[-1, 1], ylim=[-1, 1])
to_plot = {}
colors = _get_standard_colors(num_colors=len(classes), colormap=colormap,
color_type='random', color=color)
for kls in classes:
to_plot[kls] = [[], []]
m = len(frame.columns) - 1
s = np.array([(np.cos(t), np.sin(t))
for t in [2.0 * np.pi * (i / float(m))
for i in range(m)]])
for i in range(n):
row = df.iloc[i].values
row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)
y = (s * row_).sum(axis=0) / row.sum()
kls = class_col.iat[i]
to_plot[kls][0].append(y[0])
to_plot[kls][1].append(y[1])
for i, kls in enumerate(classes):
ax.scatter(to_plot[kls][0], to_plot[kls][1], color=colors[i],
label=pprint_thing(kls), **kwds)
ax.legend()
ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor='none'))
for xy, name in zip(s, df.columns):
ax.add_patch(patches.Circle(xy, radius=0.025, facecolor='gray'))
if xy[0] < 0.0 and xy[1] < 0.0:
ax.text(xy[0] - 0.025, xy[1] - 0.025, name,
ha='right', va='top', size='small')
elif xy[0] < 0.0 and xy[1] >= 0.0:
ax.text(xy[0] - 0.025, xy[1] + 0.025, name,
ha='right', va='bottom', size='small')
elif xy[0] >= 0.0 and xy[1] < 0.0:
ax.text(xy[0] + 0.025, xy[1] - 0.025, name,
ha='left', va='top', size='small')
elif xy[0] >= 0.0 and xy[1] >= 0.0:
ax.text(xy[0] + 0.025, xy[1] + 0.025, name,
ha='left', va='bottom', size='small')
ax.axis('equal')
return ax
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
def andrews_curves(frame, class_column, ax=None, samples=200, color=None,
colormap=None, **kwds):
"""
Generates a matplotlib plot of Andrews curves, for visualising clusters of
multivariate data.
Andrews curves have the functional form:
f(t) = x_1/sqrt(2) + x_2 sin(t) + x_3 cos(t) +
x_4 sin(2t) + x_5 cos(2t) + ...
Where x coefficients correspond to the values of each dimension and t is
linearly spaced between -pi and +pi. Each row of frame then corresponds to
a single curve.
Parameters
----------
frame : DataFrame
Data to be plotted, preferably normalized to (0.0, 1.0)
class_column : Name of the column containing class names
ax : matplotlib axes object, default None
samples : Number of points to plot in each curve
color: list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds: keywords
Options to pass to matplotlib plotting method
Returns
-------
ax: Matplotlib axis object
"""
from math import sqrt, pi
import matplotlib.pyplot as plt
def function(amplitudes):
def f(t):
x1 = amplitudes[0]
result = x1 / sqrt(2.0)
# Take the rest of the coefficients and resize them
# appropriately. Take a copy of amplitudes as otherwise numpy
# deletes the element from amplitudes itself.
coeffs = np.delete(np.copy(amplitudes), 0)
coeffs.resize(int((coeffs.size + 1) / 2), 2)
# Generate the harmonics and arguments for the sin and cos
# functions.
harmonics = np.arange(0, coeffs.shape[0]) + 1
trig_args = np.outer(harmonics, t)
result += np.sum(coeffs[:, 0, np.newaxis] * np.sin(trig_args) +
coeffs[:, 1, np.newaxis] * np.cos(trig_args),
axis=0)
return result
return f
n = len(frame)
class_col = frame[class_column]
classes = frame[class_column].drop_duplicates()
df = frame.drop(class_column, axis=1)
t = np.linspace(-pi, pi, samples)
used_legends = set([])
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
if ax is None:
ax = plt.gca(xlim=(-pi, pi))
for i in range(n):
row = df.iloc[i].values
f = function(row)
y = f(t)
kls = class_col.iat[i]
label = pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(t, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(t, y, color=colors[kls], **kwds)
ax.legend(loc='upper right')
ax.grid()
return ax
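# Usage sketch for andrews_curves (illustrative; assumes the caller has
# ``import pandas as pd`` and ``import numpy as np`` available):
#   >>> df = pd.DataFrame(np.random.randn(30, 4), columns=list('wxyz'))
#   >>> df['Name'] = np.repeat(['a', 'b', 'c'], 10)
#   >>> ax = andrews_curves(df, class_column='Name', samples=100)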
def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
"""
Bootstrap plot on mean, median and mid-range statistics.
The bootstrap plot is used to estimate the uncertainty of a statistic
by relying on random sampling with replacement [1]_. This function will
generate bootstrapping plots for mean, median and mid-range statistics
for the given number of samples of the given size.
.. [1] "Bootstrapping (statistics)" in \
https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29
Parameters
----------
series : pandas.Series
Pandas Series from where to get the samplings for the bootstrapping.
fig : matplotlib.figure.Figure, default None
If given, it will use the `fig` reference for plotting instead of
creating a new one with default parameters.
size : int, default 50
Number of data points to consider during each sampling. It must be
greater or equal than the length of the `series`.
samples : int, default 500
Number of times the bootstrap procedure is performed.
**kwds :
Options to pass to matplotlib plotting method.
Returns
-------
fig : matplotlib.figure.Figure
Matplotlib figure
See Also
--------
pandas.DataFrame.plot : Basic plotting for DataFrame objects.
pandas.Series.plot : Basic plotting for Series objects.
Examples
--------
.. plot::
:context: close-figs
>>> import numpy as np
>>> s = pd.Series(np.random.uniform(size=100))
>>> fig = pd.plotting.bootstrap_plot(s)
"""
import random
import matplotlib.pyplot as plt
# random.sample(ndarray, int) fails on python 3.3, sigh
data = list(series.values)
samplings = [random.sample(data, size) for _ in range(samples)]
means = np.array([np.mean(sampling) for sampling in samplings])
medians = np.array([np.median(sampling) for sampling in samplings])
midranges = np.array([(min(sampling) + max(sampling)) * 0.5
for sampling in samplings])
if fig is None:
fig = plt.figure()
x = lrange(samples)
axes = []
ax1 = fig.add_subplot(2, 3, 1)
ax1.set_xlabel("Sample")
axes.append(ax1)
ax1.plot(x, means, **kwds)
ax2 = fig.add_subplot(2, 3, 2)
ax2.set_xlabel("Sample")
axes.append(ax2)
ax2.plot(x, medians, **kwds)
ax3 = fig.add_subplot(2, 3, 3)
ax3.set_xlabel("Sample")
axes.append(ax3)
ax3.plot(x, midranges, **kwds)
ax4 = fig.add_subplot(2, 3, 4)
ax4.set_xlabel("Mean")
axes.append(ax4)
ax4.hist(means, **kwds)
ax5 = fig.add_subplot(2, 3, 5)
ax5.set_xlabel("Median")
axes.append(ax5)
ax5.hist(medians, **kwds)
ax6 = fig.add_subplot(2, 3, 6)
ax6.set_xlabel("Midrange")
axes.append(ax6)
ax6.hist(midranges, **kwds)
for axis in axes:
plt.setp(axis.get_xticklabels(), fontsize=8)
plt.setp(axis.get_yticklabels(), fontsize=8)
return fig
@deprecate_kwarg(old_arg_name='colors', new_arg_name='color')
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame', stacklevel=3)
def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
use_columns=False, xticks=None, colormap=None,
axvlines=True, axvlines_kwds=None, sort_labels=False,
**kwds):
"""Parallel coordinates plotting.
Parameters
----------
frame: DataFrame
class_column: str
Column name containing class names
cols: list, optional
A list of column names to use
ax: matplotlib.axis, optional
matplotlib axis object
color: list or tuple, optional
Colors to use for the different classes
use_columns: bool, optional
If true, columns will be used as xticks
xticks: list or tuple, optional
A list of values to use for xticks
colormap: str or matplotlib colormap, default None
Colormap to use for line colors.
axvlines: bool, optional
If true, vertical lines will be added at each xtick
axvlines_kwds: keywords, optional
Options to be passed to axvline method for vertical lines
sort_labels: bool, False
Sort class_column labels, useful when assigning colors
.. versionadded:: 0.20.0
kwds: keywords
Options to pass to matplotlib plotting method
Returns
-------
ax: matplotlib axis object
Examples
--------
>>> from pandas import read_csv
>>> from pandas.tools.plotting import parallel_coordinates
>>> from matplotlib import pyplot as plt
>>> df = read_csv('https://raw.github.com/pandas-dev/pandas/master'
'/pandas/tests/data/iris.csv')
>>> parallel_coordinates(df, 'Name', color=('#556270',
'#4ECDC4', '#C7F464'))
>>> plt.show()
"""
if axvlines_kwds is None:
axvlines_kwds = {'linewidth': 1, 'color': 'black'}
import matplotlib.pyplot as plt
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
if cols is None:
df = frame.drop(class_column, axis=1)
else:
df = frame[cols]
used_legends = set([])
ncols = len(df.columns)
# determine values to use for xticks
if use_columns is True:
if not np.all(np.isreal(list(df.columns))):
raise ValueError('Columns must be numeric to be used as xticks')
x = df.columns
elif xticks is not None:
if not np.all(np.isreal(xticks)):
raise ValueError('xticks specified must be numeric')
elif len(xticks) != ncols:
raise ValueError('Length of xticks must match number of columns')
x = xticks
else:
x = lrange(ncols)
if ax is None:
ax = plt.gca()
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
if sort_labels:
classes = sorted(classes)
color_values = sorted(color_values)
colors = dict(zip(classes, color_values))
for i in range(n):
y = df.iloc[i].values
kls = class_col.iat[i]
label = pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(x, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(x, y, color=colors[kls], **kwds)
if axvlines:
for i in x:
ax.axvline(i, **axvlines_kwds)
ax.set_xticks(x)
ax.set_xticklabels(df.columns)
ax.set_xlim(x[0], x[-1])
ax.legend(loc='upper right')
ax.grid()
return ax
def lag_plot(series, lag=1, ax=None, **kwds):
"""Lag plot for time series.
Parameters
----------
series: Time series
lag: lag of the scatter plot, default 1
ax: Matplotlib axis object, optional
kwds: Matplotlib scatter method keyword arguments, optional
Returns
-------
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
# workaround because `c='b'` is hardcoded in matplotlib's scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
data = series.values
y1 = data[:-lag]
y2 = data[lag:]
if ax is None:
ax = plt.gca()
ax.set_xlabel("y(t)")
ax.set_ylabel("y(t + {lag})".format(lag=lag))
ax.scatter(y1, y2, **kwds)
return ax
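# Usage sketch for lag_plot (illustrative; assumes ``import pandas as pd`` and
# ``import numpy as np`` on the caller's side):
#   >>> s = pd.Series(np.cumsum(np.random.randn(200)))
#   >>> ax = lag_plot(s, lag=1)  # points near the diagonal indicate strong lag-1 correlation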
def autocorrelation_plot(series, ax=None, **kwds):
"""Autocorrelation plot for time series.
Parameters
----------
series: Time series
ax: Matplotlib axis object, optional
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
n = len(series)
data = np.asarray(series)
if ax is None:
ax = plt.gca(xlim=(1, n), ylim=(-1.0, 1.0))
mean = np.mean(data)
c0 = np.sum((data - mean) ** 2) / float(n)
def r(h):
return ((data[:n - h] - mean) *
(data[h:] - mean)).sum() / float(n) / c0
x = np.arange(n) + 1
y = lmap(r, x)
z95 = 1.959963984540054
z99 = 2.5758293035489004
ax.axhline(y=z99 / np.sqrt(n), linestyle='--', color='grey')
ax.axhline(y=z95 / np.sqrt(n), color='grey')
ax.axhline(y=0.0, color='black')
ax.axhline(y=-z95 / np.sqrt(n), color='grey')
ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey')
ax.set_xlabel("Lag")
ax.set_ylabel("Autocorrelation")
ax.plot(x, y, **kwds)
if 'label' in kwds:
ax.legend()
ax.grid()
return ax
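# Usage sketch for autocorrelation_plot (illustrative; assumes the caller has
# ``import pandas as pd`` and ``import numpy as np``):
#   >>> s = pd.Series(np.sin(np.linspace(0, 10 * np.pi, 500)))
#   >>> ax = autocorrelation_plot(s)  # the grey lines are the 95%/99% confidence bands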
| mit |
trustedanalytics/spark-tk | regression-tests/sparktkregtests/testcases/graph/graph_triangle_count_test.py | 10 | 2503 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests triangle count for ATK against the networkx implementation"""
import unittest
import networkx as nx
from sparktkregtests.lib import sparktk_test
class TriangleCount(sparktk_test.SparkTKTestCase):
def test_triangle_counts(self):
"""Build frames and graphs to exercise"""
super(TriangleCount, self).setUp()
graph_data = self.get_file("clique_10.csv")
schema = [('src', str),
('dst', str)]
# set up the vertex frame, which is the union of the src and
# the dst columns of the edges
self.frame = self.context.frame.import_csv(graph_data, schema=schema)
self.vertices = self.frame.copy()
self.vertices2 = self.frame.copy()
self.vertices.rename_columns({"src": "id"})
self.vertices.drop_columns(["dst"])
self.vertices2.rename_columns({"dst": "id"})
self.vertices2.drop_columns(["src"])
self.vertices.append(self.vertices2)
self.vertices.drop_duplicates()
self.graph = self.context.graph.create(self.vertices, self.frame)
result = self.graph.triangle_count()
triangles = result.to_pandas(result.count())
# Create a dictionary of triangle count per vertex id:
dictionary_of_triangle_count = {vertex['id']: (vertex['count'])
for (index, vertex) in triangles.iterrows()}
edge_list = self.frame.take(
n=self.frame.count(), columns=['src', 'dst'])
# build the network x result
g = nx.Graph()
g.add_edges_from(edge_list)
triangle_counts_from_networkx = nx.triangles(g)
self.assertEqual(
dictionary_of_triangle_count, triangle_counts_from_networkx)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
jayflo/scikit-learn | sklearn/ensemble/__init__.py | 217 | 1307 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
crichardson17/emgtemp | Metals_Sims/Dusty_sims/z_0.8_2.0_hden_3.5/z_0.8_2.0_hden_3_plots.py | 1 | 17181 | import matplotlib.pyplot as plt
import numpy as np
import urllib
import matplotlib.cm as cm
Low_Temp_Color = 'k'
Mid_Temp_Color = 'g'
High_Temp_Color = 'r'
#Temp_Color = 0.5
Cloudy_Sim_Color = 'cyan'
markersize = 40
SDSS_File = '/Users/Sam/Documents/emgtemp/data/4363_gr_5_0_err_dered.csv'
SDSS_Data = np.genfromtxt(SDSS_File,skip_header=1, delimiter = ',',dtype=float,unpack=True,names=True)
NII_6584 = SDSS_Data['Flux_NII_6583']
Ha_6562 = SDSS_Data['Flux_Ha_6562']
OI_6300 = SDSS_Data['Flux_OI_6300']
OIII_5006 = SDSS_Data['Flux_OIII_5006']
Hb_4861 = SDSS_Data['Flux_Hb_4861']
OIII_4363 = SDSS_Data['Flux_OIII_4363']
SII_6716 = SDSS_Data['Flux_SII_6716']
SII_6731 = SDSS_Data['Flux_SII_6730']
OII_3727 = SDSS_Data['Flux_OII_3726'] + SDSS_Data['Flux_OII_3728']
OIII_Hb = np.log10(OIII_5006/Hb_4861)
NII_Ha = np.log10(NII_6584/Ha_6562)
Temp_Ratio = np.log10(OIII_5006/OIII_4363)
S_Ratio = np.log10(SII_6716/SII_6731)
NO_Ratio = np.log10(NII_6584/OII_3727)
OI_Ratio = np.log10(OI_6300/Ha_6562)
O_Ratio = np.log10(OIII_5006/OII_3727)
S_Ha_Ratio = np.log10((SII_6716+SII_6731)/Ha_6562)
Cloudy_File = '/Users/Sam/Documents/emgtemp/Metals_Sims/Dusty_sims/z_0.8_2.0_hden_3.5/z_0.8_2.0_hden_3.5.csv'
Cloudy_Data = np.genfromtxt(Cloudy_File, delimiter = ',',dtype=float,unpack=True,names=True)
Cloudy_NII_6584 = Cloudy_Data['N__2__6584A']
Cloudy_Ha_6562 = Cloudy_Data['H__1__6563A']
Cloudy_OIII_5006 = Cloudy_Data['O__3__5007A']
Cloudy_Hb_4861 = Cloudy_Data['TOTL__4861A']
Cloudy_OIII_4363 = Cloudy_Data['TOTL__4363A']
Cloudy_SII_6716 = Cloudy_Data['S_II__6716A']
Cloudy_SII_6731 = Cloudy_Data['S_II__6731A']
Cloudy_OII_3727 = Cloudy_Data['TOTL__3727A']
Cloudy_OI_6300 = Cloudy_Data['O__1__6300A']
Cloudy_OIII_Hb = np.log10(Cloudy_OIII_5006/Cloudy_Hb_4861)
Cloudy_NII_Ha = np.log10(Cloudy_NII_6584/Cloudy_Ha_6562)
Cloudy_Temp_Ratio = np.log10(Cloudy_OIII_5006/Cloudy_OIII_4363)
Cloudy_S_Ratio = np.log10(Cloudy_SII_6716/Cloudy_SII_6731)
Cloudy_NO_Ratio = np.log10(Cloudy_NII_6584/Cloudy_OII_3727)
Cloudy_OI_Ratio = np.log10(Cloudy_OI_6300/Cloudy_Ha_6562)
Cloudy_O_Ratio = np.log10(Cloudy_OIII_5006/Cloudy_OII_3727)
Cloudy_S_Ha_Ratio = np.log10((Cloudy_SII_6716+Cloudy_SII_6731)/Cloudy_Ha_6562)
Grid_File = '/Users/Sam/Documents/emgtemp/Metals_Sims/Dusty_sims/z_0.6_2.0/z_0.6_2.0_sims.grd'
Grid_Data = np.genfromtxt(Grid_File,skip_header=1,delimiter = '\t',dtype=float,unpack=True)
Cloudy_Metals = Grid_Data[8,:]
Cloudy_Den = Grid_Data[6,:]
Cloudy_NII_Ha_array = np.reshape(Cloudy_NII_Ha,(6,-1))
Cloudy_OI_Ratio_array = np.reshape(Cloudy_OI_Ratio,(6,-1))
Cloudy_OIII_Hb_array = np.reshape(Cloudy_OIII_Hb,(6,-1))
Cloudy_Temp_Ratio_array = np.reshape(Cloudy_Temp_Ratio,(6,-1))
Cloudy_S_Ratio_array = np.reshape(Cloudy_S_Ratio,(6,-1))
Cloudy_NO_Ratio_array = np.reshape(Cloudy_NO_Ratio,(6,-1))
Cloudy_O_Ratio_array = np.reshape(Cloudy_O_Ratio,(6,-1))
Cloudy_S_Ha_Ratio_array = np.reshape(Cloudy_S_Ha_Ratio,(6,-1))
Cloudy_NII_Ha_transpose = np.transpose(Cloudy_NII_Ha_array)
Cloudy_OI_Ratio_transpose = np.transpose(Cloudy_OI_Ratio_array)
Cloudy_OIII_Hb_transpose = np.transpose(Cloudy_OIII_Hb_array)
Cloudy_Temp_Ratio_transpose = np.transpose(Cloudy_Temp_Ratio_array)
Cloudy_S_Ratio_transpose = np.transpose(Cloudy_S_Ratio_array)
Cloudy_NO_Ratio_transpose = np.transpose(Cloudy_NO_Ratio_array)
Cloudy_O_Ratio_transpose = np.transpose(Cloudy_O_Ratio_array)
Cloudy_S_Ha_Ratio_transpose = np.transpose(Cloudy_S_Ha_Ratio_array)
#cold_data_colors = [plt.cm.Blues(i) for i in np.linspace(0,1,len(SDSS_Data['z']))]
#mid_data_colors = [plt.cm.Greens(i) for i in np.linspace(0,1,len(SDSS_Data['z']))]
#hot_data_colors = [plt.cm.Reds(i) for i in np.linspace(0,1,len(SDSS_Data['z']))]
u_colors = [plt.cm.Reds(i) for i in np.linspace(0.25,1,7)]
metal_colors = [plt.cm.Blues(i) for i in np.linspace(0.25,1,6)]
#This is bad^ 3 and 7 are the number of densities and ionization parameters used, but ideally this wouldn't be hardcoded.
#sf_count = 0.0
#comp_count = 0.0
#agn_count = 0.0
#liner_count = 0.0
#amb_count = 0.0
shape = ['v']
#####################################################################################################
def getShape(NII_Ha, OIII_Hb, S_Ha_Ratio, OI_Ratio):
# Star forming
if OIII_Hb < 0.61/(NII_Ha-0.05)+1.3 and \
OIII_Hb < 0.72/(S_Ha_Ratio-0.32)+1.30 and \
OIII_Hb < 0.73/(OI_Ratio+0.59)+1.33:
shape = 'x'
#sf_count = sf_count+1
# Composite
elif 0.61/(NII_Ha-0.05)+1.3 < OIII_Hb and \
0.61/(NII_Ha-0.47)+1.19 > OIII_Hb:
shape = '+'
#comp_count = comp_count+1
# AGN
elif 0.61/(NII_Ha-0.47)+1.19 < OIII_Hb and \
0.72/(S_Ha_Ratio-0.32)+1.30 < OIII_Hb and \
0.73/(OI_Ratio+0.59)+1.33 < OIII_Hb and \
(1.89*S_Ha_Ratio)+0.76 < OIII_Hb and \
(1.18*OI_Ratio)+1.30 < OIII_Hb:
shape = 'D'
#agn_count = agn_count+1
# LINERs
elif 0.61/(NII_Ha-0.47)+1.19 < OIII_Hb and \
0.72/(S_Ha_Ratio-0.32)+1.30 < OIII_Hb and \
OIII_Hb < (1.89*S_Ha_Ratio)+0.76 and \
0.73/(OI_Ratio+0.59)+1.33 < OIII_Hb and \
OIII_Hb < (1.18*OI_Ratio)+1.30:
shape = 's'
#liner_count = liner_count+1
else:
# Ambiguous
shape = '*'
#amb_count = amb_count+1
return shape
#####################################################################################################
#####################################################################################################
def getColor(OIII_5006, OIII_4363):
Temp_Color = 'k'
if OIII_5006/OIII_4363<50:
#Temp_Color = '0.25'
Temp_Color = plt.cm.gray(0.2)
#red = red + 1
elif OIII_5006/OIII_4363>50 and OIII_5006/OIII_4363<100:
#Temp_Color = '0.5'
Temp_Color = plt.cm.gray(0.5)
#green = green + 1
elif OIII_5006/OIII_4363>100:
#Temp_Color = '0.75'
Temp_Color = plt.cm.gray(0.75)
#black = black + 1
else:
print ("error")
return Temp_Color
#####################################################################################################
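# Worked example of the helpers above (illustrative fluxes only): a source with
# OIII_5006 = 150 and OIII_4363 = 2 has a ratio of 75, so getColor() returns the
# middle grey bin (50 < ratio < 100); getShape() then selects the BPT marker
# ('x' star-forming, '+' composite, 'D' AGN, 's' LINER, '*' ambiguous) from the
# demarcation curves evaluated on the emission-line ratios.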
fig = plt.figure(21)
fig.subplots_adjust(wspace=0.4,hspace=0.4)
sp1 = plt.subplot(221)
for i in range(0,len(SDSS_Data['z'])):
shape = getShape(NII_Ha[i], OIII_Hb[i], S_Ha_Ratio[i], OI_Ratio[i])
Temp_Color = getColor(OIII_5006[i], OIII_4363[i])
#print(Temp_Color)
plt.scatter(NII_Ha[i],OIII_Hb[i],s = markersize, marker = shape, color = Temp_Color, edgecolor = 'none')
#print (Temp_Color)
#print(sf_count)
#print(comp_count)
#print(agn_count)
#print(liner_count)
#print(amb_count)
#print(red)
#print(green)
#print(black)
#print(counter)
plt.xlim(-2.5,0.5)
plt.ylim(-1,1.3)
plt.ylabel(r"log([OIII] $\lambda$5007/H$\beta$)")
plt.xlabel(r"log ([NII] $\lambda$6584/H$\alpha$)")
plt.title("BPT Diagram")
#plt.scatter(Cloudy_NII_Ha,Cloudy_OIII_Hb,c=Cloudy_Sim_Color, s = markersize, edgecolor ='none')
sp1.set_color_cycle(u_colors)
plt.plot(Cloudy_NII_Ha_array,Cloudy_OIII_Hb_array,linestyle = '--', lw = '2')
sp1.set_color_cycle(metal_colors)
plt.plot(Cloudy_NII_Ha_transpose,Cloudy_OIII_Hb_transpose, lw = '2')
plt.legend([plt.scatter([],[],color='0.75', s = markersize), plt.scatter([],[],color='0.5', s = markersize), plt.scatter([],[],color='0.25', s = markersize)], (r"T$_e$<1.17*10$^4$",r"1.17*10$^4$<T$_e$<1.54*10$^4$",r"T$_e$>1.54*10$^4$"),scatterpoints = 1, loc = 'lower left',fontsize =8)
x=np.linspace(-1.5,0.3,50)
y=((.61/(x-.47))+1.19)
plt.plot(x,y,color=Low_Temp_Color)
x3=np.linspace(-1,-0.2,50)
y3=((.61/(x3-.05)+1.3))
plt.plot(x3,y3,linestyle='--',color='k')
#counter=0
sp2 = plt.subplot(222)
for i in range(0,len(SDSS_Data['z'])):
shape = getShape(NII_Ha[i], OIII_Hb[i], S_Ha_Ratio[i], OI_Ratio[i])
Temp_Color = getColor(OIII_5006[i], OIII_4363[i])
plt.scatter(NII_Ha[i],Temp_Ratio[i], s = markersize, marker = shape, color = Temp_Color, edgecolor = 'none')
#print(counter)
plt.ylabel(r"log([OIII] $\lambda$5007/4363)")
plt.xlabel(r"log ([NII] $\lambda$6584/H$\alpha$)")
plt.title("Temperature")
plt.ylim(0,3)
plt.xlim(-2.5,0.5)
#plt.scatter(Cloudy_NII_Ha,Cloudy_Temp_Ratio,c=Cloudy_Sim_Color, s = markersize, edgecolor ='none')
sp2.set_color_cycle(u_colors)
plt.plot(Cloudy_NII_Ha_array,Cloudy_Temp_Ratio_array,linestyle = '--', lw = '2')
sp2.set_color_cycle(metal_colors)
plt.plot(Cloudy_NII_Ha_transpose,Cloudy_Temp_Ratio_transpose, lw = '2')
plt.legend([plt.scatter([],[],color='.75', s = markersize, marker = 'x', edgecolor = 'none'),plt.scatter([],[],color='0.75', s = markersize, marker = '+', edgecolor = 'none'), plt.scatter([],[],color='.75', s = markersize, marker = 'D', edgecolor = 'none'), plt.scatter([],[],color='.75', s = markersize, marker = 's', edgecolor = 'none'), plt.scatter([],[],color='.75', s = markersize, marker = '*', edgecolor = 'none')], ("Star-Forming","Composite","AGN","LINER","Ambiguous"),scatterpoints = 1, loc = 'lower left',fontsize =8)
sp3 = plt.subplot(223)
for i in range(0,len(SDSS_Data['z'])):
shape = getShape(NII_Ha[i], OIII_Hb[i], S_Ha_Ratio[i], OI_Ratio[i])
Temp_Color = getColor(OIII_5006[i], OIII_4363[i])
plt.scatter(NII_Ha[i],S_Ratio[i], s = markersize, marker = shape, c = Temp_Color, edgecolor = 'none')
plt.ylabel(r"log([SII] $\lambda$6717/6731)")
plt.xlabel(r"log ([NII] $\lambda$6584/H$\alpha$)")
plt.ylim(-1.0,1.0)
plt.xlim(-2.5,0.5)
plt.title("Density")
#plt.scatter(Cloudy_NII_Ha,Cloudy_S_Ratio,c=Cloudy_Sim_Color, s = markersize, edgecolor ='none')
sp3.set_color_cycle(u_colors)
plt.plot(Cloudy_NII_Ha_array,Cloudy_S_Ratio_array,linestyle = '--', lw = '2')
sp3.set_color_cycle(metal_colors)
plt.plot(Cloudy_NII_Ha_transpose,Cloudy_S_Ratio_transpose, lw = '2')
#plt.legend([plt.scatter([],[],color=Low_Temp_Color, s = markersize), plt.scatter([],[],color=Mid_Temp_Color, s = markersize), plt.scatter([],[],color=High_Temp_Color, s = markersize),plt.scatter([],[],c=Cloudy_Sim_Color, s = markersize, edgecolor = 'none')], (r"$\frac{OIII[5007]}{OIII[4363]}$<50.0",r"$50.0<\frac{OIII[5007]}{OIII[4363]}<100.0$",r"$\frac{OIII[5007]}{OIII[4363]}$>100.0","Cloudy Simulation"),scatterpoints = 1, loc = 'lower left',fontsize =8)
sp4 = plt.subplot(224)
for i in range(0,len(SDSS_Data['z'])):
shape = getShape(NII_Ha[i], OIII_Hb[i], S_Ha_Ratio[i], OI_Ratio[i])
Temp_Color = getColor(OIII_5006[i], OIII_4363[i])
plt.scatter(NII_Ha[i],NO_Ratio[i], s = markersize, marker = shape, c = Temp_Color, edgecolor = 'none')
plt.ylabel(r"log([NII] $\lambda$6584/[OII] $\lambda$3727)")
plt.xlabel(r"log ([NII] $\lambda$6584/H$\alpha$)")
plt.title("Metallicity")
plt.xlim(-2.5,0.5)
#plt.scatter(Cloudy_NII_Ha,Cloudy_NO_Ratio,c=Cloudy_Sim_Color, s = markersize, edgecolor ='none')
sp4.set_color_cycle(u_colors)
plt.plot(Cloudy_NII_Ha_array,Cloudy_NO_Ratio_array,linestyle = '--', lw = '2')
sp4.set_color_cycle(metal_colors)
plt.plot(Cloudy_NII_Ha_transpose,Cloudy_NO_Ratio_transpose, lw = '2')
#plt.legend([plt.scatter([],[],color=Low_Temp_Color, s = markersize), plt.scatter([],[],color=Mid_Temp_Color, s = markersize), plt.scatter([],[],color=High_Temp_Color, s = markersize),plt.scatter([],[],c=Cloudy_Sim_Color, s = markersize, edgecolor = 'none')], (r"$\frac{OIII[5007]}{OIII[4363]}$<50.0",r"$50.0<\frac{OIII[5007]}{OIII[4363]}<100.0$",r"$\frac{OIII[5007]}{OIII[4363]}$>100.0","Cloudy Simulation"),scatterpoints = 1, loc = 'lower left',fontsize =8)
plt.show()
plt.suptitle('n$_H$ = 3.5, -0.5 < U < -3.5, 0.5 < Z < 2.0')
plt.savefig("Z_0.5_2.0_Sims_Plots.pdf", dpi = 600)
fig2 = plt.figure(22)
sp5 = plt.subplot(221)
for i in range(0,len(SDSS_Data['z'])):
shape = getShape(NII_Ha[i], OIII_Hb[i], S_Ha_Ratio[i], OI_Ratio[i])
Temp_Color = getColor(OIII_5006[i], OIII_4363[i])
plt.scatter(NII_Ha[i],OI_Ratio[i], s = markersize, marker = shape, c = Temp_Color, edgecolor = 'none')
plt.ylabel(r"log([OI] $\lambda$6300/H$\alpha$)")
plt.xlabel(r"log ([NII] $\lambda$6584/H$\alpha$)")
plt.title("OI_6300")
plt.xlim(-2.5,0.5)
plt.ylim(-2.5,0)
#plt.scatter(Cloudy_NII_Ha,Cloudy_OI_Ratio,c=Cloudy_Sim_Color, s = markersize, edgecolor ='none')
sp5.set_color_cycle(u_colors)
plt.plot(Cloudy_NII_Ha_array,Cloudy_OI_Ratio_array,linestyle = '--', lw = '2')
sp5.set_color_cycle(metal_colors)
plt.plot(Cloudy_NII_Ha_transpose,Cloudy_OI_Ratio_transpose, lw = '2')
plt.legend([plt.scatter([],[],color='.75', s = markersize, marker = 'x', edgecolor = 'none'),plt.scatter([],[],color='0.75', s = markersize, marker = '+', edgecolor = 'none'), plt.scatter([],[],color='.75', s = markersize, marker = 'D', edgecolor = 'none'), plt.scatter([],[],color='.75', s = markersize, marker = 's', edgecolor = 'none'), plt.scatter([],[],color='.75', s = markersize, marker = '*', edgecolor = 'none')], ("Star-Forming","Composite","AGN","LINER","Ambiguous"),scatterpoints = 1, loc = 'lower left',fontsize =8)
sp6 = plt.subplot(222)
for i in range(0,len(SDSS_Data['z'])):
shape = getShape(NII_Ha[i], OIII_Hb[i], S_Ha_Ratio[i], OI_Ratio[i])
Temp_Color = getColor(OIII_5006[i], OIII_4363[i])
plt.scatter(OI_Ratio[i],OIII_Hb[i], s = markersize, marker = shape, c = Temp_Color, edgecolor = 'none')
plt.ylabel(r"log([OIII] $\lambda$5007/H$\beta$)")
plt.xlabel(r"log ([OI] $\lambda$6300/H$\alpha$)")
plt.title("OI_6300 vs. OIII_5007")
#plt.scatter(Cloudy_OI_Ratio,Cloudy_OIII_Hb,c=Cloudy_Sim_Color, s = markersize, edgecolor ='none')
sp6.set_color_cycle(u_colors)
plt.plot(Cloudy_OI_Ratio_array,Cloudy_OIII_Hb_array,linestyle = '--', lw = '2')
sp6.set_color_cycle(metal_colors)
plt.plot(Cloudy_OI_Ratio_transpose,Cloudy_OIII_Hb_transpose, lw = '2')
x6 = np.linspace(-2.5,-0.6,50)
y6 = ((.73/(x6+0.59))+1.33)
plt.plot(x6,y6,color = 'k')
x7 = np.linspace(-1.125,0.25,50)
y7 = (1.18*x7) + 1.30
plt.plot(x7,y7, color = 'b')
plt.ylim(-1,1.5)
plt.xlim(-2.5,0.5)
#plt.legend([plt.scatter([],[],color=Low_Temp_Color, s = markersize), plt.scatter([],[],color=Mid_Temp_Color, s = markersize), plt.scatter([],[],color=High_Temp_Color, s = markersize),plt.scatter([],[],c=Cloudy_Sim_Color, s = markersize, edgecolor = 'none')], (r"$\frac{OIII[5007]}{OIII[4363]}$<50.0",r"$50.0<\frac{OIII[5007]}{OIII[4363]}<100.0$",r"$\frac{OIII[5007]}{OIII[4363]}$>100.0","Cloudy Simulation"),scatterpoints = 1, loc = 'lower left',fontsize =8)
sp7 = plt.subplot(223)
for i in range(0,len(SDSS_Data['z'])):
shape = getShape(NII_Ha[i], OIII_Hb[i], S_Ha_Ratio[i], OI_Ratio[i])
Temp_Color = getColor(OIII_5006[i], OIII_4363[i])
plt.scatter(OI_Ratio[i],O_Ratio[i], s = markersize, marker = shape, c = Temp_Color, edgecolor = 'none')
plt.ylabel(r"log([OIII] $\lambda$5007/[OII]$\lambda$3727)")
plt.xlabel(r"log ([OI] $\lambda$6300/H$\alpha$)")
plt.title("Groves Diagram")
#plt.scatter(Cloudy_OI_Ratio,Cloudy_O_Ratio,c=Cloudy_Sim_Color, s = markersize, edgecolor ='none')
sp7.set_color_cycle(u_colors)
plt.plot(Cloudy_OI_Ratio_array,Cloudy_O_Ratio_array,linestyle = '--', lw = '2')
sp7.set_color_cycle(metal_colors)
plt.plot(Cloudy_OI_Ratio_transpose,Cloudy_O_Ratio_transpose, lw = '2')
x1 = np.linspace(-2.0,-.25,50)
y1 = ((-1.701*x1)-2.163)
x2 = np.linspace(-1.05998,0,50)
y2 = x2 + 0.7
plt.plot(x2,y2, color = 'k')
plt.plot(x1,y1, color = 'k')
plt.xlim(-2.5,0)
plt.ylim(-1.5,1)
#plt.legend([plt.scatter([],[],color=Low_Temp_Color, s = markersize), plt.scatter([],[],color=Mid_Temp_Color, s = markersize), plt.scatter([],[],color=High_Temp_Color, s = markersize),plt.scatter([],[],c=Cloudy_Sim_Color, s = markersize, edgecolor = 'none')], (r"$\frac{OIII[5007]}{OIII[4363]}$<50.0",r"$50.0<\frac{OIII[5007]}{OIII[4363]}<100.0$",r"$\frac{OIII[5007]}{OIII[4363]}$>100.0","Cloudy Simulation"),scatterpoints = 1, loc = 'lower left',fontsize =8)
sp8 = plt.subplot(224)
for i in range(0,len(SDSS_Data['z'])):
shape = getShape(NII_Ha[i], OIII_Hb[i], S_Ha_Ratio[i], OI_Ratio[i])
Temp_Color = getColor(OIII_5006[i], OIII_4363[i])
plt.scatter(S_Ha_Ratio[i],OIII_Hb[i], s = markersize, marker = shape, c = Temp_Color, edgecolor = 'none')
plt.ylabel(r"log([OIII] $\lambda$5007/H$\beta$)")
plt.xlabel(r"log ([SII]/H$\alpha$)")
plt.title("OIII_5007 vs. SII")
plt.ylim(-1,1.5)
x4 = np.linspace(-0.32,0.25,50)
y4 = ((1.89*x4)+0.76)
x5 = np.linspace(-1.5,0.25,50)
y5 = ((0.72/(x5 - 0.32))+1.3)
plt.plot(x5,y5,color = 'k')
plt.plot(x4,y4,color = 'b')
#plt.scatter(Cloudy_S_Ha_Ratio,Cloudy_OIII_Hb,c=Cloudy_Sim_Color, s = markersize, edgecolor ='none')
sp8.set_color_cycle(u_colors)
plt.plot(Cloudy_S_Ha_Ratio_array,Cloudy_OIII_Hb_array,linestyle = '--', lw = '2')
sp8.set_color_cycle(metal_colors)
plt.plot(Cloudy_S_Ha_Ratio_transpose,Cloudy_OIII_Hb_transpose, lw = '2')
plt.suptitle('n$_H$ = 3.5, -0.5 < U < -3.5, 0.5 < Z < 2.0')
#plt.legend([plt.scatter([],[],color=Low_Temp_Color, s = markersize), plt.scatter([],[],color=Mid_Temp_Color, s = markersize), plt.scatter([],[],color=High_Temp_Color, s = markersize),plt.scatter([],[],c=Cloudy_Sim_Color, s = markersize, edgecolor = 'none')], (r"$\frac{OIII[5007]}{OIII[4363]}$<50.0",r"$50.0<\frac{OIII[5007]}{OIII[4363]}<100.0$",r"$\frac{OIII[5007]}{OIII[4363]}$>100.0","Cloudy Simulation"),scatterpoints = 1, loc = 'lower left',fontsize =8)
#plt.savefig("Metallicity Sim Plots1.pdf")
plt.show() | mit |
akrherz/iem | htdocs/plotting/auto/scripts100/p170.py | 1 | 5802 | """METAR frequency"""
import calendar
import datetime
from pandas.io.sql import read_sql
from pyiem.plot import figure_axes
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.exceptions import NoDataFound
PDICT = {
"TS": "All Thunder Reports (TS)",
"VCTS": "Thunder in Vicinity (VCTS)",
"1": "Thunder Reports (excluding VCTS)",
"-SN": "Light Snow (-SN)",
"PSN": "Heavy Snow (+SN)", # +SN causes CGI issues
"SN": "Any Snow (*SN*)",
"FZFG": "Freezing Fog (FZFG)",
"FZRA": "Freezing Rain (FZRA)",
"FG": "Fog (FG)",
"BLSN": "Blowing Snow (BLSN)",
}
PDICT2 = {
"day": "Count Distinct Days per Month per Year",
"hour": "Count Distinct Hours per Month per Year",
}
def get_description():
"""Return a dict describing how to call this plotter"""
desc = dict()
desc["data"] = True
desc["cache"] = 86400
desc[
"description"
] = """This chart totals the number of distinct calendar
days per month that a given present weather condition is reported within
the METAR data feed. The calendar day is computed for the local time zone
of the reporting station.
<p>The reporting of present weather codes within METARs has changed over
the years and there is some non-standard nomenclature used by some sites.
The thunder (TS) reports are delineated into three categories here to
hopefully allow more accurate statistics.
<ul>
<li><strong>All Thunder Reports (TS)</strong> includes any
<code>TS</code> mention in any present weather code</li>
<li><strong>Thunder in Vicinity (VCTS)</strong> includes any
<code>VCTS</code> mention in any present weather code, for example,
<code>VCTSRA</code> would match.</li>
<li><strong>Thunder Reports (excluding VCTS)</strong> includes most
<code>TS</code> mentions, but not any including <code>VC</code></li>
</ul>
"""
desc["arguments"] = [
dict(
type="zstation",
name="zstation",
default="DSM",
label="Select Station:",
network="IA_ASOS",
),
dict(
type="year",
name="year",
label="Year to Highlight:",
default=datetime.date.today().year,
min=1973,
),
dict(
type="select",
name="var",
default="FG",
label="Present Weather Option:",
options=PDICT,
),
dict(
type="select",
name="w",
default="day",
label="How to aggregate the data:",
options=PDICT2,
),
]
return desc
def plotter(fdict):
"""Go"""
pgconn = get_dbconn("asos")
ctx = get_autoplot_context(fdict, get_description())
station = ctx["zstation"]
year = ctx["year"]
pweather = ctx["var"]
if pweather == "PSN":
pweather = "+SN"
PDICT["+SN"] = PDICT["PSN"]
tzname = ctx["_nt"].sts[station]["tzname"]
ab = ctx["_nt"].sts[station]["archive_begin"]
if ab is None:
raise NoDataFound("Unknown station metadata.")
syear = max([1973, ab.year])
limiter = "array_to_string(wxcodes, '') LIKE '%%" + pweather + "%%'"
if pweather == "1":
# Special in the case of non-VCTS
limiter = (
"ARRAY['TS'::varchar, '-TSRA'::varchar, 'TSRA'::varchar, "
"'-TS'::varchar, '+TSRA'::varchar, '+TSSN'::varchar,"
"'-TSSN'::varchar, '-TSDZ'::varchar] && wxcodes"
)
trunc = "day" if ctx["w"] == "day" else "hour"
df = read_sql(
f"""
WITH data as (
SELECT distinct date_trunc(%s,
valid at time zone %s + '10 minutes'::interval) as datum
from alldata where station = %s and {limiter}
and valid > '1973-01-01' and report_type = 2)
SELECT extract(year from datum)::int as year,
extract(month from datum)::int as month,
count(*) from data GROUP by year, month ORDER by year, month
""",
pgconn,
params=(trunc, tzname, station),
index_col=None,
)
if df.empty:
raise NoDataFound("No database entries found for station, sorry!")
title = (
"[%s] %s %s Events\n" "(%s-%s) Distinct %s with '%s' Reported%s"
) % (
station,
ctx["_nt"].sts[station]["name"],
PDICT[pweather],
syear,
datetime.date.today().year,
"Calendar Dates" if ctx["w"] == "day" else "Hourly Observations",
pweather if pweather != "1" else "TS",
" with at least one hourly report" if ctx["w"] == "day" else "",
)
(fig, ax) = figure_axes(title=title)
df2 = df[df["year"] == year]
if not df2.empty:
ax.bar(
df2["month"].values - 0.2,
df2["count"].values,
width=0.4,
fc="r",
ec="r",
label="%s" % (year,),
)
for x, y in zip(df2["month"].values, df2["count"].values):
ax.text(x - 0.2, y + 0.2, f"{y:.0f}", ha="center")
df2 = df.groupby("month").sum()
years = (datetime.date.today().year - syear) + 1
yvals = df2["count"] / years
ax.bar(
df2.index.values + 0.2, yvals, width=0.4, fc="b", ec="b", label="Avg"
)
for x, y in zip(df2.index.values, yvals):
ax.text(x + 0.2, y + 0.2, f"{y:.1f}", ha="center")
ax.set_xlim(0.5, 12.5)
ax.set_xticks(range(1, 13))
ax.set_xticklabels(calendar.month_abbr[1:])
ax.set_ylabel(
"%s Per Month" % ("Days" if ctx["w"] == "days" else "Hours",)
)
ax.set_ylim(top=(ax.get_ylim()[1] + 2))
ax.legend(loc="best")
ax.grid(True)
return fig, df
if __name__ == "__main__":
plotter(dict(zstation="ALO", year=2017, var="FG", network="IA_ASOS"))
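# Further illustrative invocations (hypothetical station/year values, same call
# pattern as above):
# plotter(dict(zstation="DSM", year=2020, var="1", w="hour", network="IA_ASOS"))  # thunder excluding VCTS, hourly
# plotter(dict(zstation="DSM", year=2020, var="PSN", w="day", network="IA_ASOS"))  # heavy snow (+SN), distinct days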
| mit |
ColdSauce/solvinga858 | solve.py | 1 | 1526 | import csv
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
def is_number(c):
try:
int(c)
return True
except ValueError:
return False
def get_amount_chars_together(st):
total = 0
previous_was_char = False
for c in st:
if not is_number(c):
if not previous_was_char:
total = total + 1
previous_was_char = True
else:
previous_was_char = False
return total
def get_index_biggest_list(total_list):
biggest_len = 0
biggest_index = -1
for index, item in enumerate(total_list):
if len(item) > biggest_len:
biggest_index = index
biggest_len = len(item)
return biggest_index
def main():
total_lists = []
with open('all.csv', 'rb') as f:
rea = csv.reader(f, delimiter=',')
for row in rea:
all_posts = row[2].split()
total_lists.append(map(get_amount_chars_together, all_posts))
biggest_list_index = get_index_biggest_list(total_lists)
elements = [[] for x in range(0, len(total_lists[biggest_list_index]))]
for l in total_lists:
for x in range(0, len(total_lists[biggest_list_index])):
if len(l) > x:
elements[x].append(l[x])
for index, element in enumerate(elements):
plt.hist(element, bins = 50,range = (0,20), alpha = 0.75)
plt.savefig('out/' + str(index))
plt.clf()
if __name__ == '__main__':
main()
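# Worked example of get_amount_chars_together (illustrative input): "ab12cd3"
# contains two runs of consecutive non-digit characters ("ab" and "cd"), so the
# function returns 2; main() applies this to every whitespace-separated token
# and histograms the per-position counts into out/.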
| mit |
Sorsly/subtle | google-cloud-sdk/lib/googlecloudsdk/third_party/appengine/api/appinfo.py | 1 | 95812 | # Copyright 2007 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AppInfo tools.
This library allows you to work with AppInfo records in memory, as well as store
and load from configuration files.
"""
# WARNING: This file is externally viewable by our users. All comments from
# this file will be stripped. The docstrings will NOT. Do not put sensitive
# information in docstrings. If you must communicate internal information in
# this source file, please place them in comments only.
# Parts of the code in this file are duplicated in
# //java/com/google/apphosting/admin/legacy/...
# This is part of an ongoing effort to replace the deployment API.
# Until we can delete this code, please check to see if your changes need
# to be reflected in the java code. For questions, talk to clouser@ or
import logging
import os
import re
import string
import sys
import wsgiref.util
# pylint: disable=g-import-not-at-top
if os.environ.get('APPENGINE_RUNTIME') == 'python27':
from google.appengine.api import validation
from google.appengine.api import yaml_builder
from google.appengine.api import yaml_listener
from google.appengine.api import yaml_object
else:
# This case covers both Python 2.5 and unittests, which are 2.5 only.
from googlecloudsdk.third_party.appengine.api import validation
from googlecloudsdk.third_party.appengine.api import yaml_builder
from googlecloudsdk.third_party.appengine.api import yaml_listener
from googlecloudsdk.third_party.appengine.api import yaml_object
from googlecloudsdk.third_party.appengine.api import appinfo_errors
from googlecloudsdk.third_party.appengine.api import backendinfo
# pylint: enable=g-import-not-at-top
# Regular expression for matching URL, file, URL root regular expressions.
# `url_root` is identical to url except it additionally imposes not ending with
# *.
# TODO(user): `url_root` should generally allow a URL but not a regex or
# glob.
_URL_REGEX = r'(?!\^)/.*|\..*|(\(.).*(?!\$).'
_FILES_REGEX = r'.+'
_URL_ROOT_REGEX = r'/.*'
# Regular expression for matching cache expiration deltas.
_DELTA_REGEX = r'([0-9]+)([DdHhMm]|[sS]?)'
_EXPIRATION_REGEX = r'\s*(%s)(\s+%s)*\s*' % (_DELTA_REGEX, _DELTA_REGEX)
_START_PATH = '/_ah/start'
_NON_WHITE_SPACE_REGEX = r'^\S+$'
# Regular expression for matching service names.
# TODO(arb): this may need altering so as to not leak unreleased service names
# TODO(user): Re-add sms to list of services.
_ALLOWED_SERVICES = ['mail', 'mail_bounce', 'xmpp_message', 'xmpp_subscribe',
'xmpp_presence', 'xmpp_error', 'channel_presence', 'rest',
'warmup']
_SERVICE_RE_STRING = '(' + '|'.join(_ALLOWED_SERVICES) + ')'
# Regular expression for matching page names.
_PAGE_NAME_REGEX = r'^.+$'
# Constants for interpreting expiration deltas.
_EXPIRATION_CONVERSIONS = {
'd': 60 * 60 * 24,
'h': 60 * 60,
'm': 60,
's': 1,
}
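# Illustrative sketch (hypothetical value, not part of this module's API): an
# expiration string such as '4d 5h' matches _EXPIRATION_REGEX, and each delta
# is scaled with _EXPIRATION_CONVERSIONS, e.g.
#   4 * _EXPIRATION_CONVERSIONS['d'] + 5 * _EXPIRATION_CONVERSIONS['h']
#   == 4 * 86400 + 5 * 3600 == 363600 seconds.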
# Constant values from `apphosting/base/constants.h`
# TODO(user): Maybe a python constants file.
APP_ID_MAX_LEN = 100
MODULE_ID_MAX_LEN = 63
# See b/5485871 for why this is 100 and not 63.
# NOTE(user): See b/5485871 for why this is different from the
# `apphosting/base/constants.h` value.
MODULE_VERSION_ID_MAX_LEN = 63
MAX_URL_MAPS = 100
# The character separating the partition from the domain.
PARTITION_SEPARATOR = '~'
# The character separating the domain from the display-app-id.
DOMAIN_SEPARATOR = ':'
# The character separating major and minor versions.
VERSION_SEPARATOR = '.'
# The character separating module from module version.
MODULE_SEPARATOR = ':'
# The name of the default module
DEFAULT_MODULE = 'default'
# Regular expression for ID types. Defined in apphosting/base/id_util.cc.
PARTITION_RE_STRING_WITHOUT_SEPARATOR = (r'[a-z\d\-]{1,%d}' % APP_ID_MAX_LEN)
PARTITION_RE_STRING = (r'%s\%s' %
(PARTITION_RE_STRING_WITHOUT_SEPARATOR,
PARTITION_SEPARATOR))
DOMAIN_RE_STRING_WITHOUT_SEPARATOR = (r'(?!\-)[a-z\d\-\.]{1,%d}' %
APP_ID_MAX_LEN)
DOMAIN_RE_STRING = (r'%s%s' %
(DOMAIN_RE_STRING_WITHOUT_SEPARATOR, DOMAIN_SEPARATOR))
DISPLAY_APP_ID_RE_STRING = r'(?!-)[a-z\d\-]{0,%d}[a-z\d]' % (APP_ID_MAX_LEN - 1)
APPLICATION_RE_STRING = (r'(?:%s)?(?:%s)?%s' %
(PARTITION_RE_STRING,
DOMAIN_RE_STRING,
DISPLAY_APP_ID_RE_STRING))
# NOTE(user,user): These regexes have been copied to multiple other
# locations in google.apphosting so we don't have to pull this file into
# python_lib for other modules to work in production.
# Other known locations as of 2016-08-15:
# - java/com/google/apphosting/admin/legacy/LegacyAppInfo.java
# - apphosting/client/app_config_old.cc
# - apphosting/api/app_config/app_config_server2.cc
MODULE_ID_RE_STRING = r'^(?!-)[a-z\d\-]{0,%d}[a-z\d]$' % (MODULE_ID_MAX_LEN - 1)
MODULE_VERSION_ID_RE_STRING = (r'^(?!-)[a-z\d\-]{0,%d}[a-z\d]$' %
(MODULE_VERSION_ID_MAX_LEN - 1))
_IDLE_INSTANCES_REGEX = r'^([\d]+|automatic)$'
# Note that this regex will not allow zero-prefixed numbers, e.g. 0001.
_INSTANCES_REGEX = r'^[1-9][\d]*$'
_CONCURRENT_REQUESTS_REGEX = r'^([1-9]\d*)$'
# This enforces that we will only accept a single decimal point of accuracy at
# the granularity of seconds and no decimal point with a granularity of
# milliseconds.
_PENDING_LATENCY_REGEX = r'^(\d+((\.\d{1,3})?s|ms)|automatic)$'
_IDLE_TIMEOUT_REGEX = r'^[\d]+(s|m)$'
GCE_RESOURCE_NAME_REGEX = r'^[a-z]([a-z\d-]{0,61}[a-z\d])?$'
ALTERNATE_HOSTNAME_SEPARATOR = '-dot-'
# Note(user): This must match api/app_config.py
BUILTIN_NAME_PREFIX = 'ah-builtin'
RUNTIME_RE_STRING = r'[a-z][a-z0-9\-]{0,29}'
API_VERSION_RE_STRING = r'[\w.]{1,32}'
ENV_RE_STRING = r'[\w.]{1,32}'
SOURCE_LANGUAGE_RE_STRING = r'[\w.\-]{1,32}'
HANDLER_STATIC_FILES = 'static_files'
HANDLER_STATIC_DIR = 'static_dir'
HANDLER_SCRIPT = 'script'
HANDLER_API_ENDPOINT = 'api_endpoint'
LOGIN_OPTIONAL = 'optional'
LOGIN_REQUIRED = 'required'
LOGIN_ADMIN = 'admin'
AUTH_FAIL_ACTION_REDIRECT = 'redirect'
AUTH_FAIL_ACTION_UNAUTHORIZED = 'unauthorized'
DATASTORE_ID_POLICY_LEGACY = 'legacy'
DATASTORE_ID_POLICY_DEFAULT = 'default'
SECURE_HTTP = 'never'
SECURE_HTTPS = 'always'
SECURE_HTTP_OR_HTTPS = 'optional'
# Used for missing values; see http://b/issue?id=2073962.
SECURE_DEFAULT = 'default'
REQUIRE_MATCHING_FILE = 'require_matching_file'
DEFAULT_SKIP_FILES = (r'^(.*/)?('
r'(#.*#)|'
r'(.*~)|'
r'(.*\.py[co])|'
r'(.*/RCS/.*)|'
r'(\..*)|'
r')$')
# Expression meaning to skip no files, which is the default for AppInclude.
SKIP_NO_FILES = r'(?!)'
DEFAULT_NOBUILD_FILES = (r'^$')
# Attributes for `URLMap`
LOGIN = 'login'
AUTH_FAIL_ACTION = 'auth_fail_action'
SECURE = 'secure'
URL = 'url'
POSITION = 'position'
POSITION_HEAD = 'head'
POSITION_TAIL = 'tail'
STATIC_FILES = 'static_files'
UPLOAD = 'upload'
STATIC_DIR = 'static_dir'
MIME_TYPE = 'mime_type'
SCRIPT = 'script'
EXPIRATION = 'expiration'
API_ENDPOINT = 'api_endpoint'
HTTP_HEADERS = 'http_headers'
APPLICATION_READABLE = 'application_readable'
REDIRECT_HTTP_RESPONSE_CODE = 'redirect_http_response_code'
# Attributes for `AppInfoExternal`
APPLICATION = 'application'
PROJECT = 'project' # An alias for 'application'
MODULE = 'module'
SERVICE = 'service'
AUTOMATIC_SCALING = 'automatic_scaling'
MANUAL_SCALING = 'manual_scaling'
BASIC_SCALING = 'basic_scaling'
VM = 'vm'
VM_SETTINGS = 'vm_settings'
BETA_SETTINGS = 'beta_settings'
VM_HEALTH_CHECK = 'vm_health_check'
HEALTH_CHECK = 'health_check'
RESOURCES = 'resources'
NETWORK = 'network'
VERSION = 'version'
MAJOR_VERSION = 'major_version'
MINOR_VERSION = 'minor_version'
RUNTIME = 'runtime'
API_VERSION = 'api_version'
ENDPOINTS_API_SERVICE = 'endpoints_api_service'
ENV = 'env'
ENTRYPOINT = 'entrypoint'
RUNTIME_CONFIG = 'runtime_config'
SOURCE_LANGUAGE = 'source_language'
BUILTINS = 'builtins'
INCLUDES = 'includes'
HANDLERS = 'handlers'
LIBRARIES = 'libraries'
DEFAULT_EXPIRATION = 'default_expiration'
SKIP_FILES = 'skip_files'
NOBUILD_FILES = 'nobuild_files'
SERVICES = 'inbound_services'
DERIVED_FILE_TYPE = 'derived_file_type'
JAVA_PRECOMPILED = 'java_precompiled'
PYTHON_PRECOMPILED = 'python_precompiled'
ADMIN_CONSOLE = 'admin_console'
ERROR_HANDLERS = 'error_handlers'
BACKENDS = 'backends'
THREADSAFE = 'threadsafe'
DATASTORE_AUTO_ID_POLICY = 'auto_id_policy'
API_CONFIG = 'api_config'
CODE_LOCK = 'code_lock'
ENV_VARIABLES = 'env_variables'
SOURCE_REPO_RE_STRING = r'^[a-z][a-z0-9\-\+\.]*:[^#]*$'
SOURCE_REVISION_RE_STRING = r'^[0-9a-fA-F]+$'
# Maximum size of all source references (in bytes) for a deployment.
SOURCE_REFERENCES_MAX_SIZE = 2048
INSTANCE_CLASS = 'instance_class'
# Attributes for Standard App Engine (only) AutomaticScaling.
MINIMUM_PENDING_LATENCY = 'min_pending_latency'
MAXIMUM_PENDING_LATENCY = 'max_pending_latency'
MINIMUM_IDLE_INSTANCES = 'min_idle_instances'
MAXIMUM_IDLE_INSTANCES = 'max_idle_instances'
MAXIMUM_CONCURRENT_REQUEST = 'max_concurrent_requests'
# Attributes for Managed VMs (only) AutomaticScaling. These are very
# different than Standard App Engine because scaling settings are
# mapped to Cloud Autoscaler (as opposed to the clone scheduler). See
# AutoscalingConfig in
MIN_NUM_INSTANCES = 'min_num_instances'
MAX_NUM_INSTANCES = 'max_num_instances'
COOL_DOWN_PERIOD_SEC = 'cool_down_period_sec'
CPU_UTILIZATION = 'cpu_utilization'
CPU_UTILIZATION_UTILIZATION = 'target_utilization'
CPU_UTILIZATION_AGGREGATION_WINDOW_LENGTH_SEC = 'aggregation_window_length_sec'
# Managed VMs Richer Autoscaling. These (MVMs only) scaling settings
# are supported for both vm:true and env:2|flex, but are not yet
# publicly documented.
TARGET_NETWORK_SENT_BYTES_PER_SEC = 'target_network_sent_bytes_per_sec'
TARGET_NETWORK_SENT_PACKETS_PER_SEC = 'target_network_sent_packets_per_sec'
TARGET_NETWORK_RECEIVED_BYTES_PER_SEC = 'target_network_received_bytes_per_sec'
TARGET_NETWORK_RECEIVED_PACKETS_PER_SEC = (
'target_network_received_packets_per_sec')
TARGET_DISK_WRITE_BYTES_PER_SEC = 'target_disk_write_bytes_per_sec'
TARGET_DISK_WRITE_OPS_PER_SEC = 'target_disk_write_ops_per_sec'
TARGET_DISK_READ_BYTES_PER_SEC = 'target_disk_read_bytes_per_sec'
TARGET_DISK_READ_OPS_PER_SEC = 'target_disk_read_ops_per_sec'
TARGET_REQUEST_COUNT_PER_SEC = 'target_request_count_per_sec'
TARGET_CONCURRENT_REQUESTS = 'target_concurrent_requests'
# Attributes for ManualScaling
INSTANCES = 'instances'
# Attributes for BasicScaling
MAX_INSTANCES = 'max_instances'
IDLE_TIMEOUT = 'idle_timeout'
# Attributes for AdminConsole
PAGES = 'pages'
NAME = 'name'
# Attributes for EndpointsApiService
ENDPOINTS_NAME = 'name'
CONFIG_ID = 'config_id'
# Attributes for ErrorHandlers
ERROR_CODE = 'error_code'
FILE = 'file'
_ERROR_CODE_REGEX = r'(default|over_quota|dos_api_denial|timeout)'
# Attributes for BuiltinHandler
ON = 'on'
ON_ALIASES = ['yes', 'y', 'True', 't', '1', 'true']
OFF = 'off'
OFF_ALIASES = ['no', 'n', 'False', 'f', '0', 'false']
# Attributes for `VmHealthCheck`. Please refer to message `VmHealthCheck` in
# `request_path` and `port` are not configurable yet.
ENABLE_HEALTH_CHECK = 'enable_health_check'
CHECK_INTERVAL_SEC = 'check_interval_sec'
TIMEOUT_SEC = 'timeout_sec'
UNHEALTHY_THRESHOLD = 'unhealthy_threshold'
HEALTHY_THRESHOLD = 'healthy_threshold'
RESTART_THRESHOLD = 'restart_threshold'
HOST = 'host'
# Attributes for Resources.
CPU = 'cpu'
MEMORY_GB = 'memory_gb'
DISK_SIZE_GB = 'disk_size_gb'
# Attributes for Resources:Volumes.
VOLUMES = 'volumes'
VOLUME_NAME = 'name'
VOLUME_TYPE = 'volume_type'
SIZE_GB = 'size_gb'
# Attributes for Network.
FORWARDED_PORTS = 'forwarded_ports'
INSTANCE_TAG = 'instance_tag'
NETWORK_NAME = 'name'
SUBNETWORK_NAME = 'subnetwork_name'
class _VersionedLibrary(object):
"""A versioned library supported by App Engine."""
def __init__(self,
name,
url,
description,
supported_versions,
latest_version,
default_version=None,
deprecated_versions=None,
experimental_versions=None):
"""Initializer for `_VersionedLibrary`.
Args:
name: The name of the library; for example, `django`.
url: The URL for the library's project page; for example,
`http://www.djangoproject.com/`.
description: A short description of the library; for example,
`A framework...`.
supported_versions: A list of supported version names, ordered by release
date; for example, `["v1", "v2", "v3"]`.
latest_version: The version of the library that will be used when you
specify `latest`. The rule of thumb is that this value should be the
newest version that is neither deprecated nor experimental; however,
this value might be an experimental version if all of the supported
versions are either deprecated or experimental.
default_version: The version of the library that is enabled by default
in the Python 2.7 runtime, or `None` if the library is not available
by default; for example, `v1`.
deprecated_versions: A list of the versions of the library that have been
deprecated; for example, `["v1", "v2"]`.
experimental_versions: A list of the versions of the library that are
currently experimental; for example, `["v1"]`.
"""
self.name = name
self.url = url
self.description = description
self.supported_versions = supported_versions
self.latest_version = latest_version
self.default_version = default_version
self.deprecated_versions = deprecated_versions or []
self.experimental_versions = experimental_versions or []
@property
def non_deprecated_versions(self):
"""Retrieves the versions of the library that are not deprecated.
Returns:
A list of the versions of the library that are not deprecated.
"""
return [version for version in self.supported_versions
if version not in self.deprecated_versions]
_SUPPORTED_LIBRARIES = [
_VersionedLibrary(
'clearsilver',
'http://www.clearsilver.net/',
'A fast, powerful, and language-neutral HTML template system.',
['0.10.5'],
latest_version='0.10.5',
),
_VersionedLibrary(
'django',
'http://www.djangoproject.com/',
'A full-featured web application framework for Python.',
['1.2', '1.3', '1.4', '1.5', '1.9'],
latest_version='1.4',
),
_VersionedLibrary(
'enum',
'https://pypi.python.org/pypi/enum34',
'A backport of the enum module introduced in Python 3.4.',
['0.9.23'],
latest_version='0.9.23',
),
_VersionedLibrary(
'endpoints',
'https://developers.google.com/appengine/docs/python/endpoints/',
'Libraries for building APIs in an App Engine application.',
['1.0'],
latest_version='1.0',
),
_VersionedLibrary(
'grpcio',
'http://www.grpc.io/',
'A high performance general RPC framework.',
['1.0.0'],
latest_version='1.0.0',
default_version='1.0.0',
),
_VersionedLibrary(
'jinja2',
'http://jinja.pocoo.org/docs/',
'A modern and designer friendly templating language for Python.',
['2.6'],
latest_version='2.6',
),
_VersionedLibrary(
'lxml',
'http://lxml.de/',
'A Pythonic binding for the C libraries libxml2 and libxslt.',
['2.3', '2.3.5'],
latest_version='2.3',
experimental_versions=['2.3.5'],
),
_VersionedLibrary(
'markupsafe',
'http://pypi.python.org/pypi/MarkupSafe',
'An XML/HTML/XHTML markup-safe string for Python.',
['0.15', '0.23'],
latest_version='0.15',
),
_VersionedLibrary(
'matplotlib',
'http://matplotlib.org/',
'A 2D plotting library which produces publication-quality figures.',
['1.2.0'],
latest_version='1.2.0',
),
_VersionedLibrary(
'MySQLdb',
'http://mysql-python.sourceforge.net/',
'A Python DB API v2.0 compatible interface to MySQL.',
['1.2.4b4', '1.2.4', '1.2.5'],
latest_version='1.2.5',
experimental_versions=['1.2.4b4', '1.2.4', '1.2.5']
),
_VersionedLibrary(
'numpy',
'http://numpy.scipy.org/',
'A general-purpose library for array-processing.',
['1.6.1'],
latest_version='1.6.1',
),
_VersionedLibrary(
'PIL',
'http://www.pythonware.com/library/pil/handbook/',
'A library for creating and transforming images.',
['1.1.7'],
latest_version='1.1.7',
),
_VersionedLibrary(
'protorpc',
'https://code.google.com/p/google-protorpc/',
'A framework for implementing HTTP-based remote procedure call (RPC) '
'services.',
['1.0'],
latest_version='1.0',
default_version='1.0',
),
_VersionedLibrary(
'pytz',
'https://pypi.python.org/pypi/pytz?',
'A library for cross-platform timezone calculations.',
['2016.4'],
latest_version='2016.4',
default_version='2016.4',
),
_VersionedLibrary(
'crcmod',
'http://crcmod.sourceforge.net/',
'A library for generating Cyclic Redundancy Checks (CRC).',
['1.7'],
latest_version='1.7',
),
_VersionedLibrary(
'PyAMF',
'http://pyamf.appspot.com/index.html',
'A library that provides Action Message Format (AMF) functionality.',
['0.6.1', '0.7.2'],
latest_version='0.6.1',
experimental_versions=['0.7.2'],
),
_VersionedLibrary(
'pycrypto',
'https://www.dlitz.net/software/pycrypto/',
'A library of cryptography functions such as random number generation.',
['2.3', '2.6', '2.6.1'],
latest_version='2.6',
),
_VersionedLibrary(
'setuptools',
'http://pypi.python.org/pypi/setuptools',
'A library that provides package and module discovery capabilities.',
['0.6c11'],
latest_version='0.6c11',
),
_VersionedLibrary(
'six',
'https://pypi.python.org/pypi/six',
'Abstracts differences between Python 2.x and Python 3.',
['1.9.0'],
latest_version='1.9.0',
),
_VersionedLibrary(
'ssl',
'http://docs.python.org/dev/library/ssl.html',
'The SSL socket wrapper built-in module.',
['2.7', '2.7.11'],
latest_version='2.7',
),
_VersionedLibrary(
'webapp2',
'http://webapp-improved.appspot.com/',
'A lightweight Python web framework.',
['2.3', '2.5.1', '2.5.2'],
latest_version='2.5.2',
default_version='2.3',
deprecated_versions=['2.3']
),
_VersionedLibrary(
'webob',
'http://www.webob.org/',
'A library that provides wrappers around the WSGI request environment.',
['1.1.1', '1.2.3'],
latest_version='1.2.3',
default_version='1.1.1',
),
_VersionedLibrary(
'werkzeug',
'http://www.werkzeug.pocoo.org/',
'A WSGI utility library.',
['0.11.10'],
latest_version='0.11.10',
default_version='0.11.10',
),
_VersionedLibrary(
'yaml',
'http://www.yaml.org/',
'A library for YAML serialization and deserialization.',
['3.10'],
latest_version='3.10',
default_version='3.10'
),
]
_NAME_TO_SUPPORTED_LIBRARY = dict((library.name, library)
for library in _SUPPORTED_LIBRARIES)
# A mapping from third-party name/version to a list of that library's
# dependencies.
REQUIRED_LIBRARIES = {
('jinja2', '2.6'): [('markupsafe', '0.15'), ('setuptools', '0.6c11')],
('jinja2', 'latest'): [('markupsafe', 'latest'), ('setuptools', 'latest')],
('matplotlib', '1.2.0'): [('numpy', '1.6.1')],
('matplotlib', 'latest'): [('numpy', 'latest')],
}
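# For illustration, a `libraries:` stanza in `app.yaml` that requests one of
# the third-party libraries listed above might look like this (the library and
# version chosen are examples only):
#
#   libraries:
#   - name: jinja2
#     version: latest
#
# REQUIRED_LIBRARIES above records that `jinja2` at `latest` depends on
# `markupsafe` and `setuptools` at `latest`.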
_USE_VERSION_FORMAT = ('use one of: "%s"')
# See RFC 2616 section 2.2.
_HTTP_SEPARATOR_CHARS = frozenset('()<>@,;:\\"/[]?={} \t')
_HTTP_TOKEN_CHARS = frozenset(string.printable[:-5]) - _HTTP_SEPARATOR_CHARS
_HTTP_TOKEN_RE = re.compile('[%s]+$' % re.escape(''.join(_HTTP_TOKEN_CHARS)))
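# For illustration: `_HTTP_TOKEN_RE` accepts header names made of printable,
# non-separator characters, so a name like `X-My-Header` matches, while names
# containing separators or whitespace (for example `Bad Header` or `Foo:Bar`)
# do not. The names here are examples only.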
# Source: http://www.cs.tut.fi/~jkorpela/http.html
_HTTP_REQUEST_HEADERS = frozenset([
'accept',
'accept-charset',
'accept-encoding',
'accept-language',
'authorization',
'expect',
'from',
'host',
'if-match',
'if-modified-since',
'if-none-match',
'if-range',
'if-unmodified-since',
'max-forwards',
'proxy-authorization',
'range',
'referer',
'te',
'user-agent',
])
# The maximum length we allow for a cookie header; 4096 bytes is the minimum
# cookie size (in bytes) that HTTP clients should support, per RFCs 2109 and 2965.
_MAX_COOKIE_LENGTH = 4096
# The maximum URL length. The limit includes a trailing NULL character,
# which is why this is not 2048.
_MAX_URL_LENGTH = 2047
# We allow certain headers to be larger than the normal limit of 8192 bytes.
_MAX_HEADER_SIZE_FOR_EXEMPTED_HEADERS = 10240
_CANNED_RUNTIMES = ('contrib-dart', 'dart', 'go', 'php', 'php55', 'python',
'python27', 'python-compat', 'java', 'java7', 'vm',
'custom', 'nodejs', 'ruby')
_all_runtimes = _CANNED_RUNTIMES
def GetAllRuntimes():
"""Returns the list of all valid runtimes.
This list can include third-party runtimes as well as canned runtimes.
Returns:
Tuple of strings.
"""
return _all_runtimes
class HandlerBase(validation.Validated):
"""Base class for URLMap and ApiConfigHandler."""
ATTRIBUTES = {
# Common fields.
URL: validation.Optional(_URL_REGEX),
LOGIN: validation.Options(LOGIN_OPTIONAL,
LOGIN_REQUIRED,
LOGIN_ADMIN,
default=LOGIN_OPTIONAL),
AUTH_FAIL_ACTION: validation.Options(AUTH_FAIL_ACTION_REDIRECT,
AUTH_FAIL_ACTION_UNAUTHORIZED,
default=AUTH_FAIL_ACTION_REDIRECT),
SECURE: validation.Options(SECURE_HTTP,
SECURE_HTTPS,
SECURE_HTTP_OR_HTTPS,
SECURE_DEFAULT,
default=SECURE_DEFAULT),
# Python/CGI fields.
HANDLER_SCRIPT: validation.Optional(_FILES_REGEX)
}
class HttpHeadersDict(validation.ValidatedDict):
"""A dict that limits keys and values to what `http_headers` allows.
`http_headers` is a static handler key; it applies to handlers with
`static_dir` or `static_files` keys. The following code is an example of how
`http_headers` is used::
handlers:
- url: /static
static_dir: static
http_headers:
X-Foo-Header: foo value
X-Bar-Header: bar value
"""
DISALLOWED_HEADERS = frozenset([
# TODO(user): I don't think there's any reason to disallow users
# from setting Content-Encoding, but other parts of the system prevent
# this; therefore, we disallow it here. See the following discussion:
'content-encoding',
'content-length',
'date',
'server'
])
MAX_HEADER_LENGTH = 500
MAX_HEADER_VALUE_LENGTHS = {
'content-security-policy': _MAX_HEADER_SIZE_FOR_EXEMPTED_HEADERS,
'x-content-security-policy': _MAX_HEADER_SIZE_FOR_EXEMPTED_HEADERS,
'x-webkit-csp': _MAX_HEADER_SIZE_FOR_EXEMPTED_HEADERS,
'content-security-policy-report-only':
_MAX_HEADER_SIZE_FOR_EXEMPTED_HEADERS,
'set-cookie': _MAX_COOKIE_LENGTH,
'set-cookie2': _MAX_COOKIE_LENGTH,
'location': _MAX_URL_LENGTH}
MAX_LEN = 500
class KeyValidator(validation.Validator):
"""Ensures that keys in `HttpHeadersDict` are valid.
`HttpHeadersDict` contains a list of headers. An instance is used as
`HttpHeadersDict`'s `KEY_VALIDATOR`.
"""
def Validate(self, name, unused_key=None):
"""Returns an argument, or raises an exception if the argument is invalid.
HTTP header names are defined by `RFC 2616, section 4.2`_.
Args:
name: HTTP header field value.
unused_key: Unused.
Returns:
name argument, unchanged.
Raises:
appinfo_errors.InvalidHttpHeaderName: An argument cannot be used as an
HTTP header name.
.. _RFC 2616, section 4.2:
https://www.ietf.org/rfc/rfc2616.txt
"""
original_name = name
# Make sure only ASCII data is used.
if isinstance(name, unicode):
try:
name = name.encode('ascii')
except UnicodeEncodeError:
raise appinfo_errors.InvalidHttpHeaderName(
'HTTP header values must not contain non-ASCII data')
# HTTP headers are case-insensitive.
name = name.lower()
if not _HTTP_TOKEN_RE.match(name):
raise appinfo_errors.InvalidHttpHeaderName(
'An HTTP header must be a non-empty RFC 2616 token.')
# Request headers shouldn't be used in responses.
if name in _HTTP_REQUEST_HEADERS:
raise appinfo_errors.InvalidHttpHeaderName(
'%r can only be used in HTTP requests, not responses.'
% original_name)
# Make sure that none of the reserved prefixes is used.
if name.startswith('x-appengine'):
raise appinfo_errors.InvalidHttpHeaderName(
'HTTP header names that begin with X-Appengine are reserved.')
if wsgiref.util.is_hop_by_hop(name):
raise appinfo_errors.InvalidHttpHeaderName(
'Only end-to-end headers may be used. See RFC 2616 section'
' 13.5.1.')
if name in HttpHeadersDict.DISALLOWED_HEADERS:
raise appinfo_errors.InvalidHttpHeaderName(
'%s is a disallowed header.' % name)
return original_name
class ValueValidator(validation.Validator):
"""Ensures that values in `HttpHeadersDict` are valid.
An instance is used as `HttpHeadersDict`'s `VALUE_VALIDATOR`.
"""
def Validate(self, value, key=None):
"""Returns a value, or raises an exception if the value is invalid.
According to `RFC 2616 section 4.2`_ header field values must consist "of
either *TEXT or combinations of token, separators, and quoted-string"::
TEXT = <any OCTET except CTLs, but including LWS>
Args:
value: HTTP header field value.
key: HTTP header field name.
Returns:
A value argument.
Raises:
appinfo_errors.InvalidHttpHeaderValue: An argument cannot be used as an
HTTP header value.
.. _RFC 2616, section 4.2:
https://www.ietf.org/rfc/rfc2616.txt
"""
# Make sure only ASCII data is used.
if isinstance(value, unicode):
try:
value = value.encode('ascii')
except UnicodeEncodeError:
raise appinfo_errors.InvalidHttpHeaderValue(
'HTTP header values must not contain non-ASCII data')
# HTTP headers are case-insensitive.
key = key.lower()
# TODO(user): This is the same check that appserver performs, but it
# could be stronger. e.g. `"foo` should not be considered valid, because
# HTTP does not allow unclosed double quote marks in header values, per
# RFC 2616 section 4.2.
printable = set(string.printable[:-5])
if not all(char in printable for char in value):
raise appinfo_errors.InvalidHttpHeaderValue(
'HTTP header field values must consist of printable characters.')
HttpHeadersDict.ValueValidator.AssertHeaderNotTooLong(key, value)
return value
@staticmethod
def AssertHeaderNotTooLong(name, value):
header_length = len('%s: %s\r\n' % (name, value))
# The `>=` operator here is a little counter-intuitive. The reason for it
# is that I'm trying to follow the
# `HTTPProto::IsValidHeader` implementation.
if header_length >= HttpHeadersDict.MAX_HEADER_LENGTH:
# If execution reaches this point, it generally means the header is too
# long, but there are a few exceptions, which are listed in the next
# dict.
try:
max_len = HttpHeadersDict.MAX_HEADER_VALUE_LENGTHS[name]
except KeyError:
raise appinfo_errors.InvalidHttpHeaderValue(
'HTTP header (name + value) is too long.')
# We are dealing with one of the exceptional headers with larger maximum
# value lengths.
if len(value) > max_len:
insert = name, len(value), max_len
raise appinfo_errors.InvalidHttpHeaderValue(
'%r header value has length %d, which exceeds the maximum allowed,'
' %d.' % insert)
KEY_VALIDATOR = KeyValidator()
VALUE_VALIDATOR = ValueValidator()
def Get(self, header_name):
"""Gets a header value.
Args:
header_name: HTTP header name to look for.
Returns:
A header value that corresponds to `header_name`. If more than one such
value is in `self`, one of the values is selected arbitrarily and
returned. The selection is not deterministic.
"""
for name in self:
if name.lower() == header_name.lower():
return self[name]
# TODO(user): Perhaps, this functionality should be part of
# `validation.ValidatedDict`.
def __setitem__(self, key, value):
is_addition = self.Get(key) is None
if is_addition and len(self) >= self.MAX_LEN:
raise appinfo_errors.TooManyHttpHeaders(
'Tried to add another header when the current set of HTTP headers'
' already has the maximum allowed number of headers, %d.'
% HttpHeadersDict.MAX_LEN)
super(HttpHeadersDict, self).__setitem__(key, value)
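# For illustration, `HttpHeadersDict.KeyValidator` above would reject response
# header names such as `X-Appengine-Anything` (reserved prefix), `Connection`
# (a hop-by-hop header), `Content-Length` (explicitly disallowed), and
# `Accept` (a request-only header), while a custom name like `X-Foo-Header`
# from the class docstring is accepted. These names are examples only.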
class URLMap(HandlerBase):
"""Maps from URLs to handlers.
This class acts like a union type. Its purpose is to describe a mapping
between a set of URLs and their handlers. The handler type of a given instance
is determined by which `handler-id` attribute is used.
Every mapping can have one and only one handler type. Attempting to use more
than one `handler-id` attribute will cause an `UnknownHandlerType` to be
raised during validation. Failure to provide any `handler-id` attributes will
cause `MissingHandlerType` to be raised during validation.
The regular expression used by the `url` field will be used to match against
the entire URL path and query string of the request; therefore, partial maps
will not be matched. Specifying a `url`, such as `/admin`, is the same as
matching against the regular expression `^/admin$`. Don't start your matching
`url` with `^` or end it with `$`. These regular expressions won't be
accepted and will raise `ValueError`.
Attributes:
login: Specifies whether a user should be logged in to access a URL.
The default value of this argument is `optional`.
secure: Sets the restriction on the protocol that can be used to serve this
URL or handler. This value can be set to `HTTP`, `HTTPS` or `either`.
url: Specifies a regular expression that is used to fully match against the
request URLs path. See the "Special cases" section of this document to
learn more.
static_files: Specifies the handler ID attribute that maps `url` to the
appropriate file. You can specify regular expression backreferences to
the string matched to `url`.
upload: Specifies the regular expression that is used by the application
configuration program to determine which files are uploaded as blobs.
Because it is difficult to determine this information using just the
`url` and `static_files` arguments, this attribute must be included.
This attribute is required when you define a `static_files` mapping. A
matching file name must fully match against the `upload` regular
expression, similar to how `url` is matched against the request path. Do
not begin the `upload` argument with the `^` character or end it with
the `$` character.
static_dir: Specifies the handler ID that maps the provided `url` to a
sub-directory within the application directory. See "Special cases."
mime_type: When used with `static_files` and `static_dir`, this argument
specifies that the MIME type of the files that are served from those
directories must be overridden with this value.
script: Specifies the handler ID that maps URLs to a script handler within
the application directory that will run using CGI.
position: Used in `AppInclude` objects to specify whether a handler should
be inserted at the beginning of the primary handler list or at the end.
If `tail` is specified, the handler is inserted at the end; otherwise,
the handler is inserted at the beginning. This behavior implies that
`head` is the effective default.
expiration: When used with static files and directories, this argument
specifies the time delta to use for cache expiration. This argument
should use the following format: `4d 5h 30m 15s`, where each letter
signifies days, hours, minutes, and seconds, respectively. The `s` for
"seconds" can be omitted. Only one amount must be specified, though
combining multiple amounts is optional. The following list contains
examples of values that are acceptable: `10`, `1d 6h`, `1h 30m`,
`7d 7d 7d`, `5m 30`.
api_endpoint: Specifies the handler ID that identifies an endpoint as an API
endpoint. Calls that terminate here will be handled by the API serving
framework.
Special cases:
When defining a `static_dir` handler, do not use a regular expression in the
`url` attribute. Both the `url` and `static_dir` attributes are
automatically mapped to these equivalents::
<url>/(.*)
<static_dir>/\1
For example, this declaration...::
url: /images
static_dir: images_folder
...is equivalent to this `static_files` declaration::
url: /images/(.*)
static_files: images_folder/\1
upload: images_folder/(.*)
"""
ATTRIBUTES = {
# Static file fields.
# File mappings are allowed to have regex back references.
HANDLER_STATIC_FILES: validation.Optional(_FILES_REGEX),
UPLOAD: validation.Optional(_FILES_REGEX),
APPLICATION_READABLE: validation.Optional(bool),
# Static directory fields.
HANDLER_STATIC_DIR: validation.Optional(_FILES_REGEX),
# Used in both static mappings.
MIME_TYPE: validation.Optional(str),
EXPIRATION: validation.Optional(_EXPIRATION_REGEX),
REQUIRE_MATCHING_FILE: validation.Optional(bool),
HTTP_HEADERS: validation.Optional(HttpHeadersDict),
# Python/CGI fields.
POSITION: validation.Optional(validation.Options(POSITION_HEAD,
POSITION_TAIL)),
HANDLER_API_ENDPOINT: validation.Optional(validation.Options(
(ON, ON_ALIASES),
(OFF, OFF_ALIASES))),
REDIRECT_HTTP_RESPONSE_CODE: validation.Optional(validation.Options(
'301', '302', '303', '307')),
}
ATTRIBUTES.update(HandlerBase.ATTRIBUTES)
COMMON_FIELDS = set([
URL, LOGIN, AUTH_FAIL_ACTION, SECURE, REDIRECT_HTTP_RESPONSE_CODE])
# The keys of this map are attributes which can be used to identify each
# mapping type in addition to the handler identifying attribute itself.
ALLOWED_FIELDS = {
HANDLER_STATIC_FILES: (MIME_TYPE, UPLOAD, EXPIRATION,
REQUIRE_MATCHING_FILE, HTTP_HEADERS,
APPLICATION_READABLE),
HANDLER_STATIC_DIR: (MIME_TYPE, EXPIRATION, REQUIRE_MATCHING_FILE,
HTTP_HEADERS, APPLICATION_READABLE),
HANDLER_SCRIPT: (POSITION,),
HANDLER_API_ENDPOINT: (POSITION, SCRIPT),
}
def GetHandler(self):
"""Gets the handler for a mapping.
Returns:
The value of the handler, as determined by the handler ID attribute.
"""
return getattr(self, self.GetHandlerType())
def GetHandlerType(self):
"""Gets the handler type of a mapping.
Returns:
The handler type as determined by which handler ID attribute is set.
Raises:
UnknownHandlerType: If none of the handler ID attributes are set.
UnexpectedHandlerAttribute: If an unexpected attribute is set for the
discovered handler type.
HandlerTypeMissingAttribute: If the handler is missing a required
attribute for its handler type.
MissingHandlerAttribute: If a URL handler is missing an attribute.
"""
# Special case for the `api_endpoint` handler as it may have a `script`
# attribute as well.
if getattr(self, HANDLER_API_ENDPOINT) is not None:
# Matched id attribute, break out of loop.
mapping_type = HANDLER_API_ENDPOINT
else:
for id_field in URLMap.ALLOWED_FIELDS.iterkeys():
# Attributes always exist as defined by ATTRIBUTES.
if getattr(self, id_field) is not None:
# Matched id attribute, break out of loop.
mapping_type = id_field
break
else:
# If no mapping type is found raise exception.
raise appinfo_errors.UnknownHandlerType(
'Unknown url handler type.\n%s' % str(self))
allowed_fields = URLMap.ALLOWED_FIELDS[mapping_type]
# Make sure that none of the set attributes on this handler
# are not allowed for the discovered handler type.
for attribute in self.ATTRIBUTES.iterkeys():
if (getattr(self, attribute) is not None and
not (attribute in allowed_fields or
attribute in URLMap.COMMON_FIELDS or
attribute == mapping_type)):
raise appinfo_errors.UnexpectedHandlerAttribute(
'Unexpected attribute "%s" for mapping type %s.' %
(attribute, mapping_type))
# Also check that static file map has 'upload'.
# NOTE: Add REQUIRED_FIELDS along with ALLOWED_FIELDS if any more
# exceptional cases arise.
if mapping_type == HANDLER_STATIC_FILES and not self.upload:
raise appinfo_errors.MissingHandlerAttribute(
'Missing "%s" attribute for URL "%s".' % (UPLOAD, self.url))
return mapping_type
def CheckInitialized(self):
"""Adds additional checking to make sure a handler has correct fields.
In addition to normal `ValidatedCheck`, this method calls `GetHandlerType`,
which validates whether all of the handler fields are configured properly.
Raises:
UnknownHandlerType: If none of the handler ID attributes are set.
UnexpectedHandlerAttribute: If an unexpected attribute is set for the
discovered handler type.
HandlerTypeMissingAttribute: If the handler is missing a required
attribute for its handler type.
ContentTypeSpecifiedMultipleTimes: If `mime_type` is inconsistent with
`http_headers`.
"""
super(URLMap, self).CheckInitialized()
if self.GetHandlerType() in (STATIC_DIR, STATIC_FILES):
# re how headers that affect caching interact per RFC 2616:
#
# Section 13.1.3 says that when there is "apparent conflict between
# [Cache-Control] header values, the most restrictive interpretation is
# applied".
#
# Section 14.21 says that Cache-Control: max-age overrides Expires
# headers.
#
# Section 14.32 says that Pragma: no-cache has no meaning in responses;
# therefore, we do not need to be concerned about that header here.
self.AssertUniqueContentType()
def AssertUniqueContentType(self):
"""Makes sure that `self.http_headers` is consistent with `self.mime_type`.
This method assumes that `self` is a static handler, that is, either
`self.static_dir` or `self.static_files` is set (not `None`).
Raises:
appinfo_errors.ContentTypeSpecifiedMultipleTimes: If `self.http_headers`
contains a `Content-Type` header, and `self.mime_type` is set. For
example, the following configuration would be rejected::
handlers:
- url: /static
static_dir: static
mime_type: text/html
http_headers:
content-type: text/html
As this example shows, a configuration will be rejected when
`http_headers` and `mime_type` specify a content type, even when they
specify the same content type.
"""
used_both_fields = self.mime_type and self.http_headers
if not used_both_fields:
return
content_type = self.http_headers.Get('Content-Type')
if content_type is not None:
raise appinfo_errors.ContentTypeSpecifiedMultipleTimes(
'http_header specified a Content-Type header of %r in a handler that'
' also specified a mime_type of %r.' % (content_type, self.mime_type))
def FixSecureDefaults(self):
"""Forces omitted `secure` handler fields to be set to 'secure: optional'.
The effect is that `handler.secure` is never equal to the nominal default.
"""
# See http://b/issue?id=2073962.
if self.secure == SECURE_DEFAULT:
self.secure = SECURE_HTTP_OR_HTTPS
def WarnReservedURLs(self):
"""Generates a warning for reserved URLs.
See the `version element documentation`_ to learn which URLs are reserved.
.. _`version element documentation`:
https://cloud.google.com/appengine/docs/python/config/appref#syntax
"""
if self.url == '/form':
logging.warning(
'The URL path "/form" is reserved and will not be matched.')
def ErrorOnPositionForAppInfo(self):
"""Raises an error if position is specified outside of AppInclude objects.
Raises:
PositionUsedInAppYamlHandler: If the `position` attribute is specified for
an `app.yaml` file instead of an `include.yaml` file.
"""
if self.position:
raise appinfo_errors.PositionUsedInAppYamlHandler(
'The position attribute was specified for this handler, but this is '
'an app.yaml file. Position attribute is only valid for '
'include.yaml files.')
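# For illustration, a `static_files` handler validated by `URLMap` above might
# look like the following `app.yaml` fragment; note that `upload` is required
# for `static_files` mappings, and that the paths shown are examples only:
#
#   handlers:
#   - url: /images/(.*\.png)
#     static_files: static/images/\1
#     upload: static/images/.*\.png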
class AdminConsolePage(validation.Validated):
"""Class representing the admin console page in an `AdminConsole` object."""
ATTRIBUTES = {
URL: _URL_REGEX,
NAME: _PAGE_NAME_REGEX,
}
class AdminConsole(validation.Validated):
"""Class representing an admin console directives in application info."""
ATTRIBUTES = {
PAGES: validation.Optional(validation.Repeated(AdminConsolePage)),
}
@classmethod
def Merge(cls, adminconsole_one, adminconsole_two):
"""Returns the result of merging two `AdminConsole` objects."""
# Right now this method only needs to worry about the pages attribute of
# `AdminConsole`. However, since this object is valid as part of an
# `AppInclude` object, any objects added to `AdminConsole` in the future
# must also be merged. Rather than burying the merge logic in the process
# of merging two `AppInclude` objects, it is centralized here. If you modify
# the `AdminConsole` object to support other objects, you must also modify
# this method to support merging those additional objects.
if not adminconsole_one or not adminconsole_two:
return adminconsole_one or adminconsole_two
if adminconsole_one.pages:
if adminconsole_two.pages:
adminconsole_one.pages.extend(adminconsole_two.pages)
else:
adminconsole_one.pages = adminconsole_two.pages
return adminconsole_one
class ErrorHandlers(validation.Validated):
"""Class representing error handler directives in application info."""
ATTRIBUTES = {
ERROR_CODE: validation.Optional(_ERROR_CODE_REGEX),
FILE: _FILES_REGEX,
MIME_TYPE: validation.Optional(str),
}
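# For illustration, an `error_handlers` stanza validated by `ErrorHandlers`
# above might look like this (file names are examples only):
#
#   error_handlers:
#   - file: default_error.html
#   - error_code: over_quota
#     file: over_quota.html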
class BuiltinHandler(validation.Validated):
"""Class representing built-in handler directives in application info.
This class permits arbitrary keys, but their values must be described by the
`validation.Options` object that is returned by `ATTRIBUTES`.
"""
# `Validated` is a somewhat complicated class. It actually maintains two
# dictionaries: the `ATTRIBUTES` dictionary and an internal `__dict__` object
that maintains key-value pairs.
#
# The normal flow is that a key must exist in `ATTRIBUTES` in order to be
# inserted into `__dict__`. That is why we force the
# `ATTRIBUTES.__contains__` method to always return `True`; we want to accept
# any attribute. Once the method returns `True`, then its value will be
# fetched, which returns `ATTRIBUTES[key]`; that's why we override
# `ATTRIBUTES.__getitem__` to return the validator for a `BuiltinHandler`
# object.
#
# This is where it gets tricky. Once the validator object is returned, then
# `__dict__[key]` is set to the validated object for that key. However, when
# `CheckInitialized()` is called, it uses iteritems from `ATTRIBUTES` in order
# to generate a list of keys to validate. This expects the `BuiltinHandler`
# instance to contain every item in `ATTRIBUTES`, which contains every
# built-in name seen so far by any `BuiltinHandler`. To work around this,
# `__getattr__` always returns `None` for public attribute names. Note that
# `__getattr__` is only called if `__dict__` does not contain the key. Thus,
# the single built-in value set is validated.
#
# What's important to know is that in this implementation, only the keys in
# `ATTRIBUTES` matter, and only the values in `__dict__` matter. The values in
# `ATTRIBUTES` and the keys in `__dict__` are both ignored. The key in
# `__dict__` is only used for the `__getattr__` function, but to find out what
# keys are available, only `ATTRIBUTES` is ever read.
class DynamicAttributes(dict):
"""Provides a dictionary object that will always claim to have a key.
This dictionary returns a fixed value for any `get` operation. The fixed
value that you pass in as a constructor parameter should be a
`validation.Validated` object.
"""
def __init__(self, return_value, **parameters):
self.__return_value = return_value
dict.__init__(self, parameters)
def __contains__(self, _):
return True
def __getitem__(self, _):
return self.__return_value
ATTRIBUTES = DynamicAttributes(
validation.Optional(validation.Options((ON, ON_ALIASES),
(OFF, OFF_ALIASES))))
def __init__(self, **attributes):
"""Ensures all BuiltinHandler objects at least use the `default` attribute.
Args:
**attributes: The attributes that you want to use.
"""
self.builtin_name = ''
super(BuiltinHandler, self).__init__(**attributes)
def __setattr__(self, key, value):
"""Allows `ATTRIBUTES.iteritems()` to return set of items that have values.
Whenever `validate` calls `iteritems()`, it is always called on
`ATTRIBUTES`, not on `__dict__`, so this override is important to ensure
that functions such as `ToYAML()` return the correct set of keys.
Args:
key: The key for the `iteritem` that you want to set.
value: The value for the `iteritem` that you want to set.
Raises:
MultipleBuiltinsSpecified: If more than one built-in is defined in a list
element.
"""
if key == 'builtin_name':
object.__setattr__(self, key, value)
elif not self.builtin_name:
self.ATTRIBUTES[key] = ''
self.builtin_name = key
super(BuiltinHandler, self).__setattr__(key, value)
else:
# Only the name of a built-in handler is currently allowed as an attribute,
# so the object can only be set once. If attributes of a different form are
# desired later, this clause should be used to catch cases where more than
# one attribute is set or an attribute does not match a predefined name.
raise appinfo_errors.MultipleBuiltinsSpecified(
'More than one builtin defined in list element. Each new builtin '
'should be prefixed by "-".')
def __getattr__(self, key):
if key.startswith('_'):
# `__getattr__` is only called for attributes that don't exist in the
# instance dictionary.
raise AttributeError
return None
def ToDict(self):
"""Converts a `BuiltinHander` object to a dictionary.
Returns:
A dictionary in `{builtin_handler_name: on/off}` form
"""
return {self.builtin_name: getattr(self, self.builtin_name)}
@classmethod
def IsDefined(cls, builtins_list, builtin_name):
"""Finds if a builtin is defined in a given list of builtin handler objects.
Args:
builtins_list: A list of `BuiltinHandler` objects, typically
`yaml.builtins`.
builtin_name: The name of the built-in that you want to determine whether
it is defined.
Returns:
`True` if `builtin_name` is defined by a member of `builtins_list`;
`False` otherwise.
"""
for b in builtins_list:
if b.builtin_name == builtin_name:
return True
return False
@classmethod
def ListToTuples(cls, builtins_list):
"""Converts a list of `BuiltinHandler` objects.
Args:
builtins_list: A list of `BuildinHandler` objects to convert to tuples.
Returns:
A list of `(name, status)` that is derived from the `BuiltinHandler`
objects.
"""
return [(b.builtin_name, getattr(b, b.builtin_name)) for b in builtins_list]
@classmethod
def Validate(cls, builtins_list, runtime=None):
"""Verifies that all `BuiltinHandler` objects are valid and not repeated.
Args:
builtins_list: A list of `BuiltinHandler` objects to validate.
runtime: If you specify this argument, warnings are generated for
built-ins that have been deprecated in the given runtime.
Raises:
InvalidBuiltinFormat: If the name of a `BuiltinHandler` object cannot be
determined.
DuplicateBuiltinsSpecified: If a `BuiltinHandler` name is used more than
once in the list.
"""
seen = set()
for b in builtins_list:
if not b.builtin_name:
raise appinfo_errors.InvalidBuiltinFormat(
'Name of builtin for list object %s could not be determined.'
% b)
if b.builtin_name in seen:
raise appinfo_errors.DuplicateBuiltinsSpecified(
'Builtin %s was specified more than once in one yaml file.'
% b.builtin_name)
# This checking must be done here rather than in `apphosting/ext/builtins`
# because `apphosting/ext/builtins` cannot differentiate between
# built-ins specified in `app.yaml` versus ones added in a built-in
# include. There is a hole here where warnings are not generated for
# deprecated built-ins that appear in user-created include files.
if b.builtin_name == 'datastore_admin' and runtime == 'python':
logging.warning(
'The datastore_admin builtin is deprecated. You can find '
'information on how to enable it through the Administrative '
'Console here: '
'http://developers.google.com/appengine/docs/adminconsole/'
'datastoreadmin.html')
elif b.builtin_name == 'mapreduce' and runtime == 'python':
logging.warning(
'The mapreduce builtin is deprecated. You can find more '
'information on how to configure and use it here: '
'http://developers.google.com/appengine/docs/python/dataprocessing/'
'overview.html')
seen.add(b.builtin_name)
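# For illustration, a `builtins` stanza handled by `BuiltinHandler` above might
# look like this; each list element names exactly one builtin and maps it to
# `on` or `off` (the builtin names here are examples only):
#
#   builtins:
#   - remote_api: on
#   - appstats: off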
class ApiConfigHandler(HandlerBase):
"""Class representing `api_config` handler directives in application info."""
ATTRIBUTES = HandlerBase.ATTRIBUTES
ATTRIBUTES.update({
# Make `URL` and `SCRIPT` required for `api_config` stanza
URL: validation.Regex(_URL_REGEX),
HANDLER_SCRIPT: validation.Regex(_FILES_REGEX)
})
class Library(validation.Validated):
"""Class representing the configuration of a single library."""
ATTRIBUTES = {'name': validation.Type(str),
'version': validation.Type(str)}
def CheckInitialized(self):
"""Determines if the library configuration is not valid.
Raises:
appinfo_errors.InvalidLibraryName: If the specified library is not
supported.
appinfo_errors.InvalidLibraryVersion: If the specified library version is
not supported.
"""
super(Library, self).CheckInitialized()
if self.name not in _NAME_TO_SUPPORTED_LIBRARY:
raise appinfo_errors.InvalidLibraryName(
'the library "%s" is not supported' % self.name)
supported_library = _NAME_TO_SUPPORTED_LIBRARY[self.name]
if self.version == 'latest':
self.version = supported_library.latest_version
elif self.version not in supported_library.supported_versions:
raise appinfo_errors.InvalidLibraryVersion(
('%s version "%s" is not supported, ' + _USE_VERSION_FORMAT) % (
self.name,
self.version,
'", "'.join(supported_library.non_deprecated_versions)))
elif self.version in supported_library.deprecated_versions:
use_vers = '", "'.join(supported_library.non_deprecated_versions)
logging.warning(
'%s version "%s" is deprecated, ' + _USE_VERSION_FORMAT,
self.name,
self.version,
use_vers)
class CpuUtilization(validation.Validated):
"""Class representing the configuration of VM CPU utilization."""
ATTRIBUTES = {
CPU_UTILIZATION_UTILIZATION: validation.Optional(
validation.Range(1e-6, 1.0, float)),
CPU_UTILIZATION_AGGREGATION_WINDOW_LENGTH_SEC: validation.Optional(
validation.Range(1, sys.maxint)),
}
class EndpointsApiService(validation.Validated):
"""Class representing EndpointsApiService in AppInfoExternal."""
ATTRIBUTES = {
ENDPOINTS_NAME: validation.Regex(_NON_WHITE_SPACE_REGEX),
CONFIG_ID: validation.Regex(_NON_WHITE_SPACE_REGEX),
}
class AutomaticScaling(validation.Validated):
"""Class representing automatic scaling settings in AppInfoExternal."""
ATTRIBUTES = {
MINIMUM_IDLE_INSTANCES: validation.Optional(_IDLE_INSTANCES_REGEX),
MAXIMUM_IDLE_INSTANCES: validation.Optional(_IDLE_INSTANCES_REGEX),
MINIMUM_PENDING_LATENCY: validation.Optional(_PENDING_LATENCY_REGEX),
MAXIMUM_PENDING_LATENCY: validation.Optional(_PENDING_LATENCY_REGEX),
MAXIMUM_CONCURRENT_REQUEST: validation.Optional(
_CONCURRENT_REQUESTS_REGEX),
# Attributes for VM-based AutomaticScaling.
MIN_NUM_INSTANCES: validation.Optional(validation.Range(1, sys.maxint)),
MAX_NUM_INSTANCES: validation.Optional(validation.Range(1, sys.maxint)),
COOL_DOWN_PERIOD_SEC: validation.Optional(
validation.Range(60, sys.maxint, int)),
CPU_UTILIZATION: validation.Optional(CpuUtilization),
TARGET_NETWORK_SENT_BYTES_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_NETWORK_SENT_PACKETS_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_NETWORK_RECEIVED_BYTES_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_NETWORK_RECEIVED_PACKETS_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_DISK_WRITE_BYTES_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_DISK_WRITE_OPS_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_DISK_READ_BYTES_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_DISK_READ_OPS_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_REQUEST_COUNT_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_CONCURRENT_REQUESTS:
validation.Optional(validation.Range(1, sys.maxint)),
}
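# For illustration, an `automatic_scaling` stanza validated by the class above
# might look like this for a VM-based deployment (all values are examples
# only):
#
#   automatic_scaling:
#     min_num_instances: 2
#     max_num_instances: 20
#     cool_down_period_sec: 120
#     cpu_utilization:
#       target_utilization: 0.5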
class ManualScaling(validation.Validated):
"""Class representing manual scaling settings in AppInfoExternal."""
ATTRIBUTES = {
INSTANCES: validation.Regex(_INSTANCES_REGEX),
}
class BasicScaling(validation.Validated):
"""Class representing basic scaling settings in AppInfoExternal."""
ATTRIBUTES = {
MAX_INSTANCES: validation.Regex(_INSTANCES_REGEX),
IDLE_TIMEOUT: validation.Optional(_IDLE_TIMEOUT_REGEX),
}
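# For illustration, the corresponding `manual_scaling` and `basic_scaling`
# stanzas might look like this (values are examples only):
#
#   manual_scaling:
#     instances: 5
#
#   basic_scaling:
#     max_instances: 11
#     idle_timeout: 10m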
class RuntimeConfig(validation.ValidatedDict):
"""Class for "vanilla" runtime configuration.
Fields used vary by runtime, so validation is delegated to the per-runtime
build processes.
These are intended to be used during Dockerfile generation, not after VM boot.
"""
KEY_VALIDATOR = validation.Regex('[a-zA-Z_][a-zA-Z0-9_]*')
VALUE_VALIDATOR = str
class VmSettings(validation.ValidatedDict):
"""Class for VM settings.
The settings are not further validated here. The settings are validated on
the server side.
"""
KEY_VALIDATOR = validation.Regex('[a-zA-Z_][a-zA-Z0-9_]*')
VALUE_VALIDATOR = str
@classmethod
def Merge(cls, vm_settings_one, vm_settings_two):
"""Merges two `VmSettings` instances.
If a variable is specified by both instances, the value from
`vm_settings_one` is used.
Args:
vm_settings_one: The first `VmSettings` instance, or `None`.
vm_settings_two: The second `VmSettings` instance, or `None`.
Returns:
The merged `VmSettings` instance, or `None` if both input instances are
`None` or empty.
"""
# Note that `VmSettings.copy()` results in a dict.
result_vm_settings = (vm_settings_two or {}).copy()
# TODO(user): Apply merge logic when feature is fully defined.
# For now, we will merge the two dict and `vm_settings_one` will win
# if key collides.
result_vm_settings.update(vm_settings_one or {})
return VmSettings(**result_vm_settings) if result_vm_settings else None
class BetaSettings(VmSettings):
"""Class for Beta (internal or unreleased) settings.
This class is meant to replace `VmSettings` eventually.
Note:
All new beta settings must be registered in `shared_constants.py`.
These settings are not validated further here. The settings are validated on
the server side.
"""
@classmethod
def Merge(cls, beta_settings_one, beta_settings_two):
"""Merges two `BetaSettings` instances.
Args:
beta_settings_one: The first `BetaSettings` instance, or `None`.
beta_settings_two: The second `BetaSettings` instance, or `None`.
Returns:
The merged `BetaSettings` instance, or `None` if both input instances are
`None` or empty.
"""
merged = VmSettings.Merge(beta_settings_one, beta_settings_two)
return BetaSettings(**merged.ToDict()) if merged else None
class EnvironmentVariables(validation.ValidatedDict):
"""Class representing a mapping of environment variable key/value pairs."""
KEY_VALIDATOR = validation.Regex('[a-zA-Z_][a-zA-Z0-9_]*')
VALUE_VALIDATOR = str
@classmethod
def Merge(cls, env_variables_one, env_variables_two):
"""Merges two `EnvironmentVariables` instances.
If a variable is specified by both instances, the value from
`env_variables_two` is used.
Args:
env_variables_one: The first `EnvironmentVariables` instance or `None`.
env_variables_two: The second `EnvironmentVariables` instance or `None`.
Returns:
The merged `EnvironmentVariables` instance, or `None` if both input
instances are `None` or empty.
"""
# Note that `EnvironmentVariables.copy()` results in a dict.
result_env_variables = (env_variables_one or {}).copy()
result_env_variables.update(env_variables_two or {})
return (EnvironmentVariables(**result_env_variables)
if result_env_variables else None)
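# For example (illustrative values): merging {'A': '1', 'B': '2'} with
# {'B': '3'} via EnvironmentVariables.Merge yields {'A': '1', 'B': '3'},
# because the second instance wins on conflict, as documented above.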
def ValidateSourceReference(ref):
"""Determines if a source reference is valid.
Args:
ref: A source reference in the following format:
`[repository_uri#]revision`.
Raises:
ValidationError: If the reference is malformed.
"""
repo_revision = ref.split('#', 1)
revision_id = repo_revision[-1]
if not re.match(SOURCE_REVISION_RE_STRING, revision_id):
raise validation.ValidationError('Bad revision identifier: %s' %
revision_id)
if len(repo_revision) == 2:
uri = repo_revision[0]
if not re.match(SOURCE_REPO_RE_STRING, uri):
raise validation.ValidationError('Bad repository URI: %s' % uri)
def ValidateCombinedSourceReferencesString(source_refs):
"""Determines if `source_refs` contains a valid list of source references.
Args:
source_refs: A multi-line string containing one source reference per line.
Raises:
ValidationError: If the reference is malformed.
"""
if len(source_refs) > SOURCE_REFERENCES_MAX_SIZE:
raise validation.ValidationError(
'Total source reference(s) size exceeds the limit: %d > %d' % (
len(source_refs), SOURCE_REFERENCES_MAX_SIZE))
for ref in source_refs.splitlines():
ValidateSourceReference(ref.strip())
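# For illustration, ValidateSourceReference accepts references such as a bare
# hexadecimal revision id (`2f0a7c2b9e4d`) or a repository URI plus revision
# (`https://example.com/repo.git#2f0a7c2b9e4d`); both values are examples only.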
class HealthCheck(validation.Validated):
"""Class representing the health check configuration."""
ATTRIBUTES = {
ENABLE_HEALTH_CHECK: validation.Optional(validation.TYPE_BOOL),
CHECK_INTERVAL_SEC: validation.Optional(validation.Range(0, sys.maxint)),
TIMEOUT_SEC: validation.Optional(validation.Range(0, sys.maxint)),
UNHEALTHY_THRESHOLD: validation.Optional(validation.Range(0, sys.maxint)),
HEALTHY_THRESHOLD: validation.Optional(validation.Range(0, sys.maxint)),
RESTART_THRESHOLD: validation.Optional(validation.Range(0, sys.maxint)),
HOST: validation.Optional(validation.TYPE_STR)}
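# For illustration, a `health_check` stanza validated by `HealthCheck` above
# might look like this (values are examples only):
#
#   health_check:
#     enable_health_check: True
#     check_interval_sec: 5
#     timeout_sec: 4
#     unhealthy_threshold: 2
#     healthy_threshold: 2
#     restart_threshold: 60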
class VmHealthCheck(HealthCheck):
"""Class representing the configuration of the VM health check.
Note:
This class is deprecated and will be removed in a future release. Use
`HealthCheck` instead.
"""
pass
class Volume(validation.Validated):
"""Class representing the configuration of a volume."""
ATTRIBUTES = {
VOLUME_NAME: validation.TYPE_STR,
SIZE_GB: validation.TYPE_FLOAT,
VOLUME_TYPE: validation.TYPE_STR,
}
class Resources(validation.Validated):
"""Class representing the configuration of VM resources."""
ATTRIBUTES = {
CPU: validation.Optional(validation.TYPE_FLOAT),
MEMORY_GB: validation.Optional(validation.TYPE_FLOAT),
DISK_SIZE_GB: validation.Optional(validation.TYPE_INT),
VOLUMES: validation.Optional(validation.Repeated(Volume))
}
class Network(validation.Validated):
"""Class representing the VM network configuration."""
ATTRIBUTES = {
# A list of port mappings in the form 'port' or 'external:internal'.
FORWARDED_PORTS: validation.Optional(validation.Repeated(validation.Regex(
'[0-9]+(:[0-9]+)?(/(udp|tcp))?'))),
INSTANCE_TAG: validation.Optional(validation.Regex(
GCE_RESOURCE_NAME_REGEX)),
NETWORK_NAME: validation.Optional(validation.Regex(
GCE_RESOURCE_NAME_REGEX)),
SUBNETWORK_NAME: validation.Optional(validation.Regex(
GCE_RESOURCE_NAME_REGEX)),
}
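# For illustration, a `network` stanza validated by `Network` above might look
# like this; the port list accepts entries of the form `port`,
# `external:internal`, with an optional `/tcp` or `/udp` suffix (names and
# ports are examples only):
#
#   network:
#     name: my-network
#     instance_tag: my-tag
#     forwarded_ports:
#     - 8080
#     - 9000:9090
#     - 2222/tcp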
class AppInclude(validation.Validated):
"""Class representing the contents of an included `app.yaml` file.
This class is used for both `builtins` and `includes` directives.
"""
# TODO(user): It probably makes sense to have a scheme where we do a
# deep-copy of fields from `AppInfoExternal` when setting the `ATTRIBUTES`
# here. Right now it's just copypasta.
ATTRIBUTES = {
BUILTINS: validation.Optional(validation.Repeated(BuiltinHandler)),
INCLUDES: validation.Optional(validation.Type(list)),
HANDLERS: validation.Optional(validation.Repeated(URLMap), default=[]),
ADMIN_CONSOLE: validation.Optional(AdminConsole),
MANUAL_SCALING: validation.Optional(ManualScaling),
VM: validation.Optional(bool),
VM_SETTINGS: validation.Optional(VmSettings),
BETA_SETTINGS: validation.Optional(BetaSettings),
ENV_VARIABLES: validation.Optional(EnvironmentVariables),
SKIP_FILES: validation.RegexStr(default=SKIP_NO_FILES),
# TODO(user): add `LIBRARIES` here when we have a good story for
# handling contradictory library requests.
}
@classmethod
def MergeManualScaling(cls, appinclude_one, appinclude_two):
"""Takes the greater of `<manual_scaling.instances>` from the arguments.
`appinclude_one` is mutated to be the merged result in this process.
Also, this function must be updated if `ManualScaling` gets additional
fields.
Args:
appinclude_one: The first object to merge. The object must have a
`manual_scaling` field that contains a `ManualScaling()`.
appinclude_two: The second object to merge. The object must have a
`manual_scaling` field that contains a `ManualScaling()`.
Returns:
An object that is the result of merging
`appinclude_one.manual_scaling.instances` and
`appinclude_two.manual_scaling.instances`; this is returned as a revised
`appinclude_one` object after the mutations are complete.
"""
def _Instances(appinclude):
"""Determines the number of `manual_scaling.instances` sets.
Args:
appinclude: The include from which to read `manual_scaling.instances`.
Returns:
The number of instances as an integer, or `None`.
"""
if appinclude.manual_scaling:
if appinclude.manual_scaling.instances:
return int(appinclude.manual_scaling.instances)
return None
# We only want to mutate a param if at least one of the given
# arguments has manual_scaling.instances set.
instances = max(_Instances(appinclude_one), _Instances(appinclude_two))
if instances is not None:
appinclude_one.manual_scaling = ManualScaling(instances=str(instances))
return appinclude_one
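# For example (illustrative): if `appinclude_one` sets `instances: 5` and
# `appinclude_two` leaves `manual_scaling` unset, the merged result keeps
# `instances: 5`; if both set a value, the larger one wins.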
@classmethod
def _CommonMergeOps(cls, one, two):
"""This function performs common merge operations.
Args:
one: The first object that you want to merge.
two: The second object that you want to merge.
Returns:
An updated `one` object containing all merged data.
"""
# Merge `ManualScaling`.
AppInclude.MergeManualScaling(one, two)
# Merge `AdminConsole` objects.
one.admin_console = AdminConsole.Merge(one.admin_console,
two.admin_console)
# Preserve the specific value of `one.vm` (`None` or `False`) when neither
# are `True`.
one.vm = two.vm or one.vm
# Merge `VmSettings` objects.
one.vm_settings = VmSettings.Merge(one.vm_settings,
two.vm_settings)
# Merge `BetaSettings` objects.
if hasattr(one, 'beta_settings'):
one.beta_settings = BetaSettings.Merge(one.beta_settings,
two.beta_settings)
# Merge `EnvironmentVariables` objects. The values in `two.env_variables`
# override the ones in `one.env_variables` in case of conflict.
one.env_variables = EnvironmentVariables.Merge(one.env_variables,
two.env_variables)
one.skip_files = cls.MergeSkipFiles(one.skip_files, two.skip_files)
return one
@classmethod
def MergeAppYamlAppInclude(cls, appyaml, appinclude):
"""Merges an `app.yaml` file with referenced builtins/includes.
Args:
appyaml: The `app.yaml` file that you want to update with `appinclude`.
appinclude: The includes that you want to merge into `appyaml`.
Returns:
An updated `app.yaml` file that includes the directives you specified in
`appinclude`.
"""
# All merge operations should occur in this function or in functions
# referenced from this one. That makes it much easier to understand what
# goes wrong when included files are not merged correctly.
if not appinclude:
return appyaml
# Merge handlers while paying attention to `position` attribute.
if appinclude.handlers:
tail = appyaml.handlers or []
appyaml.handlers = []
for h in appinclude.handlers:
if not h.position or h.position == 'head':
appyaml.handlers.append(h)
else:
tail.append(h)
# Get rid of the `position` attribute since we no longer need it, and it is
# technically invalid to include in the resulting merged `app.yaml` file
# that will be sent when deploying the application.
h.position = None
appyaml.handlers.extend(tail)
appyaml = cls._CommonMergeOps(appyaml, appinclude)
appyaml.NormalizeVmSettings()
return appyaml
@classmethod
def MergeAppIncludes(cls, appinclude_one, appinclude_two):
"""Merges the non-referential state of the provided `AppInclude`.
That is, `builtins` and `includes` directives are not preserved, but any
static objects are copied into an aggregate `AppInclude` object that
preserves the directives of both provided `AppInclude` objects.
`appinclude_one` is updated to be the merged result in this process.
Args:
appinclude_one: First `AppInclude` to merge.
appinclude_two: Second `AppInclude` to merge.
Returns:
`AppInclude` object that is the result of merging the static directives of
`appinclude_one` and `appinclude_two`. An updated version of
`appinclude_one` is returned.
"""
# If one or both `appinclude` objects were `None`, return the object that
# was not `None` or return `None`.
if not appinclude_one or not appinclude_two:
return appinclude_one or appinclude_two
# Now, both `appincludes` are non-`None`.
# Merge handlers.
if appinclude_one.handlers:
if appinclude_two.handlers:
appinclude_one.handlers.extend(appinclude_two.handlers)
else:
appinclude_one.handlers = appinclude_two.handlers
return cls._CommonMergeOps(appinclude_one, appinclude_two)
@staticmethod
def MergeSkipFiles(skip_files_one, skip_files_two):
"""Merges two `skip_files` directives.
Args:
skip_files_one: The first `skip_files` element that you want to merge.
skip_files_two: The second `skip_files` element that you want to merge.
Returns:
A list of regular expressions that are merged.
"""
if skip_files_one == SKIP_NO_FILES:
return skip_files_two
if skip_files_two == SKIP_NO_FILES:
return skip_files_one
return validation.RegexStr().Validate(
[skip_files_one, skip_files_two], SKIP_FILES)
# We exploit the handling of RegexStr where regex properties can be
# specified as a list of regexes that are then joined with |.
class AppInfoExternal(validation.Validated):
"""Class representing users application info.
This class is passed to a `yaml_object` builder to provide the validation
for the application information file format parser.
Attributes:
application: Unique identifier for application.
version: Application's major version.
runtime: Runtime used by application.
api_version: Which version of APIs to use.
source_language: Optional specification of the source language. For example,
you could specify `php-quercus` if this is a Java app that was generated
from PHP source using Quercus.
handlers: List of URL handlers.
default_expiration: Default time delta to use for cache expiration for
all static files, unless they have their own specific `expiration` set.
See the documentation for the `URLMap.expiration` field for more
information.
skip_files: A regular expression object. Files that match this regular
expression will not be uploaded by `appcfg.py`. For example::
skip_files: |
.svn.*|
#.*#
nobuild_files: A regular expression object. Files that match this regular
expression will not be built into the app. This directive is valid for
Go only.
api_config: URL root and script or servlet path for enhanced API serving.
"""
ATTRIBUTES = {
# Regular expressions for these attributes are defined in
# //apphosting/base/id_util.cc.
APPLICATION: validation.Optional(APPLICATION_RE_STRING),
# An alias for `APPLICATION`.
PROJECT: validation.Optional(APPLICATION_RE_STRING),
MODULE: validation.Optional(MODULE_ID_RE_STRING),
# `service` will replace `module` soon
SERVICE: validation.Optional(MODULE_ID_RE_STRING),
VERSION: validation.Optional(MODULE_VERSION_ID_RE_STRING),
RUNTIME: validation.Optional(RUNTIME_RE_STRING),
# A new `api_version` requires a release of the `dev_appserver`, so it
# is ok to hardcode the version names here.
API_VERSION: validation.Optional(API_VERSION_RE_STRING),
# The App Engine environment to run this version in. (VM vs. non-VM, etc.)
ENV: validation.Optional(ENV_RE_STRING),
ENDPOINTS_API_SERVICE: validation.Optional(EndpointsApiService),
# The SDK will use this for generated Dockerfiles
ENTRYPOINT: validation.Optional(validation.Type(str)),
RUNTIME_CONFIG: validation.Optional(RuntimeConfig),
INSTANCE_CLASS: validation.Optional(validation.Type(str)),
SOURCE_LANGUAGE: validation.Optional(
validation.Regex(SOURCE_LANGUAGE_RE_STRING)),
AUTOMATIC_SCALING: validation.Optional(AutomaticScaling),
MANUAL_SCALING: validation.Optional(ManualScaling),
BASIC_SCALING: validation.Optional(BasicScaling),
VM: validation.Optional(bool),
VM_SETTINGS: validation.Optional(VmSettings), # Deprecated
BETA_SETTINGS: validation.Optional(BetaSettings),
VM_HEALTH_CHECK: validation.Optional(VmHealthCheck), # Deprecated
HEALTH_CHECK: validation.Optional(HealthCheck),
RESOURCES: validation.Optional(Resources),
NETWORK: validation.Optional(Network),
BUILTINS: validation.Optional(validation.Repeated(BuiltinHandler)),
INCLUDES: validation.Optional(validation.Type(list)),
HANDLERS: validation.Optional(validation.Repeated(URLMap), default=[]),
LIBRARIES: validation.Optional(validation.Repeated(Library)),
# TODO(arb): change to a regex when `validation.Repeated` supports it
SERVICES: validation.Optional(validation.Repeated(
validation.Regex(_SERVICE_RE_STRING))),
DEFAULT_EXPIRATION: validation.Optional(_EXPIRATION_REGEX),
SKIP_FILES: validation.RegexStr(default=DEFAULT_SKIP_FILES),
NOBUILD_FILES: validation.RegexStr(default=DEFAULT_NOBUILD_FILES),
DERIVED_FILE_TYPE: validation.Optional(validation.Repeated(
validation.Options(JAVA_PRECOMPILED, PYTHON_PRECOMPILED))),
ADMIN_CONSOLE: validation.Optional(AdminConsole),
ERROR_HANDLERS: validation.Optional(validation.Repeated(ErrorHandlers)),
BACKENDS: validation.Optional(validation.Repeated(
backendinfo.BackendEntry)),
THREADSAFE: validation.Optional(bool),
DATASTORE_AUTO_ID_POLICY: validation.Optional(
validation.Options(DATASTORE_ID_POLICY_LEGACY,
DATASTORE_ID_POLICY_DEFAULT)),
API_CONFIG: validation.Optional(ApiConfigHandler),
CODE_LOCK: validation.Optional(bool),
ENV_VARIABLES: validation.Optional(EnvironmentVariables),
}
def CheckInitialized(self):
"""Performs non-regular expression-based validation.
The following are verified:
- At least one URL mapping is provided in the URL mappers.
- The number of URL mappers doesn't exceed `MAX_URL_MAPS`.
- The major version does not contain the string `-dot-`.
- If `api_endpoints` are defined, an `api_config` stanza must be
defined.
- If the `runtime` is `python27` and `threadsafe` is set, then no CGI
handlers can be used.
- The version name doesn't start with `BUILTIN_NAME_PREFIX`.
- If `redirect_http_response_code` exists, it is in the list of valid
300s.
- Module and service aren't both set. Services were formerly known as
modules.
Raises:
DuplicateLibrary: If `library_name` is specified more than once.
MissingURLMapping: If no `URLMap` object is present in the object.
TooManyURLMappings: If there are too many `URLMap` entries.
MissingApiConfig: If `api_endpoints` exists without an `api_config`.
MissingThreadsafe: If `threadsafe` is not set but the runtime requires it.
ThreadsafeWithCgiHandler: If the `runtime` is `python27`, `threadsafe` is
set and CGI handlers are specified.
TooManyScalingSettingsError: If more than one scaling settings block is
present.
RuntimeDoesNotSupportLibraries: If the libraries clause is used for a
runtime that does not support it, such as `python25`.
ModuleAndServiceDefined: If both `module` and `service` keywords are used.
Services were formerly known as modules.
"""
super(AppInfoExternal, self).CheckInitialized()
if self.runtime is None and not self.IsVm():
raise appinfo_errors.MissingRuntimeError(
'You must specify a "runtime" field for non-vm applications.')
elif self.runtime is None:
      # Default the runtime to 'custom' (not done in ATTRIBUTES above so that
      # we can tell when the value has been defaulted)
self.runtime = 'custom'
if (not self.handlers and not self.builtins and not self.includes
and not self.IsVm()):
raise appinfo_errors.MissingURLMapping(
'No URLMap entries found in application configuration')
if self.handlers and len(self.handlers) > MAX_URL_MAPS:
raise appinfo_errors.TooManyURLMappings(
'Found more than %d URLMap entries in application configuration' %
MAX_URL_MAPS)
if self.service and self.module:
raise appinfo_errors.ModuleAndServiceDefined(
'Cannot define both "module" and "service" in configuration')
vm_runtime_python27 = (
self.runtime == 'vm' and
(hasattr(self, 'vm_settings') and
self.vm_settings and
self.vm_settings.get('vm_runtime') == 'python27') or
(hasattr(self, 'beta_settings') and
self.beta_settings and
self.beta_settings.get('vm_runtime') == 'python27'))
if (self.threadsafe is None and
(self.runtime == 'python27' or vm_runtime_python27)):
raise appinfo_errors.MissingThreadsafe(
'threadsafe must be present and set to a true or false YAML value')
if self.auto_id_policy == DATASTORE_ID_POLICY_LEGACY:
datastore_auto_ids_url = ('http://developers.google.com/'
'appengine/docs/python/datastore/'
'entities#Kinds_and_Identifiers')
appcfg_auto_ids_url = ('http://developers.google.com/appengine/docs/'
'python/config/appconfig#auto_id_policy')
logging.warning(
"You have set the datastore auto_id_policy to 'legacy'. It is "
"recommended that you select 'default' instead.\n"
"Legacy auto ids are deprecated. You can continue to allocate\n"
"legacy ids manually using the allocate_ids() API functions.\n"
"For more information see:\n"
+ datastore_auto_ids_url + '\n' + appcfg_auto_ids_url + '\n')
if (hasattr(self, 'beta_settings') and self.beta_settings
and self.beta_settings.get('source_reference')):
ValidateCombinedSourceReferencesString(
self.beta_settings.get('source_reference'))
if self.libraries:
if not (vm_runtime_python27 or self.runtime == 'python27'):
raise appinfo_errors.RuntimeDoesNotSupportLibraries(
'libraries entries are only supported by the "python27" runtime')
library_names = [library.name for library in self.libraries]
for library_name in library_names:
if library_names.count(library_name) > 1:
raise appinfo_errors.DuplicateLibrary(
'Duplicate library entry for %s' % library_name)
if self.version and self.version.find(ALTERNATE_HOSTNAME_SEPARATOR) != -1:
raise validation.ValidationError(
'Version "%s" cannot contain the string "%s"' % (
self.version, ALTERNATE_HOSTNAME_SEPARATOR))
if self.version and self.version.startswith(BUILTIN_NAME_PREFIX):
raise validation.ValidationError(
('Version "%s" cannot start with "%s" because it is a '
'reserved version name prefix.') % (self.version,
BUILTIN_NAME_PREFIX))
if self.handlers:
api_endpoints = [handler.url for handler in self.handlers
if handler.GetHandlerType() == HANDLER_API_ENDPOINT]
if api_endpoints and not self.api_config:
raise appinfo_errors.MissingApiConfig(
'An api_endpoint handler was specified, but the required '
'api_config stanza was not configured.')
if self.threadsafe and self.runtime == 'python27':
# VMEngines can handle python25 handlers, so we don't include
# vm_runtime_python27 in the if statement above.
for handler in self.handlers:
if (handler.script and (handler.script.endswith('.py') or
'/' in handler.script)):
raise appinfo_errors.ThreadsafeWithCgiHandler(
'threadsafe cannot be enabled with CGI handler: %s' %
handler.script)
if sum([bool(self.automatic_scaling),
bool(self.manual_scaling),
bool(self.basic_scaling)]) > 1:
raise appinfo_errors.TooManyScalingSettingsError(
"There may be only one of 'automatic_scaling', 'manual_scaling', "
"or 'basic_scaling'.")
def GetAllLibraries(self):
"""Returns a list of all `Library` instances active for this configuration.
Returns:
The list of active `Library` instances for this configuration. This
includes directly-specified libraries as well as any required
dependencies.
"""
if not self.libraries:
return []
library_names = set(library.name for library in self.libraries)
required_libraries = []
for library in self.libraries:
for required_name, required_version in REQUIRED_LIBRARIES.get(
(library.name, library.version), []):
if required_name not in library_names:
required_libraries.append(Library(name=required_name,
version=required_version))
return [Library(**library.ToDict())
for library in self.libraries + required_libraries]
def GetNormalizedLibraries(self):
"""Returns a list of normalized `Library` instances for this configuration.
Returns:
The list of active `Library` instances for this configuration. This
includes directly-specified libraries, their required dependencies, and
any libraries enabled by default. Any libraries with `latest` as their
version will be replaced with the latest available version.
"""
libraries = self.GetAllLibraries()
enabled_libraries = set(library.name for library in libraries)
for library in _SUPPORTED_LIBRARIES:
if library.default_version and library.name not in enabled_libraries:
libraries.append(Library(name=library.name,
version=library.default_version))
return libraries
def ApplyBackendSettings(self, backend_name):
"""Applies settings from the indicated backend to the `AppInfoExternal`.
Backend entries can contain directives that modify other parts of the
`app.yaml` file, such as the `start` directive, which adds a handler for the
start request. This method performs those modifications.
Args:
backend_name: The name of a backend that is defined in the `backends`
directive.
Raises:
BackendNotFound: If the indicated backend was not listed in the
`backends` directive.
DuplicateBackend: If the backend is found more than once in the `backends`
directive.
"""
if backend_name is None:
return
if self.backends is None:
raise appinfo_errors.BackendNotFound
self.version = backend_name
match = None
for backend in self.backends:
if backend.name != backend_name:
continue
if match:
raise appinfo_errors.DuplicateBackend
else:
match = backend
if match is None:
raise appinfo_errors.BackendNotFound
if match.start is None:
return
start_handler = URLMap(url=_START_PATH, script=match.start)
self.handlers.insert(0, start_handler)
def GetEffectiveRuntime(self):
"""Returns the app's runtime, resolving VMs to the underlying `vm_runtime`.
Returns:
The effective runtime: The value of `beta/vm_settings.vm_runtime` if
`runtime` is `vm`, or `runtime` otherwise.
"""
if (self.runtime == 'vm' and hasattr(self, 'vm_settings')
and self.vm_settings is not None):
return self.vm_settings.get('vm_runtime')
if (self.runtime == 'vm' and hasattr(self, 'beta_settings')
and self.beta_settings is not None):
return self.beta_settings.get('vm_runtime')
return self.runtime
def SetEffectiveRuntime(self, runtime):
"""Sets the runtime while respecting vm runtimes rules for runtime settings.
Args:
runtime: The runtime to use.
"""
if self.IsVm():
if not self.vm_settings:
self.vm_settings = VmSettings()
# Patch up vm runtime setting. Copy `runtime` to `vm_runtime` and set
# runtime to the string `vm`.
self.vm_settings['vm_runtime'] = runtime
self.runtime = 'vm'
else:
self.runtime = runtime
def NormalizeVmSettings(self):
"""Normalizes VM settings."""
# NOTE(user): In the input files, `vm` is not a type of runtime, but
# rather is specified as `vm: true|false`. In the code, `vm` is represented
# as a value of `AppInfoExternal.runtime`.
# NOTE(user): This hack is only being applied after the parsing of
# `AppInfoExternal`. If the `vm` attribute can ever be specified in the
# `AppInclude`, then this processing will need to be done there too.
if self.IsVm():
if not self.vm_settings:
self.vm_settings = VmSettings()
if 'vm_runtime' not in self.vm_settings:
self.SetEffectiveRuntime(self.runtime)
# Copy fields that are automatically added by the SDK or this class
# to `beta_settings`.
if hasattr(self, 'beta_settings') and self.beta_settings:
# Only copy if `beta_settings` already exists, because we have logic in
# `appversion.py` to discard all of `vm_settings` if anything is in
# `beta_settings`. So we won't create an empty one just to add these
# fields.
for field in ['vm_runtime',
'has_docker_image',
'image',
'module_yaml_path']:
if field not in self.beta_settings and field in self.vm_settings:
self.beta_settings[field] = self.vm_settings[field]
# TODO(user): `env` replaces `vm`. Remove `vm` when field is removed.
def IsVm(self):
return (self.vm or
self.env in ['2', 'flex', 'flexible'])
def ValidateHandlers(handlers, is_include_file=False):
"""Validates a list of handler (`URLMap`) objects.
Args:
handlers: A list of a handler (`URLMap`) objects.
is_include_file: If this argument is set to `True`, the handlers that are
added as part of the `includes` directive are validated.
"""
if not handlers:
return
for handler in handlers:
handler.FixSecureDefaults()
handler.WarnReservedURLs()
if not is_include_file:
handler.ErrorOnPositionForAppInfo()
def LoadSingleAppInfo(app_info):
"""Loads a single `AppInfo` object where one and only one is expected.
  This method validates that the values in the `AppInfo` match the
validators that are defined in this file, in particular,
`AppInfoExternal.ATTRIBUTES`.
Args:
app_info: A file-like object or string. If the argument is a string, the
argument is parsed as a configuration file. If the argument is a
file-like object, the data is read, then parsed.
Returns:
An instance of `AppInfoExternal` as loaded from a YAML file.
Raises:
ValueError: If a specified service is not valid.
EmptyConfigurationFile: If there are no documents in YAML file.
MultipleConfigurationFile: If more than one document exists in the YAML
file.
DuplicateBackend: If a backend is found more than once in the `backends`
directive.
yaml_errors.EventError: If the `app.yaml` file fails validation.
appinfo_errors.MultipleProjectNames: If the `app.yaml` file has both an
`application` directive and a `project` directive.
"""
builder = yaml_object.ObjectBuilder(AppInfoExternal)
handler = yaml_builder.BuilderHandler(builder)
listener = yaml_listener.EventListener(handler)
listener.Parse(app_info)
app_infos = handler.GetResults()
if len(app_infos) < 1:
raise appinfo_errors.EmptyConfigurationFile()
if len(app_infos) > 1:
raise appinfo_errors.MultipleConfigurationFile()
appyaml = app_infos[0]
ValidateHandlers(appyaml.handlers)
if appyaml.builtins:
BuiltinHandler.Validate(appyaml.builtins, appyaml.runtime)
# Allow `project: name` as an alias for `application: name`. If found, we
# change the `project` field to `None`. (Deleting it would make a distinction
# between loaded and constructed `AppInfoExternal` objects, since the latter
# would still have the project field.)
if appyaml.application and appyaml.project:
raise appinfo_errors.MultipleProjectNames(
'Specify one of "application: name" or "project: name"')
elif appyaml.project:
appyaml.application = appyaml.project
appyaml.project = None
appyaml.NormalizeVmSettings()
return appyaml
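# --- Illustrative usage sketch (added for clarity; not part of the original
# module). The YAML field values below are hypothetical examples.
def _ExampleLoadSingleAppInfo():
  """Sketch: parse a minimal app.yaml-style string with LoadSingleAppInfo."""
  yaml_text = ('application: sample-app\n'
               'version: alpha-1\n'
               'runtime: python27\n'
               'api_version: 1\n'
               'threadsafe: true\n'
               'handlers:\n'
               '- url: /.*\n'
               '  script: main.app\n')
  appyaml = LoadSingleAppInfo(yaml_text)
  # The parsed object exposes the directives as attributes.
  return appyaml.application, appyaml.GetEffectiveRuntime()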
class AppInfoSummary(validation.Validated):
"""This class contains only basic summary information about an app.
This class is used to pass back information about the newly created app to
users after a new version has been created.
"""
# NOTE(user): Before you consider adding anything to this YAML definition,
# you must solve the issue that old SDK versions will try to parse this new
# value with the old definition and fail. Basically we are stuck with this
# definition for the time being. The parsing of the value is done in
ATTRIBUTES = {
APPLICATION: APPLICATION_RE_STRING,
MAJOR_VERSION: MODULE_VERSION_ID_RE_STRING,
MINOR_VERSION: validation.TYPE_LONG
}
def LoadAppInclude(app_include):
"""Loads a single `AppInclude` object where one and only one is expected.
Args:
    app_include: A file-like object or string. If the argument is a string, it
      is parsed as a configuration file. If the argument is a file-like
      object, the data is read and parsed.
Returns:
An instance of `AppInclude` as loaded from a YAML file.
Raises:
EmptyConfigurationFile: If there are no documents in the YAML file.
MultipleConfigurationFile: If there is more than one document in the YAML
file.
"""
builder = yaml_object.ObjectBuilder(AppInclude)
handler = yaml_builder.BuilderHandler(builder)
listener = yaml_listener.EventListener(handler)
listener.Parse(app_include)
includes = handler.GetResults()
if len(includes) < 1:
raise appinfo_errors.EmptyConfigurationFile()
if len(includes) > 1:
raise appinfo_errors.MultipleConfigurationFile()
includeyaml = includes[0]
if includeyaml.handlers:
for handler in includeyaml.handlers:
handler.FixSecureDefaults()
handler.WarnReservedURLs()
if includeyaml.builtins:
BuiltinHandler.Validate(includeyaml.builtins)
return includeyaml
def ParseExpiration(expiration):
"""Parses an expiration delta string.
Args:
expiration: String that matches `_DELTA_REGEX`.
Returns:
Time delta in seconds.
"""
delta = 0
for match in re.finditer(_DELTA_REGEX, expiration):
amount = int(match.group(1))
units = _EXPIRATION_CONVERSIONS.get(match.group(2).lower(), 1)
delta += amount * units
return delta
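# --- Illustrative sketch (added for clarity; not part of the original module).
# Assumes the usual app.yaml delta syntax, e.g. '1d 12h', where each token is a
# number followed by a unit understood by `_EXPIRATION_CONVERSIONS`.
def _ExampleParseExpiration():
  """Sketch: '1d 12h' is 1*86400 + 12*3600 = 129600 seconds."""
  return ParseExpiration('1d 12h')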
#####################################################################
# These regexps must be the same as those in apphosting/client/app_config.cc
# and java/com/google/appengine/tools/admin/AppVersionUpload.java
# java/com/google/apphosting/admin/legacy/LegacyAppInfo.java,
# apphosting/client/app_config_old.cc,
# apphosting/api/app_config/app_config_server2.cc
# Valid characters for a file name.
_file_path_positive_re = re.compile(r'^.{1,256}$')
# Forbid `.`, `..`, and leading `-`, `_ah/` or `/`
_file_path_negative_1_re = re.compile(r'\.\.|^\./|\.$|/\./|^-|^_ah/|^/')
# Forbid `//` and trailing `/`
_file_path_negative_2_re = re.compile(r'//|/$')
# Forbid any use of space other than in the middle of a directory or file
# name.
_file_path_negative_3_re = re.compile(r'^ | $|/ | /')
# (erinjerison) Lint seems to think I'm specifying the word "character" as an
# argument. This isn't the case; it's part of a list to enable the list to
# build properly. Disabling it for now.
# pylint: disable=g-doc-args
def ValidFilename(filename):
"""Determines if a file name is valid.
Args:
filename: The file name to validate. The file name must be a valid file
name:
        - It must only contain letters, numbers, and the following special
          characters: `@`, `_`, `+`, `/`, `$`, `.`, `-`, or `~`.
        - It must be no more than 256 characters long.
- It must not contain `/./`, `/../`, or `//`.
- It must not end in `/`.
- All spaces must be in the middle of a directory or file name.
Returns:
An error string if the file name is invalid. `''` is returned if the file
name is valid.
"""
if _file_path_positive_re.match(filename) is None:
return 'Invalid character in filename: %s' % filename
if _file_path_negative_1_re.search(filename) is not None:
return ('Filename cannot contain "." or ".." '
'or start with "-" or "_ah/": %s' %
filename)
if _file_path_negative_2_re.search(filename) is not None:
return 'Filename cannot have trailing / or contain //: %s' % filename
if _file_path_negative_3_re.search(filename) is not None:
return 'Any spaces must be in the middle of a filename: %s' % filename
return ''
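# --- Illustrative usage sketch (added for clarity; not part of the original
# module): `ValidFilename` returns '' for acceptable paths and a non-empty
# error string otherwise.
def _ExampleValidFilename():
  """Sketch: a typical static path passes, a parent-relative one does not."""
  assert ValidFilename('static/css/main.css') == ''
  assert ValidFilename('../outside.txt') != ''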
| mit |
devanshdalal/scikit-learn | sklearn/cluster/tests/test_bicluster.py | 143 | 9461 | """Testing for Spectral Biclustering methods"""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.model_selection import ParameterGrid
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import _scale_normalize
from sklearn.cluster.bicluster import _bistochastic_normalize
from sklearn.cluster.bicluster import _log_normalize
from sklearn.metrics import consensus_score
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BaseEstimator, BiclusterMixin):
# Mock object for testing get_submatrix.
def __init__(self):
pass
def get_indices(self, i):
# Overridden to reproduce old get_submatrix test.
return (np.where([True, True, False, False, True])[0],
np.where([False, False, True, True])[0])
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
model = MockBiclustering()
for X in (data, csr_matrix(data), data.tolist()):
submatrix = model.get_submatrix(0, X)
if issparse(submatrix):
submatrix = submatrix.toarray()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.toarray()
assert_true(np.all(X != -1))
def _test_shape_indices(model):
# Test get_shape and get_indices on fitted model.
for i in range(model.n_clusters):
m, n = model.get_shape(i)
i_ind, j_ind = model.get_indices(i)
assert_equal(len(i_ind), m)
assert_equal(len(j_ind), n)
def test_spectral_coclustering():
# Test Dhillon's Spectral CoClustering on a simple problem.
param_grid = {'svd_method': ['randomized', 'arpack'],
'n_svd_vecs': [None, 20],
'mini_batch': [False, True],
'init': ['k-means++'],
'n_init': [10],
'n_jobs': [1]}
random_state = 0
S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
random_state=random_state)
S -= S.min() # needs to be nonnegative before making it sparse
S = np.where(S < 1, 0, S) # threshold some values
for mat in (S, csr_matrix(S)):
for kwargs in ParameterGrid(param_grid):
model = SpectralCoclustering(n_clusters=3,
random_state=random_state,
**kwargs)
model.fit(mat)
assert_equal(model.rows_.shape, (3, 30))
assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def test_spectral_biclustering():
# Test Kluger methods on a checkerboard dataset.
S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
random_state=0)
non_default_params = {'method': ['scale', 'log'],
'svd_method': ['arpack'],
'n_svd_vecs': [20],
'mini_batch': [True]}
for mat in (S, csr_matrix(S)):
for param_name, param_values in non_default_params.items():
for param_value in param_values:
model = SpectralBiclustering(
n_clusters=3,
n_init=3,
init='k-means++',
random_state=0,
)
model.set_params(**dict([(param_name, param_value)]))
if issparse(mat) and model.get_params().get('method') == 'log':
# cannot take log of sparse matrix
assert_raises(ValueError, model.fit, mat)
continue
else:
model.fit(mat)
assert_equal(model.rows_.shape, (9, 30))
assert_equal(model.columns_.shape, (9, 30))
assert_array_equal(model.rows_.sum(axis=0),
np.repeat(3, 30))
assert_array_equal(model.columns_.sum(axis=0),
np.repeat(3, 30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
decimal=1)
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
_do_scale_test(scaled)
assert_almost_equal(scaled.sum(axis=0).mean(),
scaled.sum(axis=1).mean(),
decimal=1)
def test_scale_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled, _, _ = _scale_normalize(mat)
_do_scale_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_bistochastic_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled = _bistochastic_normalize(mat)
_do_bistochastic_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_log_normalize():
# adding any constant to a log-scaled matrix should make it
# bistochastic
generator = np.random.RandomState(0)
mat = generator.rand(100, 100)
scaled = _log_normalize(mat) + 1
_do_bistochastic_test(scaled)
def test_fit_best_piecewise():
model = SpectralBiclustering(random_state=0)
vectors = np.array([[0, 0, 0, 1, 1, 1],
[2, 2, 2, 3, 3, 3],
[0, 1, 2, 3, 4, 5]])
best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
model = SpectralBiclustering(random_state=0)
data = np.array([[1, 1, 1],
[1, 1, 1],
[3, 6, 3],
[3, 6, 3]])
vectors = np.array([[1, 0],
[0, 1],
[0, 0]])
for mat in (data, csr_matrix(data)):
labels = model._project_and_cluster(data, vectors,
n_clusters=2)
assert_array_equal(labels, [0, 0, 1, 1])
def test_perfect_checkerboard():
raise SkipTest("This test is failing on the buildbot, but cannot"
" reproduce. Temporarily disabling it until it can be"
" reproduced and fixed.")
model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
def test_errors():
data = np.arange(25).reshape((5, 5))
model = SpectralBiclustering(n_clusters=(3, 3, 3))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters='abc')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters=(3, 'abc'))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(svd_method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_best=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=3, n_best=4)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering()
data = np.arange(27).reshape((3, 3, 3))
assert_raises(ValueError, model.fit, data)
| bsd-3-clause |
RandallDW/Aruba_plugin | plugins/org.python.pydev/pysrc/pydev_ipython/qt_for_kernel.py | 1 | 2511 | """ Import Qt in a manner suitable for an IPython kernel.
This is the import used for the `gui=qt` or `matplotlib=qt` initialization.
Import Priority:
if Qt4 has been imported anywhere else:
    use that
if matplotlib has been imported and doesn't support v2 (<= 1.0.1):
    use PyQt4 @v1
Next, ask ETS' QT_API env variable
if QT_API not set:
    ask matplotlib via rcParams['backend.qt4']
    if it said PyQt:
        use PyQt4 @v1
    elif it said PySide:
        use PySide
    else: (matplotlib said nothing)
        # this is the default path - nobody told us anything
        try:
            PyQt @v1
        except:
            fallback on PySide
else:
    use PyQt @v2 or PySide, depending on QT_API
    because ETS doesn't work with PyQt @v1.
"""
import os
import sys
from pydev_ipython.version import check_version
from pydev_ipython.qt_loaders import (load_qt, QT_API_PYSIDE,
QT_API_PYQT, QT_API_PYQT_DEFAULT,
loaded_api, QT_API_PYQT5)
#Constraints placed on an imported matplotlib
def matplotlib_options(mpl):
if mpl is None:
return
mpqt = mpl.rcParams.get('backend.qt4', None)
if mpqt is None:
mpqt = mpl.rcParams.get('backend.qt5', None)
if mpqt is None:
return None
if mpqt.lower() == 'pyside':
return [QT_API_PYSIDE]
elif mpqt.lower() == 'pyqt4':
return [QT_API_PYQT_DEFAULT]
elif mpqt.lower() == 'pyqt5':
return [QT_API_PYQT5]
raise ImportError("unhandled value for qt backend from matplotlib: %r" %
mpqt)
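#Illustrative sketch (added for clarity; not part of the original module):
#what matplotlib_options() reports when matplotlib is configured for PySide.
#The stub object below only mimics the rcParams lookup used above.
def _example_matplotlib_options():
    class _MplStub(object):
        rcParams = {'backend.qt4': 'PySide'}
    return matplotlib_options(_MplStub())  # -> [QT_API_PYSIDE]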
def get_options():
"""Return a list of acceptable QT APIs, in decreasing order of
preference
"""
#already imported Qt somewhere. Use that
loaded = loaded_api()
if loaded is not None:
return [loaded]
mpl = sys.modules.get('matplotlib', None)
if mpl is not None and not check_version(mpl.__version__, '1.0.2'):
#1.0.1 only supports PyQt4 v1
return [QT_API_PYQT_DEFAULT]
if os.environ.get('QT_API', None) is None:
#no ETS variable. Ask mpl, then use either
return matplotlib_options(mpl) or [QT_API_PYQT_DEFAULT, QT_API_PYSIDE, QT_API_PYQT5]
#ETS variable present. Will fallback to external.qt
return None
api_opts = get_options()
if api_opts is not None:
QtCore, QtGui, QtSvg, QT_API = load_qt(api_opts)
else: # use ETS variable
from pydev_ipython.qt import QtCore, QtGui, QtSvg, QT_API
| epl-1.0 |
espenhgn/ViSAPy | examples/ISI_waveforms/ISI_waveforms.py | 1 | 44953 | #!/usr/bin/env python
'''Test how the interspike interval affects spike waveforms in terms of
spike amplitude and spike width.
The script is set up like the other population scripts, but the single cell
is located at the origin, and we test only along the x-axis.'''
#import modules
import uuid
import urllib2
import zipfile
import numpy as np
import h5py
import os
import glob
#workaround for plots on cluster
if not os.environ.has_key('DISPLAY'):
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from scipy.signal import filtfilt, butter, lfilter
import scipy.interpolate as si
import scipy.optimize as so
from time import time, asctime
import spike_sort
import ViSAPy
import neuron
from mpi4py import MPI
plt.rcdefaults()
plt.rcParams.update({
'xtick.labelsize' : 11,
'xtick.major.size': 5,
'ytick.labelsize' : 11,
'ytick.major.size': 5,
'font.size' : 15,
'axes.labelsize' : 15,
'axes.titlesize' : 15,
'legend.fontsize' : 14,
'figure.subplot.wspace' : 0.4,
'figure.subplot.hspace' : 0.4,
'figure.subplot.left': 0.1,
})
smallfontsize=11
alphabet = 'abcdefghijklmnopqrstuvwxyz'
######## set random number generator seed ######################################
SEED = 123456
POPULATIONSEED = 123456
np.random.seed(SEED)
################# Initialization of MPI stuff ##################################
COMM = MPI.COMM_WORLD
SIZE = COMM.Get_size()
RANK = COMM.Get_rank()
######## create unique output folder and copy simulation script ################
if RANK == 0:
#savefolder = glob.glob('savedata_ISI_waveforms_*')[-1]
string = asctime().split()
savefolder = os.path.join(os.path.split(__file__)[0], 'savedata_ISI_waveforms_')
for s in string:
for ss in s.split(':'):
savefolder += ss + '_'
savefolder += uuid.uuid4().hex
os.mkdir(savefolder)
os.system("cp %s '%s'" % (__file__, savefolder + '/.'))
else:
savefolder = None
savefolder = COMM.bcast(savefolder, root=0)
######### Fetch Hay et al. 2011 model files, unzip locally #####################
if not os.path.isfile('L5bPCmodelsEH/morphologies/cell1.asc'):
if RANK == 0:
#get the model files:
u = urllib2.urlopen('http://senselab.med.yale.edu/ModelDB/' +
'eavBinDown.asp?o=139653&a=23&mime=application/zip')
localFile = open('L5bPCmodelsEH.zip', 'w')
localFile.write(u.read())
localFile.close()
#unzip:
myzip = zipfile.ZipFile('L5bPCmodelsEH.zip', 'r')
myzip.extractall('.')
myzip.close()
#compile NMODL language files
os.system('''
cd L5bPCmodelsEH/mod/
nrnivmodl
cd -
''')
COMM.Barrier()
##### load NEURON mechanisms from Hay et al. 2011 ##############################
neuron.load_mechanisms("L5bPCmodelsEH/mod")
################################################################################
# PARAMETERS
################################################################################
#set up base parameter file for the LFPy.Cell or LFPy.TemplateCell class,
#without specifying cell model.
cellParameters = {
'v_init' : -80,
'passive' : False,
'nsegs_method' : None,
'timeres_NEURON' : 2**-5,
'timeres_python' : 2**-5,
'tstartms' : 0.,
'tstopms' : 4250.,
'verbose' : False,
'pt3d' : True,
}
#in this particular set up, each cell will use the same
#morphology and templatefile specification of Hay et al 2011.
morphologies = [
'L5bPCmodelsEH/morphologies/cell1.asc',
]
templatefiles = [
['L5bPCmodelsEH/models/L5PCbiophys3.hoc',
'L5bPCmodelsEH/models/L5PCtemplate.hoc'],
]
#pointer to template specification name, cf. Linden et al. 2014
cellParameters.update(dict(templatename = 'L5PCtemplate'))
# set the default rotation of the cells
defaultrotation = {}
#LFPy can simulate directly to file, but for performance reasons, this
#feature should be avoided
simulationParameters = {
#'to_file' : True, #file_name set in cellsim()
}
populationParameters = {
'POPULATION_SIZE' : 32,
'radius' : 50,
'killzone' : 25,
'z_min' : -25,
'z_max' : 175,
'X' : np.array([ [0, 0, 0, -40, -40, 0, 0],
[0, 0, 0, 40, 40, 0, 0]]),
'Y' : np.array([ [0, 0, -50, -50, -50, 0, 0],
[0, 0, 0, 0, 0, 0, 0]]),
'Z' : np.array([-np.inf, -50.01, -50, 0, 2000, 2000.01, np.inf]),
'min_cell_interdist' : 1.,
}
#Recording electrode geometry, seven contacts
N = np.empty((7, 3))
for i in xrange(N.shape[0]): N[i,] = [1, 0, 0] #normal unit vec. to contacts
electrodeParameters = {
'x' : np.array([10, 50, 100, 25, 25, 25, 25]),
'y' : np.array([0, 0, 0, 0, 0, 0, 0]),
'z' : np.array([0, 0, 0, 100, 50, 0, -50]),
'sigma' : 0.3,
'r' : 7.5,
'n' : 10,
'N' : N,
'method' : 'som_as_point',
}
driftParameters = None
#synaptic parameters: AMPA - excitatory, GABA_A - inhibitory
synparams_AMPA = { #Excitatory synapse parameters
'e' : 0, #reversal potential
'syntype' : 'Exp2Syn', #conductance based exponential synapse
'tau1' : 1., #Time constant, rise
'tau2' : 3., #Time constant, decay
'weight' : 0.0125, #Synaptic weight
'section' : ['apic', 'dend'],
'nPerArea' : [475E-4, 475E-5], #mean +- std
}
synparams_GABA_A = { #Inhibitory synapse parameters
'e' : -80,
'syntype' : 'Exp2Syn',
'tau1' : 1.,
'tau2' : 12.,
'weight' : 0.025,
'section' : ['soma', 'apic', 'dend'],
'nPerArea' : [20E-3, 20E-4],
}
#parameters for ViSAPy.*Network instance
networkParameters = {
#class Network
'simtime' : cellParameters['tstopms']-cellParameters['tstartms'],
'dt' : cellParameters['timeres_NEURON'],
'total_num_virtual_procs' : SIZE,
'savefolder' : savefolder,
'label' : 'statPoisson',
'to_file' : True,
'to_memory' : False,
'print_time' : False,
#class StationaryPoissonNetwork
'NE' : 60000,
'NI' : 20000,
'frateE' : 10.0,
'frateI' : 10.0
}
#nyquist frequency of simulation output
nyquist = 1000. / cellParameters['timeres_python'] / 2
#set up filtering steps of extracellular potentials
filters = []
#presample filter to avoid aliasing
b, a = butter(1, np.array([0.5, 8000.]) / nyquist, btype='pass')
filters.append({
'b' : b,
'a' : a,
'filterFun' : lfilter
})
b, a = butter(4, np.array([300., 5000.]) / nyquist, btype='pass')
filters.append({
'b' : b,
'a' : a,
'filterFun' : filtfilt
})
#note, filterFun should be either scipy.signal.lfilter or filtfilt
#Noise parameters including noise covariance matrix
noiseParameters = None
noiseFeaturesParameters = None
# set the rotations
rotations = []
defaultrotation = {}
#container file for noise output etc.
noise_output_file = None
################################################################################
## MAIN
################################################################################
TIME = time()
#if database files exist, skip regenerating spike events
if not os.path.isfile(os.path.join(savefolder, 'SpTimesEx.db')) \
and not os.path.isfile(os.path.join(savefolder, 'SpTimesIn.db')):
networkInstance = ViSAPy.StationaryPoissonNetwork(**networkParameters)
networkInstance.run()
networkInstance.get_results()
networkInstance.process_gdf_files()
else:
networkInstance = ViSAPy.StationaryPoissonNetwork(**networkParameters)
#set some seeds AFTER network sim, want noise and spikes to be different,
#but populations to be equal!!!!!!
np.random.seed(POPULATIONSEED)
benchmark_data = ViSAPy.BenchmarkDataRing(
cellParameters = cellParameters,
morphologies = morphologies,
templatefiles = templatefiles,
defaultrotation = defaultrotation,
simulationParameters = simulationParameters,
populationParameters = populationParameters,
electrodeParameters = electrodeParameters,
noiseFile = noise_output_file,
filters = filters,
savefolder = savefolder,
default_h5_file = 'lfp_cell_%.3i.h5',
nPCA = 2,
TEMPLATELEN = 100,
TEMPLATEOFFS = 0.3,
spikethreshold = 3.,
networkInstance = networkInstance,
synapseParametersEx = synparams_AMPA,
synapseParametersIn = synparams_GABA_A,
driftParameters = driftParameters,
#pick Poisson trains with flat probability
randdist = np.random.rand,
sigma_EX = populationParameters['POPULATION_SIZE']+1,
sigma_IN = populationParameters['POPULATION_SIZE']+1,
)
#override the random locations and rotations
for i in xrange(benchmark_data.POPULATION_SIZE):
benchmark_data.pop_soma_pos[i] = {
'xpos' : 0,
'ypos' : 0,
'zpos' : 0,
}
benchmark_data.rotations[i] = {
'z' : 0,
}
#run the cell simulations, skip collect method
benchmark_data.run()
################################################################################
# Function declarations
################################################################################
def calc_spike_widths(LFP, tvec, threshold=0.5):
'''
    calculate spike widths of all spike traces, defined at a threshold which is
    a fraction of the min-max amplitude of each trace
'''
def calc_spike_width(trace, tvec, threshold):
'''calculate the spike width of the negative phase at
threshold of extracellular spike trace'''
#find global minima
minind = np.where(trace == trace.min())[0]
        #offset trace by its maximum value before calculating the spike width
trace -= trace.max()
#assess threshold crossings which may occur several times
inds = trace <= trace.min() * threshold
        #go backwards in time and check for consistency
for i in xrange(minind, 0, -1):
            #on first occurrence of False, stop loop and set remaining as False
if inds[i] == False:
inds[:i] = False
break
#go forward in time
for i in xrange(minind, len(trace)):
            #on first occurrence of False, stop loop and set remaining as False
if inds[i] == False:
inds[i:] = False
break
inds = np.where(inds)[0]
#linear interpolation to find crossing of threshold
x0 = np.array([tvec[inds[0]-1], tvec[inds[0]]])
y0 = np.array([trace[inds[0]], trace[inds[0]-1]])
f = si.interp1d(y0, x0)
t0 = f(trace.min() * threshold)
x1 = np.array([tvec[inds[-1]], tvec[inds[-1]+1]])
y1 = np.array([trace[inds[-1]], trace[inds[-1]+1]])
f = si.interp1d(y1, x1)
t1 = f(trace.min() * threshold)
spw = t1 - t0
if spw <= 0:
return np.nan
else:
return spw
spike_widths = []
for trace in LFP:
try:
spike_widths.append(calc_spike_width(trace.copy(), tvec, threshold))
except:
spike_widths.append(np.nan)
return np.array(spike_widths)
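#Illustrative sketch (added for clarity; not part of the original script):
#spike width at half of the negative peak for a single synthetic waveform.
def _example_calc_spike_widths():
    '''width of a synthetic Gaussian "spike" at 50 % of its minimum, in ms'''
    tvec = np.arange(100) * 2**-5                      # ms, like timeres_python
    trace = -np.exp(-(tvec - 1.5)**2 / (2 * 0.1**2))   # negative peak at 1.5 ms
    #expected result is close to the Gaussian FWHM, 2*sqrt(2*ln 2)*0.1 ~ 0.24 ms
    return calc_spike_widths(np.array([trace]), tvec, threshold=0.5)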
def nonlinfun(x, xdata):
return x[0]*np.log(xdata) + x[1]
def costfun(x, xdata, ydata):
'''cost function to be minimized, return sum of abs difference'''
#eval x for xdata-values
out = nonlinfun(x, xdata)
return abs(ydata-out).sum()
def optimize(xdata, ydata):
methods = ['Nelder-Mead'] #['Nelder-Mead'] # , 'Powell', 'CG', 'BFGS', 'L-BFGS-B', 'TNC', 'COBYLA', 'SLSQP']
for method in methods:
xf = so.minimize(costfun, x0=np.array([.1, 1.]),
args=(xdata, ydata),
method=method,
options={'maxiter' : 1000, 'xtol': 1e-8, 'disp': True})
return xf.x
def normalfun(x, xdata):
mu = x[0]
sigma = x[1]
return 1 / np.sqrt(2*np.pi*sigma) * np.exp(-(xdata-mu)**2/(2*sigma**2))
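#Illustrative sketch (added for clarity; not part of the original script):
#recover the parameters of y = a*log(x) + b from noiseless synthetic data.
def _example_optimize():
    '''the fit should return values close to [2., 1.] for a=2, b=1'''
    xdata = np.linspace(1., 100., 50)
    ydata = nonlinfun([2., 1.], xdata)
    return optimize(xdata, ydata)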
def plot_figure_06(features, sp_waves, cmap=plt.cm.coolwarm, TRANSIENT=500):
    '''plot spike waveform features extracted with the spike_sort module'''
from matplotlib.colors import LogNorm
fig = plt.figure(figsize=(10,10))
#feature plots
data = features['data']
rows = cols = features['data'].shape[1]
#create a 4X4 grid for subplots on [0.1, 0.5], [0.1, 0.5]
width = 0.925 / rows * 0.99
height= 0.925 / rows * 0.99
x = np.linspace(0.05, 0.975-width, rows)
y = np.linspace(0.05, 0.975-height, rows)[::-1]
#sorting array
argsort = np.argsort(ISI)
for i, j in np.ndindex((rows, rows)):
if i < j:
continue
if i == j:
ax = fig.add_axes([x[i], y[j], width, height])
bins = np.linspace(data[:, i].min(), data[:, i].max(), 50)
ax.hist(data[:, i], bins=bins,
histtype='stepfilled', alpha=1,
edgecolor='none', facecolor='gray')
#draw normal function from mean and std
[count, bins] = np.histogram(data[:, i], bins=bins)
normal = normalfun([data[:, i].mean(),
data[:, i].std()], bins)
#scale to histogram:
normal /= normal.sum()
normal *= count.sum()
#superimpose normal function
ax.plot(bins, normal, 'k', lw=1)
ax.set_ylabel(features['names'][j])
else:
ax = fig.add_axes([x[i], y[j], width, height])
sc = ax.scatter(data[argsort, i], data[argsort, j],
marker='o',
c=ISI[argsort],
norm=LogNorm(),
cmap = plt.cm.get_cmap(cmap, 51),
edgecolors='none',
s=5, alpha=1, rasterized=True)
if j == 0:
ax.set_title(features['names'][i])
ax.set_yticks([])
ax.set_yticklabels([])
ax.set_xticks([])
ax.set_xticklabels([])
ax.axis(ax.axis('tight'))
for loc, spine in ax.spines.iteritems():
if loc in ['right', 'top',]:
spine.set_color('none')
if j == 0 and i == 0:
ax.text(-0.3, 1.0, 'b',
horizontalalignment='center',
verticalalignment='bottom',
fontsize=18, fontweight='demibold',
transform=ax.transAxes)
#plot extracted and aligned waveforms,
tvec = sp_waves['time']
vlim = abs(sp_waves['data']).max()
scale = 2.**np.round(np.log2(vlim))
yticks = []
yticklabels = []
for i in xrange(4):
yticks.append(-i*scale)
yticklabels.append('ch. %i' % (i+1))
ax1 = fig.add_axes([0.05, 0.05, 0.2, 0.5])
for i in xrange(4):
#create a line-collection
zips = []
for j, x in enumerate(sp_waves['data'][:, argsort, i].T):
zips.append(zip(tvec, x - i*scale))
linecollection = LineCollection(zips,
linewidths=(1),
cmap = plt.cm.get_cmap(cmap, 51),
rasterized=True,
norm=LogNorm(),
alpha=1,
clip_on=False)
linecollection.set_array(ISI[argsort])
ax1.add_collection(linecollection)
axis = ax1.axis('tight')
ax1.axis(axis)
ax1.set_ylim(axis[2]-0.05*scale, axis[3])
for loc, spine in ax1.spines.iteritems():
if loc in ['right', 'top',]:
spine.set_color('none')
ax1.xaxis.set_ticks_position('bottom')
ax1.yaxis.set_ticks_position('left')
ax1.set_yticks(yticks)
ax1.set_yticklabels(yticklabels)
ax1.set_title('waveforms')
ax1.set_xlabel(r'$t$ (ms)', labelpad=0.1)
ax1.plot([tvec[-1], tvec[-1]], [axis[2],axis[2]+scale],
lw=4, color='k', clip_on=False)
ax1.text(tvec[-1]*1.03, axis[2], '%.2f mV' % scale,
fontsize=smallfontsize,
ha='left', va='bottom', rotation='vertical'
)
ax1.text(-0.1, 1.0, 'a',
horizontalalignment='center',
verticalalignment='bottom',
fontsize=18, fontweight='demibold',
transform=ax1.transAxes)
#colorbar for scatters and lines
cax = fig.add_axes([0.27, 0.05, 0.015, 0.3])
cax.set_rasterization_zorder(1)
ticks = [5, 10, 20, 50, 100, 200, 500, 1000]
axcb = fig.colorbar(linecollection, cax=cax, ticks=ticks)
axcb.ax.set_visible(True)
axcb.ax.set_yticklabels(ticks)
axcb.set_label('ISI (ms)')
return fig
def plot_figure_05(cells, benchmark_data, cmap=plt.cm.coolwarm, TRANSIENT=500.):
    '''Plot somatic and LFP traces, spike waveforms, and ISI-dependent
    amplitude and width statistics'''
from matplotlib.colors import LogNorm
TEMPLATELEN = benchmark_data.TEMPLATELEN
f = h5py.File(os.path.join(benchmark_data.savefolder, 'testISIshapes.h5'))
amplitudes_raw = f['amplitudes_raw'].value
amplitudes_flt = f['amplitudes_flt'].value
templatesRaw = f['templatesRaw'].value
templatesFlt = f['templatesFlt'].value
ISI = f['ISI'].value
concAPtemplates = f['APtemplates'].value
AP_amplitudes = f['AP_amplitudes'].value
AP_widths = f['AP_widths'].value
f.close()
#sorting array
argsort = np.argsort(ISI)
#plot some LFP-traces for a single cell
fig = plt.figure(figsize=(10, 13))
fig.subplots_adjust(wspace=0.4, hspace=0.3, bottom=0.05, top=0.95,
left=0.075, right=0.90)
ax = fig.add_subplot(5, 3, 1)
#find spikecount in total
numspikes = []
for cell in cells.values():
numspikes = np.r_[numspikes, cell.AP_train.sum()]
#pick an index with "average" rate
cellkey = np.abs(numspikes - numspikes.mean()).argmin()
##plot some PSDs from somav and LFP
#choose one cell
cell = cells[cellkey]
cell.tvec = np.arange(cell.somav.size) * cell.timeres_python
inds = np.where((cell.tvec >= TRANSIENT) & (cell.tvec <= TRANSIENT+500))[0]
somav = cell.somav[inds]
somav -= somav.min()
somav /= somav.max()
traces = somav
xmins = []
for j in xrange(3):
x = cell.LFP[j, inds]
xmin = x.min()
xmins.append(xmin)
x /= -xmin
x -= 1.5*j + 0.5
traces = np.c_[traces, x]
#print traces.shape
traces = traces.T
ax.set_xlim(TRANSIENT, cell.tvec[inds][-1])
ax.set_ylim(traces.min(), traces.max())
line_segments = LineCollection([zip(cell.tvec[inds], x) \
for x in traces],
linewidths=(1),
colors=('k'),
linestyles='solid',
rasterized=True,
clip_on=False)
ax.add_collection(line_segments)
#scalebars
ax.plot([cell.tvec[inds[-1]], cell.tvec[inds[-1]]],
[1, 0], 'k', lw=4, clip_on=False)
ax.text(cell.tvec[inds[-1]]*1.03, 0.,
r'%.0f' % (cell.somav[inds].max()-cell.somav[inds].min()) + '\n' + 'mV',
color='k', fontsize=smallfontsize, va='bottom', ha='left')
for j in xrange(3):
ax.plot([cell.tvec[inds[-1]], cell.tvec[inds[-1]]],
[-j*1.5-0.5, -j*1.5-1.5], 'k', lw=4, clip_on=False)
ax.text(cell.tvec[inds[-1]]*1.03, -j*1.5-1.5,
r'%.0f' % (abs(xmins[j]*1E3)) + '\n' + '$\mu$V', color='k',
fontsize=smallfontsize,
va='bottom',
ha='left'
)
for loc, spine in ax.spines.iteritems():
if loc in ['right','top', 'left']:
spine.set_color('none') # don't draw spine
ax.xaxis.set_ticks_position('bottom')
ax.set_xlabel(r'$t$ (ms)', labelpad=0.1)
ax.set_yticks([0.0, -0.5, -2, -3.5])
ax.set_yticklabels([r'$V_\mathrm{soma}$',
r'$\Phi_{x=10}$',
r'$\Phi_{x=50}$',
r'$\Phi_{x=100}$'])
ax.axis(ax.axis('tight'))
ax.text(-0.2, 1.0, 'a',
horizontalalignment='center',
verticalalignment='bottom',
fontsize=18, fontweight='demibold',
transform=ax.transAxes)
#raise Exception
PSDs = np.array([])
#psd of somav
psd, freqs = plt.mlab.psd(cell.somav[cell.tvec > TRANSIENT]-cell.somav[cell.tvec > TRANSIENT].mean(),
NFFT=2**15+2**14, noverlap=int((2**15+2**14)*3./4), #5096,
Fs=1E3/np.diff(cell.tvec)[-1])
PSDs = np.r_[PSDs, psd[1:]]
#psds of LFPs
for j in xrange(3):
psd, freqs = plt.mlab.psd(cell.LFP[j, cell.tvec > TRANSIENT]-cell.LFP[j, cell.tvec > TRANSIENT].mean(),
NFFT=2**15+2**14, noverlap=int((2**15+2**14)*3./4), #NFFT=5096,
Fs=1E3/np.diff(cell.tvec)[-1])
PSDs = np.c_[PSDs, psd[1:]]
PSDs = PSDs.T
#create axes object
ax = fig.add_subplot(5, 3, 2)
ax.set_xlim(freqs[1], freqs[-1])
ax.set_ylim(PSDs[1:].min(),
PSDs[1:].max())
#create line collection
line_segments = LineCollection([zip(freqs[1:], x) \
for x in PSDs],
linewidths=(1),
colors=('k'),
linestyles='solid',
rasterized=True)
ax.add_collection(line_segments)
plt.sci(line_segments) # This allows interactive changing of the colormap.
ax.loglog()
for loc, spine in ax.spines.iteritems():
if loc in ['right','top']:
spine.set_color('none') # don't draw spine
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_xlabel(r'$f$ (Hz)', labelpad=0.1)
ax.set_title(r'PSD (mV$^2$/Hz)')
ax.axis(ax.axis('tight'))
ax.grid('on')
ax.text(-0.2, 1.0, 'b',
horizontalalignment='center',
verticalalignment='bottom',
fontsize=18, fontweight='demibold',
transform=ax.transAxes)
#plot histogram over ISI
ax = fig.add_subplot(5, 3, 3)
bins = 10**np.linspace(np.log10(1), np.log10(1E3), 100)
ax.hist(ISI, bins=bins,
color='gray',
histtype='stepfilled',
linewidth=0)
ax.semilogx()
for loc, spine in ax.spines.iteritems():
if loc in ['right','top']:
spine.set_color('none') # don't draw spine
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.axis(ax.axis('tight'))
ax.set_xlim([bins.min(), bins.max()])
ax.set_ylim(bottom=0)
ax.set_ylabel('count (-)', labelpad=0)
ax.set_xlabel('ISI (ms)', labelpad=0.1)
ax.set_title('ISI distr. %i APs' % ISI.size)
ax.text(-0.2, 1.0, 'c',
horizontalalignment='center',
verticalalignment='bottom',
fontsize=18, fontweight='demibold',
transform=ax.transAxes)
#plot nonfiltered spike waveforms
ax = fig.add_subplot(5,3,4)
line_segments = LineCollection([zip(np.arange(TEMPLATELEN), x) \
for x in concAPtemplates[argsort, TEMPLATELEN*0:TEMPLATELEN*1]],
linewidths=(1),
linestyles='solid',
norm=LogNorm(),
cmap = plt.cm.get_cmap(cmap, 51),
rasterized=True)
line_segments.set_array(ISI[argsort])
ax.add_collection(line_segments)
ax.axis(ax.axis('tight'))
for loc, spine in ax.spines.iteritems():
if loc in ['right','top']:
spine.set_color('none') # don't draw spine
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_ylabel(r'$V_\mathrm{soma}$ (mV)', labelpad=0)
ax.set_xlabel('samples (-)', labelpad=0.1)
ax.text(-0.2, 1.0, 'd',
horizontalalignment='center',
verticalalignment='bottom',
fontsize=18, fontweight='demibold',
transform=ax.transAxes)
#plot AP amplitudes vs widths
ax = fig.add_subplot(5,3,5)
#mask out invalid widths
mask = True - np.isnan(AP_widths)
sc = ax.scatter(AP_widths[mask[argsort]], AP_amplitudes[mask[argsort]], marker='o',
edgecolors='none', s=5,
c=ISI[mask[argsort]], norm=LogNorm(),
cmap=plt.cm.get_cmap(cmap, 51), #bins.size)
alpha=1, clip_on=False, rasterized=True)
ax.set_ylabel('AP ampl. (mV)', labelpad=0)
ax.set_xlabel('AP width (ms)', labelpad=0.1)
for loc, spine in ax.spines.iteritems():
if loc in ['right','top']:
spine.set_color('none') # don't draw spine
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_xlim([AP_widths[mask].min(), AP_widths[mask].max()])
ax.set_ylim([AP_amplitudes[mask].min(), AP_amplitudes[mask].max()])
ax.text(-0.2, 1.0, 'e',
horizontalalignment='center',
verticalalignment='bottom',
fontsize=18, fontweight='demibold',
transform=ax.transAxes)
ax = fig.add_subplot(5,3,6)
#set lims
ax.set_xlim(0, TEMPLATELEN)
ax.set_ylim(templatesRaw[np.isfinite(templatesRaw[:, 0]), :][:, TEMPLATELEN*0:TEMPLATELEN*1].min(),
templatesRaw[np.isfinite(templatesRaw[:, 0]), :][:, TEMPLATELEN*0:TEMPLATELEN*1].max())
#create linecollections
line_segments = LineCollection([zip(np.arange(TEMPLATELEN), x) \
for x in templatesRaw[argsort, TEMPLATELEN*0:TEMPLATELEN*1]],
linewidths=(1),
linestyles='solid',
norm=LogNorm(),
cmap = plt.cm.get_cmap(cmap, 51),
rasterized=True)
line_segments.set_array(ISI[argsort])
ax.add_collection(line_segments)
for loc, spine in ax.spines.iteritems():
if loc in ['right','top']:
spine.set_color('none') # don't draw spine
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_ylabel(r'$\Phi_{x=10}$ (mV)', labelpad=0)
ax.set_xlabel('samples (-)', labelpad=0.1)
rect = np.array(ax.get_position().bounds)
rect[0] += rect[2] + 0.01
rect[2] = 0.015
cax = fig.add_axes(rect)
cax.set_rasterization_zorder(1)
axcb = fig.colorbar(line_segments, cax=cax)
axcb.ax.set_visible(True)
ticks = [5, 10, 20, 50, 100, 200, 500, 1000]
axcb.set_ticks(ticks)
axcb.set_ticklabels(ticks)
axcb.set_label('ISI (ms)', va='center', ha='center', labelpad=0)
ax.text(-0.2, 1.0, 'f',
horizontalalignment='center',
verticalalignment='bottom',
fontsize=18, fontweight='demibold',
transform=ax.transAxes)
#plot FILTERED spike waveforms
ax = fig.add_subplot(5,3,7)
#set lims
ax.set_xlim(0, TEMPLATELEN)
ax.set_ylim(templatesFlt[np.isfinite(templatesFlt[:, 0]), :][:, TEMPLATELEN*0:TEMPLATELEN*1].min(),
templatesFlt[np.isfinite(templatesFlt[:, 0]), :][:, TEMPLATELEN*0:TEMPLATELEN*1].max())
#create linecollections
line_segments = LineCollection([zip(np.arange(TEMPLATELEN), x) \
for x in templatesFlt[argsort, TEMPLATELEN*0:TEMPLATELEN*1]],
linewidths=(1),
linestyles='solid',
norm=LogNorm(),
cmap = plt.cm.get_cmap(cmap, 51),
rasterized=True)
line_segments.set_array(ISI[argsort])
ax.add_collection(line_segments)
for loc, spine in ax.spines.iteritems():
if loc in ['right','top']:
spine.set_color('none') # don't draw spine
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_xlabel('samples (-)', labelpad=0.1)
ax.set_ylabel(r'$\Phi_{x=10}$ (mV)', labelpad=0)
ax.text(-0.2, 1.0, 'g',
horizontalalignment='center',
verticalalignment='bottom',
fontsize=18, fontweight='demibold',
transform=ax.transAxes)
ax = fig.add_subplot(5,3,8)
#set lims
ax.set_xlim(0, TEMPLATELEN)
ax.set_ylim(templatesFlt[np.isfinite(templatesFlt[:, 0]), :][:, TEMPLATELEN*1:TEMPLATELEN*2].min(),
templatesFlt[np.isfinite(templatesFlt[:, 0]), :][:, TEMPLATELEN*1:TEMPLATELEN*2].max())
#create linecollections
line_segments = LineCollection([zip(np.arange(TEMPLATELEN), x) \
for x in templatesFlt[argsort, TEMPLATELEN*1:TEMPLATELEN*2]],
linewidths=(1),
linestyles='solid',
norm=LogNorm(),
cmap = plt.cm.get_cmap(cmap, 51),
rasterized=True)
line_segments.set_array(ISI[argsort])
ax.add_collection(line_segments)
for loc, spine in ax.spines.iteritems():
if loc in ['right','top']:
spine.set_color('none') # don't draw spine
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_xlabel('samples (-)', labelpad=0.1)
ax.set_ylabel(r'$\Phi_{x=50}$ (mV)', labelpad=0)
ax.text(-0.2, 1.0, 'h',
horizontalalignment='center',
verticalalignment='bottom',
fontsize=18, fontweight='demibold',
transform=ax.transAxes)
ax = fig.add_subplot(5,3,9)
#set lims
ax.set_xlim(0, TEMPLATELEN)
ax.set_ylim(templatesFlt[np.isfinite(templatesFlt[:, 0]), :][:, TEMPLATELEN*2:TEMPLATELEN*3].min(),
templatesFlt[np.isfinite(templatesFlt[:, 0]), :][:, TEMPLATELEN*2:TEMPLATELEN*3].max())
#create linecollections
line_segments = LineCollection([zip(np.arange(TEMPLATELEN), x) \
for x in templatesFlt[argsort, TEMPLATELEN*2:TEMPLATELEN*3]],
linewidths=(1),
linestyles='solid',
norm=LogNorm(),
cmap = plt.cm.get_cmap(cmap, 51),
rasterized=True)
line_segments.set_array(ISI[argsort])
ax.add_collection(line_segments)
plt.sci(line_segments) # This allows interactive changing of the colormap.
for loc, spine in ax.spines.iteritems():
if loc in ['right','top']:
spine.set_color('none') # don't draw spine
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_xlabel('samples (-)', labelpad=0.1)
ax.set_ylabel(r'$\Phi_{x=100}$ (mV)', labelpad=0)
ax.text(-0.2, 1.0, 'i',
horizontalalignment='center',
verticalalignment='bottom',
fontsize=18, fontweight='demibold',
transform=ax.transAxes)
for i in xrange(3):
ax = fig.add_subplot(5, 3, i+10)
sc = ax.scatter(spikewidths_flt[i, argsort], amplitudes_flt[i, argsort], marker='o',
edgecolors='none', s=5,
c=ISI[argsort],
norm=LogNorm(),
cmap = plt.cm.get_cmap(cmap, 51),
label='filtered', alpha=1, clip_on=False,
rasterized=True)
if i == 0: ax.set_ylabel('amplitude (mV)', labelpad=0)
ax.set_xlabel('width (ms)', labelpad=0.1)
ax.set_xlim([spikewidths_flt[i, :].min(), spikewidths_flt[i, :].max()])
ax.set_ylim([amplitudes_flt[i, :].min(), amplitudes_flt[i, :].max()])
for loc, spine in ax.spines.iteritems():
if loc in ['right','top']:
spine.set_color('none') # don't draw spine
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.text(-0.2, 1.0, alphabet[i+9],
horizontalalignment='center',
verticalalignment='bottom',
fontsize=18, fontweight='demibold',
transform=ax.transAxes)
for i in xrange(3):
ax = fig.add_subplot(5 , 3, i+13)
sc = ax.scatter(ISI, amplitudes_flt[i, :], marker='o',
edgecolors='none', s=5,
facecolors='k',
label='filtered', alpha=1, clip_on=False,
rasterized=True)
if i == 0: ax.set_ylabel('amplitude (mV)', labelpad=0)
ax.set_xlabel('ISI (ms)', labelpad=0.1)
ax.set_xlim([ISI.min(), ISI.max()])
ax.set_ylim([amplitudes_flt[i, :].min(), amplitudes_flt[i, :].max()])
ax.semilogx()
for loc, spine in ax.spines.iteritems():
if loc in ['right','top']:
spine.set_color('none') # don't draw spine
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.text(-0.2, 1.0, alphabet[i+12],
horizontalalignment='center',
verticalalignment='bottom',
fontsize=18, fontweight='demibold',
transform=ax.transAxes)
return fig
################################################################################
# Perform some preprocessing for plots
################################################################################
if RANK == 0:
cellindices = np.arange(benchmark_data.POPULATION_SIZE)
cells = benchmark_data.read_lfp_cell_files(cellindices)
print 'cells ok'
#recalculate AP_trains using the slope of somav
AP_threshold = -30
for cellkey, cell in cells.iteritems():
setattr(cell, 'AP_train',
benchmark_data.return_spiketrains(v=cell.somav, v_t=AP_threshold, TRANSIENT=500.))
#do some filtering of the LFP traces of each cell
for i, cell in cells.iteritems():
LFP_flt = cell.LFP.value.copy()
for fltr in benchmark_data.filters:
LFP_flt = fltr['filterFun'](fltr['b'], fltr['a'], LFP_flt)
setattr(cell, 'LFP_flt', LFP_flt)
    #Reformat template waveforms to correspond with the ISI: for each cell in
    #the population we extract the second and later spike waveforms, since the
    #first spike has no preceding ISI.
AllTemplatesRaw = {}
AllTemplatesFlt = {}
APtemplates = {}
spWavesTetrode = {}
    #Container for ISIs computed from AP_trains
ISI = []
for cellkey, cell in cells.iteritems():
#use spike_sort module to extract, upsample, and align spike waveforms
#sample window in ms:
sp_win = ((np.array([0, benchmark_data.TEMPLATELEN]) -
benchmark_data.TEMPLATELEN*benchmark_data.TEMPLATEOFFS)
* cellParameters['timeres_python']).tolist()
#raw LFP per cell
spRaw = {
'data' : cell.LFP,
'FS' : 1E3 / cellParameters['timeres_python'],
'n_contacts' : 3
}
#filtered LFP per cell
spFlt = {
'data' : cell.LFP_flt,
'FS' : 1E3 / cellParameters['timeres_python'],
'n_contacts' : 3
}
spAPs = {
'data' : cell.somav.reshape((1, -1)),
'FS' : 1E3 / cellParameters['timeres_python'],
'n_contacts' : 1
}
#find spike events, and prune spikes near bounds
OFF = int(benchmark_data.TEMPLATELEN*benchmark_data.TEMPLATEOFFS)
APs = np.where(cell.AP_train == 1)[0] #*cellParameters['timeres_python']
#collect ISI
APs = APs[(APs > OFF) & (APs < cell.AP_train.size - (benchmark_data.TEMPLATELEN-OFF))].astype(float)
APs *= cellParameters['timeres_python']
ISI = np.r_[ISI, np.diff(APs)]
sptAPs = {
'data' : APs[1:], #discard first waveform, ISI not known for these
'contact' : 0,
'thresh' : 0,
}
#allow independent alignment of waveforms
sptRaw = sptAPs.copy()
sptFlt = sptAPs.copy()
        #align spike times to the minimum on channel 0 for raw and filtered traces
sptRaw = spike_sort.extract.align_spikes(spRaw, sptRaw, sp_win,
contact=0, remove=False, type="min")
sptFlt = spike_sort.extract.align_spikes(spFlt, sptFlt, sp_win,
contact=0, remove=False, type="min")
sptAPs = spike_sort.extract.align_spikes(spAPs, sptAPs, sp_win,
contact=0, remove=False, type="max")
#extract spike waveforms:
spWavesRaw = spike_sort.extract.extract_spikes(spRaw, sptRaw, sp_win,
contacts=[0, 1, 2])
spWavesFlt = spike_sort.extract.extract_spikes(spFlt, sptFlt, sp_win,
contacts=[0, 1, 2])
spWavesAPs = spike_sort.extract.extract_spikes(spAPs, sptAPs, sp_win,
contacts=[0])
#spikes from "tetrode"
spWavesTetrode.update({cellkey : spike_sort.extract.extract_spikes(spFlt,
sptFlt, sp_win, contacts=[3, 4, 5, 6])})
#convert to 2D arrays, each row is 3 channels concatenated
temp = []
for i in xrange(spWavesRaw['data'].shape[1]):
temp.append(spWavesRaw['data'][:, i, :].T.flatten())
spWavesRaw = np.array(temp)
temp = []
for i in xrange(spWavesFlt['data'].shape[1]):
temp.append(spWavesFlt['data'][:, i, :].T.flatten())
spWavesFlt = np.array(temp)
temp = []
for i in xrange(spWavesAPs['data'].shape[1]):
temp.append(spWavesAPs['data'][:, i, :].T.flatten())
spWavesAPs = np.array(temp)
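        #spike_sort returns waveforms with shape (samples, spikes, contacts); the loops
        #above flatten this to 2D arrays where each row is one spike with the contacts'
        #waveforms concatenated (3*TEMPLATELEN columns for the LFP arrays, TEMPLATELEN
        #for the somatic traces)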
#fill in values
AllTemplatesRaw.update({cellkey : spWavesRaw})
AllTemplatesFlt.update({cellkey : spWavesFlt})
APtemplates.update({cellkey : spWavesAPs})
#delete some variables
del temp, spWavesRaw, spWavesFlt, sptRaw, sptFlt, spRaw, spFlt, sp_win
#employ spike_sort to calculate PCs, first reformat structure
sp_waves = {
'time' : spWavesTetrode[0]['time'],
#'data' : [],
'FS' : spWavesTetrode[0]['FS']
}
for key, value in spWavesTetrode.items():
if key == 0:
sp_waves['data'] = value['data']
else:
sp_waves['data'] = np.r_['1', sp_waves['data'], value['data']]
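    #sp_waves['data'] now stacks the tetrode waveforms of all cells along the spike
    #axis (axis 1), so the principal components below are computed across the whole
    #population rather than per cell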
#compute features
features = ViSAPy.plottestdata.fetPCA(sp_waves, ncomps=benchmark_data.nPCA)
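    #for reference, a minimal (unused) sketch of what a PCA feature extraction such as
    #fetPCA is assumed to do; names and normalization here are illustrative only
    def _pca_features_sketch(waves, ncomps):
        '''Project waveforms of shape (samples, spikes, contacts) onto the first
        ncomps principal components of each contact -> (spikes, ncomps*contacts)'''
        feats = []
        for c in range(waves.shape[2]):
            X = waves[:, :, c].T                             #(spikes, samples)
            X = X - X.mean(axis=0)                           #center each sample point
            Vt = np.linalg.svd(X, full_matrices=False)[2]    #rows are principal directions
            feats.append(np.dot(X, Vt[:ncomps].T))           #scores on the leading PCs
        return np.concatenate(feats, axis=1)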
#concatenate the templates and basis functions for each cell object,
#except first spike
concTemplatesRaw = None
concTemplatesFlt = None
concAPtemplates = None
channels = ['ch. 0', 'ch. 1', 'ch. 2']
for i, cell in cells.iteritems():
if i == 0:
if AllTemplatesRaw[i].size > 0:
concTemplatesRaw = AllTemplatesRaw[i]
concTemplatesFlt = AllTemplatesFlt[i]
concAPtemplates = APtemplates[i]
else:
if AllTemplatesRaw[i].size > 0:
concTemplatesRaw = np.r_[concTemplatesRaw,
AllTemplatesRaw[i]]
concTemplatesFlt = np.r_[concTemplatesFlt,
AllTemplatesFlt[i]]
concAPtemplates = np.r_[concAPtemplates, APtemplates[i]]
del AllTemplatesRaw, AllTemplatesFlt, APtemplates
print 'concTemplatesRaw, concTemplatesFlt, concAPtemplates ok'
#extract the amplitudes between min and max of the templates in
#the three contacts
amplitudes_raw = np.empty((len(channels), concTemplatesRaw.shape[0]))
amplitudes_flt = np.empty((len(channels), concTemplatesFlt.shape[0]))
TEMPLATELEN = benchmark_data.TEMPLATELEN
for j in xrange(3):
i = 0
for x in concTemplatesRaw[:, TEMPLATELEN*j:TEMPLATELEN*(j+1)]:
amplitudes_raw[j, i] = x.max() - x.min()
i += 1
for j in xrange(3):
i = 0
for x in concTemplatesFlt[:, TEMPLATELEN*j:TEMPLATELEN*(j+1)]:
amplitudes_flt[j, i] = x.max() - x.min()
i += 1
print 'amplitudes ok'
#calc spikewidths for each contact and raw, filtered shapes
spikewidths_raw = []
spikewidths_flt = []
cell.tvec = np.arange(cell.somav.size)*cell.timeres_python
tvec = cell.tvec[:TEMPLATELEN]
for j in xrange(3):
LFP = concTemplatesRaw[:, TEMPLATELEN*j:TEMPLATELEN*(j+1)]
spikewidths_raw.append(calc_spike_widths(LFP, tvec, threshold=0.5))
LFP = concTemplatesFlt[:, TEMPLATELEN*j:TEMPLATELEN*(j+1)]
spikewidths_flt.append(calc_spike_widths(LFP, tvec, threshold=0.5))
spikewidths_raw = np.array(spikewidths_raw)
spikewidths_flt = np.array(spikewidths_flt)
#spike width and amplitude of APs
AP_widths = calc_spike_widths(-concAPtemplates, tvec, threshold=0.5)
AP_amplitudes = concAPtemplates.max(axis=1) - concAPtemplates.min(axis=1)
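    #calc_spike_widths (defined earlier in this script) is assumed to measure the width
    #of the negative deflection at a fraction `threshold` of its depth (threshold=0.5,
    #i.e. width at half maximum); the somatic templates are sign-inverted above so that
    #their positive AP peak is treated the same way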
print 'spikewidths_*, AP_widths, AP_amplitudes ok'
#calculate projections according to Fee et al. 1996 (eq. 8)
projectionFeeRaw = None
projectionFeeFlt = None
data = concTemplatesRaw
for j in xrange(3):
templates = data[:, TEMPLATELEN*j:TEMPLATELEN*(j+1)]
notnans = np.isfinite(templates[:, 0])
V_Smean = templates[notnans, ][ISI[notnans, ] <= 10, ].mean(axis=0)
V_Lmean = templates[notnans, ][ISI[notnans, ] >= 100, ].mean(axis=0)
dV_LS = V_Lmean - V_Smean
if j == 0:
projectionFeeRaw = np.dot((templates - V_Lmean), dV_LS) / \
np.dot(dV_LS, dV_LS)
else:
projectionFeeRaw = np.r_[projectionFeeRaw,
np.dot((templates - V_Lmean), dV_LS) / \
np.dot(dV_LS, dV_LS)]
data = concTemplatesFlt
for j in xrange(3):
templates = data[:, TEMPLATELEN*j:TEMPLATELEN*(j+1)]
notnans = np.isfinite(templates[:, 0])
V_Smean = templates[notnans, ][ISI[notnans, ] <= 10, ].mean(axis=0)
V_Lmean = templates[notnans, ][ISI[notnans, ] >= 100, ].mean(axis=0)
dV_LS = V_Lmean - V_Smean
if j == 0:
projectionFeeFlt = np.dot((templates - V_Lmean), dV_LS) / \
np.dot(dV_LS, dV_LS)
else:
projectionFeeFlt = np.r_[projectionFeeFlt,
np.dot((templates - V_Lmean), dV_LS) / \
np.dot(dV_LS, dV_LS)]
projectionFeeRaw = projectionFeeRaw.reshape(3, -1)
projectionFeeFlt = projectionFeeFlt.reshape(3, -1)
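    #the projection above, written out as a small (unused) reference sketch; it assumes
    #`templates` holds one channel's spike waveforms row-wise and `isi` the preceding
    #inter-spike interval of each row, in ms
    def _fee_projection_sketch(templates, isi, short_ms=10., long_ms=100.):
        ok = np.isfinite(templates[:, 0])
        V_Smean = templates[ok][isi[ok] <= short_ms].mean(axis=0)  #short-ISI mean waveform
        V_Lmean = templates[ok][isi[ok] >= long_ms].mean(axis=0)   #long-ISI mean waveform
        dV_LS = V_Lmean - V_Smean
        #Fee et al. 1996, eq. 8: 0 at the long-ISI mean, -1 at the short-ISI mean
        return np.dot(templates - V_Lmean, dV_LS) / np.dot(dV_LS, dV_LS)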
#save additional sim results
    f = h5py.File(os.path.join(benchmark_data.savefolder, 'testISIshapes.h5'), 'a')
    for name, data in [('ISI', ISI),
                       ('templatesRaw', concTemplatesRaw),
                       ('templatesFlt', concTemplatesFlt),
                       ('amplitudes_raw', amplitudes_raw),
                       ('amplitudes_flt', amplitudes_flt),
                       ('spikewidths_raw', spikewidths_raw),
                       ('spikewidths_flt', spikewidths_flt),
                       ('projectionFeeRaw', projectionFeeRaw),
                       ('projectionFeeFlt', projectionFeeFlt),
                       ('APtemplates', concAPtemplates),
                       ('AP_widths', AP_widths),
                       ('AP_amplitudes', AP_amplitudes)]:
        if name in f:
            #remove any existing dataset with this name so it can be overwritten
            del f[name]
        f[name] = data
f.close()
#############################################################
# Plot some STUFF
#############################################################
fig = plot_figure_05(cells, benchmark_data, cmap=plt.cm.coolwarm)
fig.savefig(os.path.join(benchmark_data.savefolder, 'figure_05.pdf'), dpi=150)
fig = plot_figure_06(features, sp_waves, cmap=plt.cm.coolwarm)
    fig.savefig(os.path.join(benchmark_data.savefolder, 'figure_06.pdf'), dpi=150)
fig, ax = plt.subplots(1, figsize=(10, 10))
for i, cell in cells.items():
ax.plot(cell.somav+i*100, lw=0.5)
plt.axis('tight')
fig.savefig(os.path.join(benchmark_data.savefolder, 'somatraces.pdf'), dpi=300)
plt.show()
| gpl-2.0 |
shuangshuangwang/spark | python/pyspark/sql/dataframe.py | 1 | 99845 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import random
import warnings
from functools import reduce
from html import escape as html_escape
from pyspark import copy_func, since, _NoValue
from pyspark.rdd import RDD, _load_from_socket, _local_iterator_from_socket
from pyspark.serializers import BatchedSerializer, PickleSerializer, \
UTF8Deserializer
from pyspark.storagelevel import StorageLevel
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.sql.types import _parse_datatype_json_string
from pyspark.sql.column import Column, _to_seq, _to_list, _to_java_column
from pyspark.sql.readwriter import DataFrameWriter, DataFrameWriterV2
from pyspark.sql.streaming import DataStreamWriter
from pyspark.sql.types import StructType, StructField, StringType, IntegerType
from pyspark.sql.pandas.conversion import PandasConversionMixin
from pyspark.sql.pandas.map_ops import PandasMapOpsMixin
__all__ = ["DataFrame", "DataFrameNaFunctions", "DataFrameStatFunctions"]
class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
"""A distributed collection of data grouped into named columns.
A :class:`DataFrame` is equivalent to a relational table in Spark SQL,
and can be created using various functions in :class:`SparkSession`::
people = spark.read.parquet("...")
Once created, it can be manipulated using the various domain-specific-language
(DSL) functions defined in: :class:`DataFrame`, :class:`Column`.
To select a column from the :class:`DataFrame`, use the apply method::
ageCol = people.age
A more concrete example::
# To create DataFrame using SparkSession
people = spark.read.parquet("...")
department = spark.read.parquet("...")
people.filter(people.age > 30).join(department, people.deptId == department.id) \\
.groupBy(department.name, "gender").agg({"salary": "avg", "age": "max"})
.. versionadded:: 1.3.0
"""
def __init__(self, jdf, sql_ctx):
self._jdf = jdf
self.sql_ctx = sql_ctx
self._sc = sql_ctx and sql_ctx._sc
self.is_cached = False
self._schema = None # initialized lazily
self._lazy_rdd = None
# Check whether _repr_html is supported or not, we use it to avoid calling _jdf twice
# by __repr__ and _repr_html_ while eager evaluation opened.
self._support_repr_html = False
@property
@since(1.3)
def rdd(self):
"""Returns the content as an :class:`pyspark.RDD` of :class:`Row`.
"""
if self._lazy_rdd is None:
jrdd = self._jdf.javaToPython()
self._lazy_rdd = RDD(jrdd, self.sql_ctx._sc, BatchedSerializer(PickleSerializer()))
return self._lazy_rdd
@property
@since("1.3.1")
def na(self):
"""Returns a :class:`DataFrameNaFunctions` for handling missing values.
"""
return DataFrameNaFunctions(self)
@property
@since(1.4)
def stat(self):
"""Returns a :class:`DataFrameStatFunctions` for statistic functions.
"""
return DataFrameStatFunctions(self)
def toJSON(self, use_unicode=True):
"""Converts a :class:`DataFrame` into a :class:`RDD` of string.
Each row is turned into a JSON document as one element in the returned RDD.
.. versionadded:: 1.3.0
Examples
--------
>>> df.toJSON().first()
'{"age":2,"name":"Alice"}'
"""
rdd = self._jdf.toJSON()
return RDD(rdd.toJavaRDD(), self._sc, UTF8Deserializer(use_unicode))
def registerTempTable(self, name):
"""Registers this DataFrame as a temporary table using the given name.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
.. versionadded:: 1.3.0
.. deprecated:: 2.0.0
Use :meth:`DataFrame.createOrReplaceTempView` instead.
Examples
--------
>>> df.registerTempTable("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
"""
warnings.warn(
"Deprecated in 2.0, use createOrReplaceTempView instead.", DeprecationWarning)
self._jdf.createOrReplaceTempView(name)
def createTempView(self, name):
"""Creates a local temporary view with this :class:`DataFrame`.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
.. versionadded:: 2.0.0
Examples
--------
>>> df.createTempView("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createTempView(name)
def createOrReplaceTempView(self, name):
"""Creates or replaces a local temporary view with this :class:`DataFrame`.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
.. versionadded:: 2.0.0
Examples
--------
>>> df.createOrReplaceTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceTempView("people")
>>> df3 = spark.sql("select * from people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createOrReplaceTempView(name)
def createGlobalTempView(self, name):
"""Creates a global temporary view with this :class:`DataFrame`.
The lifetime of this temporary view is tied to this Spark application.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
.. versionadded:: 2.1.0
Examples
--------
>>> df.createGlobalTempView("people")
>>> df2 = spark.sql("select * from global_temp.people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createGlobalTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createGlobalTempView(name)
def createOrReplaceGlobalTempView(self, name):
"""Creates or replaces a global temporary view using the given name.
The lifetime of this temporary view is tied to this Spark application.
.. versionadded:: 2.2.0
Examples
--------
>>> df.createOrReplaceGlobalTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceGlobalTempView("people")
>>> df3 = spark.sql("select * from global_temp.people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createOrReplaceGlobalTempView(name)
@property
def write(self):
"""
Interface for saving the content of the non-streaming :class:`DataFrame` out into external
storage.
.. versionadded:: 1.4.0
Returns
-------
:class:`DataFrameWriter`
"""
return DataFrameWriter(self)
@property
def writeStream(self):
"""
Interface for saving the content of the streaming :class:`DataFrame` out into external
storage.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
Returns
-------
:class:`DataStreamWriter`
"""
return DataStreamWriter(self)
@property
def schema(self):
"""Returns the schema of this :class:`DataFrame` as a :class:`pyspark.sql.types.StructType`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.schema
StructType(List(StructField(age,IntegerType,true),StructField(name,StringType,true)))
"""
if self._schema is None:
try:
self._schema = _parse_datatype_json_string(self._jdf.schema().json())
except AttributeError as e:
raise Exception(
"Unable to parse datatype from schema. %s" % e)
return self._schema
def printSchema(self):
"""Prints out the schema in the tree format.
.. versionadded:: 1.3.0
Examples
--------
>>> df.printSchema()
root
|-- age: integer (nullable = true)
|-- name: string (nullable = true)
<BLANKLINE>
"""
print(self._jdf.schema().treeString())
def explain(self, extended=None, mode=None):
"""Prints the (logical and physical) plans to the console for debugging purpose.
.. versionadded:: 1.3.0
        Parameters
----------
extended : bool, optional
default ``False``. If ``False``, prints only the physical plan.
            When this is a string and ``mode`` is not specified, the string itself is
            treated as the mode.
mode : str, optional
specifies the expected output format of plans.
* ``simple``: Print only a physical plan.
* ``extended``: Print both logical and physical plans.
* ``codegen``: Print a physical plan and generated codes if they are available.
* ``cost``: Print a logical plan and statistics if they are available.
* ``formatted``: Split explain output into two sections: a physical plan outline \
and node details.
.. versionchanged:: 3.0.0
Added optional argument `mode` to specify the expected output format of plans.
Examples
--------
>>> df.explain()
== Physical Plan ==
*(1) Scan ExistingRDD[age#0,name#1]
>>> df.explain(True)
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
>>> df.explain(mode="formatted")
== Physical Plan ==
* Scan ExistingRDD (1)
(1) Scan ExistingRDD [codegen id : 1]
Output [2]: [age#0, name#1]
...
>>> df.explain("cost")
== Optimized Logical Plan ==
...Statistics...
...
"""
if extended is not None and mode is not None:
raise Exception("extended and mode should not be set together.")
# For the no argument case: df.explain()
is_no_argument = extended is None and mode is None
# For the cases below:
# explain(True)
# explain(extended=False)
is_extended_case = isinstance(extended, bool) and mode is None
# For the case when extended is mode:
# df.explain("formatted")
is_extended_as_mode = isinstance(extended, str) and mode is None
# For the mode specified:
# df.explain(mode="formatted")
is_mode_case = extended is None and isinstance(mode, str)
if not (is_no_argument or is_extended_case or is_extended_as_mode or is_mode_case):
argtypes = [
str(type(arg)) for arg in [extended, mode] if arg is not None]
raise TypeError(
"extended (optional) and mode (optional) should be a string "
"and bool; however, got [%s]." % ", ".join(argtypes))
# Sets an explain mode depending on a given argument
if is_no_argument:
explain_mode = "simple"
elif is_extended_case:
explain_mode = "extended" if extended else "simple"
elif is_mode_case:
explain_mode = mode
elif is_extended_as_mode:
explain_mode = extended
print(self._sc._jvm.PythonSQLUtils.explainString(self._jdf.queryExecution(), explain_mode))
def exceptAll(self, other):
"""Return a new :class:`DataFrame` containing rows in this :class:`DataFrame` but
not in another :class:`DataFrame` while preserving duplicates.
This is equivalent to `EXCEPT ALL` in SQL.
As standard in SQL, this function resolves columns by position (not by name).
.. versionadded:: 2.4.0
Examples
--------
>>> df1 = spark.createDataFrame(
... [("a", 1), ("a", 1), ("a", 1), ("a", 2), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.exceptAll(df2).show()
+---+---+
| C1| C2|
+---+---+
| a| 1|
| a| 1|
| a| 2|
| c| 4|
+---+---+
"""
return DataFrame(self._jdf.exceptAll(other._jdf), self.sql_ctx)
@since(1.3)
def isLocal(self):
"""Returns ``True`` if the :func:`collect` and :func:`take` methods can be run locally
(without any Spark executors).
"""
return self._jdf.isLocal()
@property
def isStreaming(self):
"""Returns ``True`` if this :class:`Dataset` contains one or more sources that continuously
return data as it arrives. A :class:`Dataset` that reads data from a streaming source
must be executed as a :class:`StreamingQuery` using the :func:`start` method in
:class:`DataStreamWriter`. Methods that return a single answer, (e.g., :func:`count` or
:func:`collect`) will throw an :class:`AnalysisException` when there is a streaming
source present.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
"""
return self._jdf.isStreaming()
def show(self, n=20, truncate=True, vertical=False):
"""Prints the first ``n`` rows to the console.
.. versionadded:: 1.3.0
Parameters
----------
n : int, optional
Number of rows to show.
truncate : bool, optional
If set to ``True``, truncate strings longer than 20 chars by default.
            If set to a number greater than one, truncates long strings to length ``truncate``
            and aligns cells right.
vertical : bool, optional
If set to ``True``, print output rows vertically (one line
per column value).
Examples
--------
>>> df
DataFrame[age: int, name: string]
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.show(truncate=3)
+---+----+
|age|name|
+---+----+
| 2| Ali|
| 5| Bob|
+---+----+
>>> df.show(vertical=True)
-RECORD 0-----
age | 2
name | Alice
-RECORD 1-----
age | 5
name | Bob
"""
if isinstance(truncate, bool) and truncate:
print(self._jdf.showString(n, 20, vertical))
else:
print(self._jdf.showString(n, int(truncate), vertical))
def __repr__(self):
if not self._support_repr_html and self.sql_ctx._conf.isReplEagerEvalEnabled():
vertical = False
return self._jdf.showString(
self.sql_ctx._conf.replEagerEvalMaxNumRows(),
self.sql_ctx._conf.replEagerEvalTruncate(), vertical)
else:
return "DataFrame[%s]" % (", ".join("%s: %s" % c for c in self.dtypes))
def _repr_html_(self):
"""Returns a :class:`DataFrame` with html code when you enabled eager evaluation
by 'spark.sql.repl.eagerEval.enabled', this only called by REPL you are
using support eager evaluation with HTML.
"""
if not self._support_repr_html:
self._support_repr_html = True
if self.sql_ctx._conf.isReplEagerEvalEnabled():
max_num_rows = max(self.sql_ctx._conf.replEagerEvalMaxNumRows(), 0)
sock_info = self._jdf.getRowsToPython(
max_num_rows, self.sql_ctx._conf.replEagerEvalTruncate())
rows = list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
head = rows[0]
row_data = rows[1:]
has_more_data = len(row_data) > max_num_rows
row_data = row_data[:max_num_rows]
html = "<table border='1'>\n"
# generate table head
html += "<tr><th>%s</th></tr>\n" % "</th><th>".join(map(lambda x: html_escape(x), head))
# generate table rows
for row in row_data:
html += "<tr><td>%s</td></tr>\n" % "</td><td>".join(
map(lambda x: html_escape(x), row))
html += "</table>\n"
if has_more_data:
html += "only showing top %d %s\n" % (
max_num_rows, "row" if max_num_rows == 1 else "rows")
return html
else:
return None
def checkpoint(self, eager=True):
"""Returns a checkpointed version of this Dataset. Checkpointing can be used to truncate the
logical plan of this :class:`DataFrame`, which is especially useful in iterative algorithms
where the plan may grow exponentially. It will be saved to files inside the checkpoint
directory set with :meth:`SparkContext.setCheckpointDir`.
.. versionadded:: 2.1.0
Parameters
----------
eager : bool, optional
Whether to checkpoint this :class:`DataFrame` immediately
Notes
-----
This API is experimental.
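        Examples
        --------
        A sketch only; the checkpoint directory below is illustrative, so the calls
        are skipped under doctest.
        >>> spark.sparkContext.setCheckpointDir("/tmp/spark-checkpoints")  # doctest: +SKIP
        >>> df.checkpoint(eager=True)  # doctest: +SKIP
        DataFrame[age: int, name: string]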
"""
jdf = self._jdf.checkpoint(eager)
return DataFrame(jdf, self.sql_ctx)
def localCheckpoint(self, eager=True):
"""Returns a locally checkpointed version of this Dataset. Checkpointing can be used to
truncate the logical plan of this :class:`DataFrame`, which is especially useful in
iterative algorithms where the plan may grow exponentially. Local checkpoints are
stored in the executors using the caching subsystem and therefore they are not reliable.
.. versionadded:: 2.3.0
Parameters
----------
eager : bool, optional
Whether to checkpoint this :class:`DataFrame` immediately
Notes
-----
This API is experimental.
"""
jdf = self._jdf.localCheckpoint(eager)
return DataFrame(jdf, self.sql_ctx)
def withWatermark(self, eventTime, delayThreshold):
"""Defines an event time watermark for this :class:`DataFrame`. A watermark tracks a point
in time before which we assume no more late data is going to arrive.
Spark will use this watermark for several purposes:
- To know when a given time window aggregation can be finalized and thus can be emitted
when using output modes that do not allow updates.
- To minimize the amount of state that we need to keep for on-going aggregations.
The current watermark is computed by looking at the `MAX(eventTime)` seen across
all of the partitions in the query minus a user specified `delayThreshold`. Due to the cost
of coordinating this value across partitions, the actual watermark used is only guaranteed
to be at least `delayThreshold` behind the actual event time. In some cases we may still
process records that arrive more than `delayThreshold` late.
.. versionadded:: 2.1.0
Parameters
----------
eventTime : str or :class:`Column`
the name of the column that contains the event time of the row.
delayThreshold : str
            the minimum delay to wait for data to arrive late, relative to the
latest record that has been processed in the form of an interval
(e.g. "1 minute" or "5 hours").
Notes
-----
This API is evolving.
>>> from pyspark.sql.functions import timestamp_seconds
>>> sdf.select(
... 'name',
... timestamp_seconds(sdf.time).alias('time')).withWatermark('time', '10 minutes')
DataFrame[name: string, time: timestamp]
"""
if not eventTime or type(eventTime) is not str:
raise TypeError("eventTime should be provided as a string")
if not delayThreshold or type(delayThreshold) is not str:
raise TypeError("delayThreshold should be provided as a string interval")
jdf = self._jdf.withWatermark(eventTime, delayThreshold)
return DataFrame(jdf, self.sql_ctx)
def hint(self, name, *parameters):
"""Specifies some hint on the current :class:`DataFrame`.
.. versionadded:: 2.2.0
Parameters
----------
name : str
A name of the hint.
parameters : str, list, float or int
Optional parameters.
Returns
-------
:class:`DataFrame`
Examples
--------
>>> df.join(df2.hint("broadcast"), "name").show()
+----+---+------+
|name|age|height|
+----+---+------+
| Bob| 5| 85|
+----+---+------+
"""
if len(parameters) == 1 and isinstance(parameters[0], list):
parameters = parameters[0]
if not isinstance(name, str):
raise TypeError("name should be provided as str, got {0}".format(type(name)))
allowed_types = (str, list, float, int)
for p in parameters:
if not isinstance(p, allowed_types):
raise TypeError(
"all parameters should be in {0}, got {1} of type {2}".format(
allowed_types, p, type(p)))
jdf = self._jdf.hint(name, self._jseq(parameters))
return DataFrame(jdf, self.sql_ctx)
def count(self):
"""Returns the number of rows in this :class:`DataFrame`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.count()
2
"""
return int(self._jdf.count())
def collect(self):
"""Returns all the records as a list of :class:`Row`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.collect()
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
"""
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.collectToPython()
return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
def toLocalIterator(self, prefetchPartitions=False):
"""
Returns an iterator that contains all of the rows in this :class:`DataFrame`.
The iterator will consume as much memory as the largest partition in this
:class:`DataFrame`. With prefetch it may consume up to the memory of the 2 largest
partitions.
.. versionadded:: 2.0.0
Parameters
----------
prefetchPartitions : bool, optional
If Spark should pre-fetch the next partition before it is needed.
Examples
--------
>>> list(df.toLocalIterator())
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
"""
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.toPythonIterator(prefetchPartitions)
return _local_iterator_from_socket(sock_info, BatchedSerializer(PickleSerializer()))
def limit(self, num):
"""Limits the result count to the number specified.
.. versionadded:: 1.3.0
Examples
--------
>>> df.limit(1).collect()
[Row(age=2, name='Alice')]
>>> df.limit(0).collect()
[]
"""
jdf = self._jdf.limit(num)
return DataFrame(jdf, self.sql_ctx)
def take(self, num):
"""Returns the first ``num`` rows as a :class:`list` of :class:`Row`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.take(2)
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
"""
return self.limit(num).collect()
def tail(self, num):
"""
Returns the last ``num`` rows as a :class:`list` of :class:`Row`.
Running tail requires moving data into the application's driver process, and doing so with
a very large ``num`` can crash the driver process with OutOfMemoryError.
.. versionadded:: 3.0.0
Examples
--------
>>> df.tail(1)
[Row(age=5, name='Bob')]
"""
with SCCallSiteSync(self._sc):
sock_info = self._jdf.tailToPython(num)
return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
def foreach(self, f):
"""Applies the ``f`` function to all :class:`Row` of this :class:`DataFrame`.
This is a shorthand for ``df.rdd.foreach()``.
.. versionadded:: 1.3.0
Examples
--------
>>> def f(person):
... print(person.name)
>>> df.foreach(f)
"""
self.rdd.foreach(f)
def foreachPartition(self, f):
"""Applies the ``f`` function to each partition of this :class:`DataFrame`.
        This is a shorthand for ``df.rdd.foreachPartition()``.
.. versionadded:: 1.3.0
Examples
--------
>>> def f(people):
... for person in people:
... print(person.name)
>>> df.foreachPartition(f)
"""
self.rdd.foreachPartition(f)
def cache(self):
"""Persists the :class:`DataFrame` with the default storage level (`MEMORY_AND_DISK`).
.. versionadded:: 1.3.0
Notes
-----
The default storage level has changed to `MEMORY_AND_DISK` to match Scala in 2.0.
"""
self.is_cached = True
self._jdf.cache()
return self
def persist(self, storageLevel=StorageLevel.MEMORY_AND_DISK_DESER):
"""Sets the storage level to persist the contents of the :class:`DataFrame` across
operations after the first time it is computed. This can only be used to assign
a new storage level if the :class:`DataFrame` does not have a storage level set yet.
        If no storage level is specified, it defaults to `MEMORY_AND_DISK_DESER`.
.. versionadded:: 1.3.0
Notes
-----
The default storage level has changed to `MEMORY_AND_DISK_DESER` to match Scala in 3.0.
"""
self.is_cached = True
javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)
self._jdf.persist(javaStorageLevel)
return self
@property
def storageLevel(self):
"""Get the :class:`DataFrame`'s current storage level.
.. versionadded:: 2.1.0
Examples
--------
>>> df.storageLevel
StorageLevel(False, False, False, False, 1)
>>> df.cache().storageLevel
StorageLevel(True, True, False, True, 1)
>>> df2.persist(StorageLevel.DISK_ONLY_2).storageLevel
StorageLevel(True, False, False, False, 2)
"""
java_storage_level = self._jdf.storageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
def unpersist(self, blocking=False):
"""Marks the :class:`DataFrame` as non-persistent, and remove all blocks for it from
memory and disk.
.. versionadded:: 1.3.0
Notes
-----
`blocking` default has changed to ``False`` to match Scala in 2.0.
"""
self.is_cached = False
self._jdf.unpersist(blocking)
return self
def coalesce(self, numPartitions):
"""
Returns a new :class:`DataFrame` that has exactly `numPartitions` partitions.
Similar to coalesce defined on an :class:`RDD`, this operation results in a
narrow dependency, e.g. if you go from 1000 partitions to 100 partitions,
there will not be a shuffle, instead each of the 100 new partitions will
claim 10 of the current partitions. If a larger number of partitions is requested,
it will stay at the current number of partitions.
However, if you're doing a drastic coalesce, e.g. to numPartitions = 1,
this may result in your computation taking place on fewer nodes than
you like (e.g. one node in the case of numPartitions = 1). To avoid this,
you can call repartition(). This will add a shuffle step, but means the
current upstream partitions will be executed in parallel (per whatever
the current partitioning is).
.. versionadded:: 1.4.0
Parameters
----------
numPartitions : int
specify the target number of partitions
Examples
--------
>>> df.coalesce(1).rdd.getNumPartitions()
1
"""
return DataFrame(self._jdf.coalesce(numPartitions), self.sql_ctx)
def repartition(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting :class:`DataFrame` is hash partitioned.
.. versionadded:: 1.3.0
Parameters
----------
numPartitions : int
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
cols : str or :class:`Column`
partitioning columns.
.. versionchanged:: 1.6
Added optional arguments to specify the partitioning columns. Also made numPartitions
optional if partitioning columns are specified.
Examples
--------
>>> df.repartition(10).rdd.getNumPartitions()
10
>>> data = df.union(df).repartition("age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
>>> data = data.repartition(7, "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
| 2|Alice|
| 5| Bob|
+---+-----+
>>> data.rdd.getNumPartitions()
7
>>> data = data.repartition("name", "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
return DataFrame(self._jdf.repartition(numPartitions), self.sql_ctx)
else:
return DataFrame(
self._jdf.repartition(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (str, Column)):
cols = (numPartitions, ) + cols
return DataFrame(self._jdf.repartition(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int or Column")
def repartitionByRange(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting :class:`DataFrame` is range partitioned.
At least one partition-by expression must be specified.
When no explicit sort order is specified, "ascending nulls first" is assumed.
.. versionadded:: 2.4.0
Parameters
----------
numPartitions : int
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
cols : str or :class:`Column`
partitioning columns.
Notes
-----
Due to performance reasons this method uses sampling to estimate the ranges.
Hence, the output may not be consistent, since sampling can return different values.
The sample size can be controlled by the config
`spark.sql.execution.rangeExchange.sampleSizePerPartition`.
Examples
--------
>>> df.repartitionByRange(2, "age").rdd.getNumPartitions()
2
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.repartitionByRange(1, "age").rdd.getNumPartitions()
1
>>> data = df.repartitionByRange("age")
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
                raise ValueError("At least one partition-by expression must be specified.")
else:
return DataFrame(
self._jdf.repartitionByRange(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (str, Column)):
cols = (numPartitions,) + cols
return DataFrame(self._jdf.repartitionByRange(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int, string or Column")
def distinct(self):
"""Returns a new :class:`DataFrame` containing the distinct rows in this :class:`DataFrame`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.distinct().count()
2
"""
return DataFrame(self._jdf.distinct(), self.sql_ctx)
def sample(self, withReplacement=None, fraction=None, seed=None):
"""Returns a sampled subset of this :class:`DataFrame`.
.. versionadded:: 1.3.0
Parameters
----------
withReplacement : bool, optional
Sample with replacement or not (default ``False``).
fraction : float, optional
Fraction of rows to generate, range [0.0, 1.0].
seed : int, optional
Seed for sampling (default a random seed).
Notes
-----
This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
        `fraction` is required; `withReplacement` and `seed` are optional.
Examples
--------
>>> df = spark.range(10)
>>> df.sample(0.5, 3).count()
7
>>> df.sample(fraction=0.5, seed=3).count()
7
>>> df.sample(withReplacement=True, fraction=0.5, seed=3).count()
1
>>> df.sample(1.0).count()
10
>>> df.sample(fraction=1.0).count()
10
>>> df.sample(False, fraction=1.0).count()
10
"""
# For the cases below:
# sample(True, 0.5 [, seed])
# sample(True, fraction=0.5 [, seed])
# sample(withReplacement=False, fraction=0.5 [, seed])
is_withReplacement_set = \
type(withReplacement) == bool and isinstance(fraction, float)
# For the case below:
        # sample(fraction=0.5 [, seed])
is_withReplacement_omitted_kwargs = \
withReplacement is None and isinstance(fraction, float)
# For the case below:
# sample(0.5 [, seed])
is_withReplacement_omitted_args = isinstance(withReplacement, float)
if not (is_withReplacement_set
or is_withReplacement_omitted_kwargs
or is_withReplacement_omitted_args):
argtypes = [
str(type(arg)) for arg in [withReplacement, fraction, seed] if arg is not None]
raise TypeError(
"withReplacement (optional), fraction (required) and seed (optional)"
" should be a bool, float and number; however, "
"got [%s]." % ", ".join(argtypes))
if is_withReplacement_omitted_args:
if fraction is not None:
seed = fraction
fraction = withReplacement
withReplacement = None
seed = int(seed) if seed is not None else None
args = [arg for arg in [withReplacement, fraction, seed] if arg is not None]
jdf = self._jdf.sample(*args)
return DataFrame(jdf, self.sql_ctx)
def sampleBy(self, col, fractions, seed=None):
"""
Returns a stratified sample without replacement based on the
fraction given on each stratum.
.. versionadded:: 1.5.0
Parameters
----------
col : :class:`Column` or str
column that defines strata
.. versionchanged:: 3.0
Added sampling by a column of :class:`Column`
fractions : dict
sampling fraction for each stratum. If a stratum is not
specified, we treat its fraction as zero.
seed : int, optional
random seed
Returns
-------
a new :class:`DataFrame` that represents the stratified sample
Examples
--------
>>> from pyspark.sql.functions import col
>>> dataset = sqlContext.range(0, 100).select((col("id") % 3).alias("key"))
>>> sampled = dataset.sampleBy("key", fractions={0: 0.1, 1: 0.2}, seed=0)
>>> sampled.groupBy("key").count().orderBy("key").show()
+---+-----+
|key|count|
+---+-----+
| 0| 3|
| 1| 6|
+---+-----+
>>> dataset.sampleBy(col("key"), fractions={2: 1.0}, seed=0).count()
33
"""
if isinstance(col, str):
col = Column(col)
elif not isinstance(col, Column):
raise ValueError("col must be a string or a column, but got %r" % type(col))
if not isinstance(fractions, dict):
raise ValueError("fractions must be a dict but got %r" % type(fractions))
for k, v in fractions.items():
if not isinstance(k, (float, int, str)):
raise ValueError("key must be float, int, or string, but got %r" % type(k))
fractions[k] = float(v)
col = col._jc
seed = seed if seed is not None else random.randint(0, sys.maxsize)
return DataFrame(self._jdf.stat().sampleBy(col, self._jmap(fractions), seed), self.sql_ctx)
def randomSplit(self, weights, seed=None):
"""Randomly splits this :class:`DataFrame` with the provided weights.
.. versionadded:: 1.4.0
Parameters
----------
weights : list
list of doubles as weights with which to split the :class:`DataFrame`.
Weights will be normalized if they don't sum up to 1.0.
seed : int, optional
The seed for sampling.
Examples
--------
>>> splits = df4.randomSplit([1.0, 2.0], 24)
>>> splits[0].count()
2
>>> splits[1].count()
2
"""
for w in weights:
if w < 0.0:
raise ValueError("Weights must be positive. Found weight value: %s" % w)
seed = seed if seed is not None else random.randint(0, sys.maxsize)
rdd_array = self._jdf.randomSplit(_to_list(self.sql_ctx._sc, weights), int(seed))
return [DataFrame(rdd, self.sql_ctx) for rdd in rdd_array]
@property
def dtypes(self):
"""Returns all column names and their data types as a list.
.. versionadded:: 1.3.0
Examples
--------
>>> df.dtypes
[('age', 'int'), ('name', 'string')]
"""
return [(str(f.name), f.dataType.simpleString()) for f in self.schema.fields]
@property
def columns(self):
"""Returns all column names as a list.
.. versionadded:: 1.3.0
Examples
--------
>>> df.columns
['age', 'name']
"""
return [f.name for f in self.schema.fields]
def colRegex(self, colName):
"""
Selects column based on the column name specified as a regex and returns it
as :class:`Column`.
.. versionadded:: 2.3.0
Parameters
----------
colName : str
string, column name specified as a regex.
Examples
--------
>>> df = spark.createDataFrame([("a", 1), ("b", 2), ("c", 3)], ["Col1", "Col2"])
>>> df.select(df.colRegex("`(Col1)?+.+`")).show()
+----+
|Col2|
+----+
| 1|
| 2|
| 3|
+----+
"""
if not isinstance(colName, str):
raise ValueError("colName should be provided as string")
jc = self._jdf.colRegex(colName)
return Column(jc)
def alias(self, alias):
"""Returns a new :class:`DataFrame` with an alias set.
.. versionadded:: 1.3.0
Parameters
----------
alias : str
an alias name to be set for the :class:`DataFrame`.
Examples
--------
>>> from pyspark.sql.functions import *
>>> df_as1 = df.alias("df_as1")
>>> df_as2 = df.alias("df_as2")
>>> joined_df = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), 'inner')
>>> joined_df.select("df_as1.name", "df_as2.name", "df_as2.age") \
.sort(desc("df_as1.name")).collect()
[Row(name='Bob', name='Bob', age=5), Row(name='Alice', name='Alice', age=2)]
"""
assert isinstance(alias, str), "alias should be a string"
return DataFrame(getattr(self._jdf, "as")(alias), self.sql_ctx)
def crossJoin(self, other):
"""Returns the cartesian product with another :class:`DataFrame`.
.. versionadded:: 2.1.0
Parameters
----------
other : :class:`DataFrame`
Right side of the cartesian product.
Examples
--------
>>> df.select("age", "name").collect()
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
>>> df2.select("name", "height").collect()
[Row(name='Tom', height=80), Row(name='Bob', height=85)]
>>> df.crossJoin(df2.select("height")).select("age", "name", "height").collect()
[Row(age=2, name='Alice', height=80), Row(age=2, name='Alice', height=85),
Row(age=5, name='Bob', height=80), Row(age=5, name='Bob', height=85)]
"""
jdf = self._jdf.crossJoin(other._jdf)
return DataFrame(jdf, self.sql_ctx)
def join(self, other, on=None, how=None):
"""Joins with another :class:`DataFrame`, using the given join expression.
.. versionadded:: 1.3.0
Parameters
----------
other : :class:`DataFrame`
Right side of the join
on : str, list or :class:`Column`, optional
a string for the join column name, a list of column names,
a join expression (Column), or a list of Columns.
If `on` is a string or a list of strings indicating the name of the join column(s),
the column(s) must exist on both sides, and this performs an equi-join.
how : str, optional
default ``inner``. Must be one of: ``inner``, ``cross``, ``outer``,
``full``, ``fullouter``, ``full_outer``, ``left``, ``leftouter``, ``left_outer``,
``right``, ``rightouter``, ``right_outer``, ``semi``, ``leftsemi``, ``left_semi``,
``anti``, ``leftanti`` and ``left_anti``.
Examples
--------
The following performs a full outer join between ``df1`` and ``df2``.
>>> from pyspark.sql.functions import desc
>>> df.join(df2, df.name == df2.name, 'outer').select(df.name, df2.height) \
.sort(desc("name")).collect()
[Row(name='Bob', height=85), Row(name='Alice', height=None), Row(name=None, height=80)]
>>> df.join(df2, 'name', 'outer').select('name', 'height').sort(desc("name")).collect()
[Row(name='Tom', height=80), Row(name='Bob', height=85), Row(name='Alice', height=None)]
>>> cond = [df.name == df3.name, df.age == df3.age]
>>> df.join(df3, cond, 'outer').select(df.name, df3.age).collect()
[Row(name='Alice', age=2), Row(name='Bob', age=5)]
>>> df.join(df2, 'name').select(df.name, df2.height).collect()
[Row(name='Bob', height=85)]
>>> df.join(df4, ['name', 'age']).select(df.name, df.age).collect()
[Row(name='Bob', age=5)]
"""
if on is not None and not isinstance(on, list):
on = [on]
if on is not None:
if isinstance(on[0], str):
on = self._jseq(on)
else:
assert isinstance(on[0], Column), "on should be Column or list of Column"
on = reduce(lambda x, y: x.__and__(y), on)
on = on._jc
if on is None and how is None:
jdf = self._jdf.join(other._jdf)
else:
if how is None:
how = "inner"
if on is None:
on = self._jseq([])
assert isinstance(how, str), "how should be a string"
jdf = self._jdf.join(other._jdf, on, how)
return DataFrame(jdf, self.sql_ctx)
def sortWithinPartitions(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` with each partition sorted by the specified column(s).
.. versionadded:: 1.6.0
Parameters
----------
cols : str, list or :class:`Column`, optional
list of :class:`Column` or column names to sort by.
Other Parameters
----------------
ascending : bool or list, optional
boolean or list of boolean (default ``True``).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
Examples
--------
>>> df.sortWithinPartitions("age", ascending=False).show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
jdf = self._jdf.sortWithinPartitions(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
def sort(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` sorted by the specified column(s).
.. versionadded:: 1.3.0
Parameters
----------
cols : str, list, or :class:`Column`, optional
list of :class:`Column` or column names to sort by.
Other Parameters
----------------
ascending : bool or list, optional
boolean or list of boolean (default ``True``).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
Examples
--------
>>> df.sort(df.age.desc()).collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
>>> df.sort("age", ascending=False).collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
>>> df.orderBy(df.age.desc()).collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
>>> from pyspark.sql.functions import *
>>> df.sort(asc("age")).collect()
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
>>> df.orderBy(desc("age"), "name").collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
>>> df.orderBy(["age", "name"], ascending=[0, 1]).collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
"""
jdf = self._jdf.sort(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
orderBy = sort
def _jseq(self, cols, converter=None):
"""Return a JVM Seq of Columns from a list of Column or names"""
return _to_seq(self.sql_ctx._sc, cols, converter)
def _jmap(self, jm):
"""Return a JVM Scala Map from a dict"""
return _to_scala_map(self.sql_ctx._sc, jm)
def _jcols(self, *cols):
"""Return a JVM Seq of Columns from a list of Column or column names
If `cols` has only one list in it, cols[0] will be used as the list.
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
return self._jseq(cols, _to_java_column)
def _sort_cols(self, cols, kwargs):
""" Return a JVM Seq of Columns that describes the sort order
"""
if not cols:
raise ValueError("should sort by at least one column")
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jcols = [_to_java_column(c) for c in cols]
ascending = kwargs.get('ascending', True)
if isinstance(ascending, (bool, int)):
if not ascending:
jcols = [jc.desc() for jc in jcols]
elif isinstance(ascending, list):
jcols = [jc if asc else jc.desc()
for asc, jc in zip(ascending, jcols)]
else:
raise TypeError("ascending can only be boolean or list, but got %s" % type(ascending))
return self._jseq(jcols)
def describe(self, *cols):
"""Computes basic statistics for numeric and string columns.
.. versionadded:: 1.3.1
        This includes count, mean, stddev, min, and max. If no columns are
given, this function computes statistics for all numerical or string columns.
Notes
-----
This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
Use summary for expanded statistics and control over which statistics to compute.
Examples
--------
>>> df.describe(['age']).show()
+-------+------------------+
|summary| age|
+-------+------------------+
| count| 2|
| mean| 3.5|
| stddev|2.1213203435596424|
| min| 2|
| max| 5|
+-------+------------------+
>>> df.describe().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| max| 5| Bob|
+-------+------------------+-----+
See Also
--------
DataFrame.summary
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jdf = self._jdf.describe(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
def summary(self, *statistics):
"""Computes specified statistics for numeric and string columns. Available statistics are:
- count
- mean
- stddev
- min
- max
        - arbitrary approximate percentiles specified as a percentage (e.g., 75%)
If no statistics are given, this function computes count, mean, stddev, min,
approximate quartiles (percentiles at 25%, 50%, and 75%), and max.
.. versionadded:: 2.3.0
Notes
-----
This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
Examples
--------
>>> df.summary().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| 25%| 2| null|
| 50%| 2| null|
| 75%| 5| null|
| max| 5| Bob|
+-------+------------------+-----+
>>> df.summary("count", "min", "25%", "75%", "max").show()
+-------+---+-----+
|summary|age| name|
+-------+---+-----+
| count| 2| 2|
| min| 2|Alice|
| 25%| 2| null|
| 75%| 5| null|
| max| 5| Bob|
+-------+---+-----+
To do a summary for specific columns first select them:
>>> df.select("age", "name").summary("count").show()
+-------+---+----+
|summary|age|name|
+-------+---+----+
| count| 2| 2|
+-------+---+----+
See Also
--------
DataFrame.display
"""
if len(statistics) == 1 and isinstance(statistics[0], list):
statistics = statistics[0]
jdf = self._jdf.summary(self._jseq(statistics))
return DataFrame(jdf, self.sql_ctx)
def head(self, n=None):
"""Returns the first ``n`` rows.
.. versionadded:: 1.3.0
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
n : int, optional
default 1. Number of rows to return.
Returns
-------
If n is greater than 1, return a list of :class:`Row`.
If n is 1, return a single Row.
Examples
--------
>>> df.head()
Row(age=2, name='Alice')
>>> df.head(1)
[Row(age=2, name='Alice')]
"""
if n is None:
rs = self.head(1)
return rs[0] if rs else None
return self.take(n)
def first(self):
"""Returns the first row as a :class:`Row`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.first()
Row(age=2, name='Alice')
"""
return self.head()
def __getitem__(self, item):
"""Returns the column as a :class:`Column`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.select(df['age']).collect()
[Row(age=2), Row(age=5)]
>>> df[ ["name", "age"]].collect()
[Row(name='Alice', age=2), Row(name='Bob', age=5)]
>>> df[ df.age > 3 ].collect()
[Row(age=5, name='Bob')]
>>> df[df[0] > 3].collect()
[Row(age=5, name='Bob')]
"""
if isinstance(item, str):
jc = self._jdf.apply(item)
return Column(jc)
elif isinstance(item, Column):
return self.filter(item)
elif isinstance(item, (list, tuple)):
return self.select(*item)
elif isinstance(item, int):
jc = self._jdf.apply(self.columns[item])
return Column(jc)
else:
raise TypeError("unexpected item type: %s" % type(item))
def __getattr__(self, name):
"""Returns the :class:`Column` denoted by ``name``.
.. versionadded:: 1.3.0
Examples
--------
>>> df.select(df.age).collect()
[Row(age=2), Row(age=5)]
"""
if name not in self.columns:
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
jc = self._jdf.apply(name)
return Column(jc)
def select(self, *cols):
"""Projects a set of expressions and returns a new :class:`DataFrame`.
.. versionadded:: 1.3.0
Parameters
----------
cols : str, :class:`Column`, or list
column names (string) or expressions (:class:`Column`).
If one of the column names is '*', that column is expanded to include all columns
in the current :class:`DataFrame`.
Examples
--------
>>> df.select('*').collect()
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
>>> df.select('name', 'age').collect()
[Row(name='Alice', age=2), Row(name='Bob', age=5)]
>>> df.select(df.name, (df.age + 10).alias('age')).collect()
[Row(name='Alice', age=12), Row(name='Bob', age=15)]
"""
jdf = self._jdf.select(self._jcols(*cols))
return DataFrame(jdf, self.sql_ctx)
def selectExpr(self, *expr):
"""Projects a set of SQL expressions and returns a new :class:`DataFrame`.
This is a variant of :func:`select` that accepts SQL expressions.
.. versionadded:: 1.3.0
Examples
--------
>>> df.selectExpr("age * 2", "abs(age)").collect()
[Row((age * 2)=4, abs(age)=2), Row((age * 2)=10, abs(age)=5)]
"""
if len(expr) == 1 and isinstance(expr[0], list):
expr = expr[0]
jdf = self._jdf.selectExpr(self._jseq(expr))
return DataFrame(jdf, self.sql_ctx)
def filter(self, condition):
"""Filters rows using the given condition.
:func:`where` is an alias for :func:`filter`.
.. versionadded:: 1.3.0
Parameters
----------
condition : :class:`Column` or str
a :class:`Column` of :class:`types.BooleanType`
or a string of SQL expression.
Examples
--------
>>> df.filter(df.age > 3).collect()
[Row(age=5, name='Bob')]
>>> df.where(df.age == 2).collect()
[Row(age=2, name='Alice')]
>>> df.filter("age > 3").collect()
[Row(age=5, name='Bob')]
>>> df.where("age = 2").collect()
[Row(age=2, name='Alice')]
"""
if isinstance(condition, str):
jdf = self._jdf.filter(condition)
elif isinstance(condition, Column):
jdf = self._jdf.filter(condition._jc)
else:
raise TypeError("condition should be string or Column")
return DataFrame(jdf, self.sql_ctx)
def groupBy(self, *cols):
"""Groups the :class:`DataFrame` using the specified columns,
so we can run aggregation on them. See :class:`GroupedData`
for all the available aggregate functions.
:func:`groupby` is an alias for :func:`groupBy`.
.. versionadded:: 1.3.0
Parameters
----------
cols : list, str or :class:`Column`
columns to group by.
Each element should be a column name (string) or an expression (:class:`Column`).
Examples
--------
>>> df.groupBy().avg().collect()
[Row(avg(age)=3.5)]
>>> sorted(df.groupBy('name').agg({'age': 'mean'}).collect())
[Row(name='Alice', avg(age)=2.0), Row(name='Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(df.name).avg().collect())
[Row(name='Alice', avg(age)=2.0), Row(name='Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(['name', df.age]).count().collect())
[Row(name='Alice', age=2, count=1), Row(name='Bob', age=5, count=1)]
"""
jgd = self._jdf.groupBy(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
def rollup(self, *cols):
"""
Create a multi-dimensional rollup for the current :class:`DataFrame` using
the specified columns, so we can run aggregation on them.
.. versionadded:: 1.4.0
Examples
--------
>>> df.rollup("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.rollup(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
def cube(self, *cols):
"""
Create a multi-dimensional cube for the current :class:`DataFrame` using
the specified columns, so we can run aggregations on them.
.. versionadded:: 1.4.0
Examples
--------
>>> df.cube("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
| null| 2| 1|
| null| 5| 1|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.cube(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
def agg(self, *exprs):
""" Aggregate on the entire :class:`DataFrame` without groups
(shorthand for ``df.groupBy().agg()``).
.. versionadded:: 1.3.0
Examples
--------
>>> df.agg({"age": "max"}).collect()
[Row(max(age)=5)]
>>> from pyspark.sql import functions as F
>>> df.agg(F.min(df.age)).collect()
[Row(min(age)=2)]
"""
return self.groupBy().agg(*exprs)
@since(2.0)
def union(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this and another
:class:`DataFrame`.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by :func:`distinct`.
Also as standard in SQL, this function resolves columns by position (not by name).
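        Examples
        --------
        A small illustration using the doctest ``df`` from this module; deduplicating
        after the positional union recovers the original rows:
        >>> df.union(df).distinct().sort("age").collect()
        [Row(age=2, name='Alice'), Row(age=5, name='Bob')]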
"""
return DataFrame(self._jdf.union(other._jdf), self.sql_ctx)
@since(1.3)
def unionAll(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this and another
:class:`DataFrame`.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by :func:`distinct`.
Also as standard in SQL, this function resolves columns by position (not by name).
"""
return self.union(other)
def unionByName(self, other, allowMissingColumns=False):
""" Returns a new :class:`DataFrame` containing union of rows in this and another
:class:`DataFrame`.
This is different from both `UNION ALL` and `UNION DISTINCT` in SQL. To do a SQL-style set
union (that does deduplication of elements), use this function followed by :func:`distinct`.
.. versionadded:: 2.3.0
Examples
--------
The difference between this function and :func:`union` is that this function
resolves columns by name (not by position):
>>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"])
>>> df2 = spark.createDataFrame([[4, 5, 6]], ["col1", "col2", "col0"])
>>> df1.unionByName(df2).show()
+----+----+----+
|col0|col1|col2|
+----+----+----+
| 1| 2| 3|
| 6| 4| 5|
+----+----+----+
When the parameter `allowMissingColumns` is ``True``, the set of column names
        in this and the other :class:`DataFrame` can differ; missing columns will be filled with null.
Further, the missing columns of this :class:`DataFrame` will be added at the end
in the schema of the union result:
>>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"])
>>> df2 = spark.createDataFrame([[4, 5, 6]], ["col1", "col2", "col3"])
>>> df1.unionByName(df2, allowMissingColumns=True).show()
+----+----+----+----+
|col0|col1|col2|col3|
+----+----+----+----+
| 1| 2| 3|null|
|null| 4| 5| 6|
+----+----+----+----+
.. versionchanged:: 3.1.0
Added optional argument `allowMissingColumns` to specify whether to allow
missing columns.
"""
return DataFrame(self._jdf.unionByName(other._jdf, allowMissingColumns), self.sql_ctx)
@since(1.3)
def intersect(self, other):
""" Return a new :class:`DataFrame` containing rows only in
both this :class:`DataFrame` and another :class:`DataFrame`.
This is equivalent to `INTERSECT` in SQL.
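        Examples
        --------
        An illustrative sketch (added here, mirroring the :func:`intersectAll` example
        below); note that `INTERSECT` de-duplicates the result:
        >>> df1 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3), ("c", 4)], ["C1", "C2"])
        >>> df2 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3)], ["C1", "C2"])
        >>> df1.intersect(df2).sort("C1", "C2").collect()
        [Row(C1='a', C2=1), Row(C1='b', C2=3)]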
"""
return DataFrame(self._jdf.intersect(other._jdf), self.sql_ctx)
def intersectAll(self, other):
""" Return a new :class:`DataFrame` containing rows in both this :class:`DataFrame`
and another :class:`DataFrame` while preserving duplicates.
This is equivalent to `INTERSECT ALL` in SQL. As standard in SQL, this function
resolves columns by position (not by name).
.. versionadded:: 2.4.0
Examples
--------
>>> df1 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.intersectAll(df2).sort("C1", "C2").show()
+---+---+
| C1| C2|
+---+---+
| a| 1|
| a| 1|
| b| 3|
+---+---+
"""
return DataFrame(self._jdf.intersectAll(other._jdf), self.sql_ctx)
@since(1.3)
def subtract(self, other):
""" Return a new :class:`DataFrame` containing rows in this :class:`DataFrame`
but not in another :class:`DataFrame`.
This is equivalent to `EXCEPT DISTINCT` in SQL.
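        Examples
        --------
        A minimal sketch (added for illustration); `EXCEPT DISTINCT` removes matching rows
        and de-duplicates the remainder:
        >>> df1 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3), ("c", 4)], ["C1", "C2"])
        >>> df2 = spark.createDataFrame([("a", 1), ("b", 3)], ["C1", "C2"])
        >>> df1.subtract(df2).collect()
        [Row(C1='c', C2=4)]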
"""
return DataFrame(getattr(self._jdf, "except")(other._jdf), self.sql_ctx)
def dropDuplicates(self, subset=None):
"""Return a new :class:`DataFrame` with duplicate rows removed,
optionally only considering certain columns.
For a static batch :class:`DataFrame`, it just drops duplicate rows. For a streaming
:class:`DataFrame`, it will keep all data across triggers as intermediate state to drop
        duplicate rows. You can use :func:`withWatermark` to limit how late the duplicate data can
        be and the system will accordingly limit the state. In addition, data older than the
        watermark will be dropped to avoid any possibility of duplicates.
:func:`drop_duplicates` is an alias for :func:`dropDuplicates`.
.. versionadded:: 1.4.0
Examples
--------
>>> from pyspark.sql import Row
>>> df = sc.parallelize([ \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=10, height=80)]).toDF()
>>> df.dropDuplicates().show()
+-----+---+------+
| name|age|height|
+-----+---+------+
|Alice| 5| 80|
|Alice| 10| 80|
+-----+---+------+
>>> df.dropDuplicates(['name', 'height']).show()
+-----+---+------+
| name|age|height|
+-----+---+------+
|Alice| 5| 80|
+-----+---+------+
"""
if subset is None:
jdf = self._jdf.dropDuplicates()
else:
jdf = self._jdf.dropDuplicates(self._jseq(subset))
return DataFrame(jdf, self.sql_ctx)
def dropna(self, how='any', thresh=None, subset=None):
"""Returns a new :class:`DataFrame` omitting rows with null values.
:func:`DataFrame.dropna` and :func:`DataFrameNaFunctions.drop` are aliases of each other.
.. versionadded:: 1.3.1
Parameters
----------
how : str, optional
'any' or 'all'.
If 'any', drop a row if it contains any nulls.
If 'all', drop a row only if all its values are null.
        thresh : int, optional
            default None
            If specified, drop rows that have fewer than `thresh` non-null values.
            This overwrites the `how` parameter.
subset : str, tuple or list, optional
optional list of column names to consider.
Examples
--------
>>> df4.na.drop().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
+---+------+-----+
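
        An additional sketch (not in the original docstring) showing `thresh`, which keeps
        only rows with at least `thresh` non-null values; the exact rendering is assumed:
        >>> df4.na.drop(thresh=2).show()  # doctest: +SKIP
        +---+------+-----+
        |age|height| name|
        +---+------+-----+
        | 10|    80|Alice|
        |  5|  null|  Bob|
        +---+------+-----+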
"""
if how is not None and how not in ['any', 'all']:
raise ValueError("how ('" + how + "') should be 'any' or 'all'")
if subset is None:
subset = self.columns
elif isinstance(subset, str):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
if thresh is None:
thresh = len(subset) if how == 'any' else 1
return DataFrame(self._jdf.na().drop(thresh, self._jseq(subset)), self.sql_ctx)
def fillna(self, value, subset=None):
"""Replace null values, alias for ``na.fill()``.
:func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other.
.. versionadded:: 1.3.1
Parameters
----------
value : int, float, string, bool or dict
Value to replace null values with.
If the value is a dict, then `subset` is ignored and `value` must be a mapping
from column name (string) to replacement value. The replacement value must be
an int, float, boolean, or string.
subset : str, tuple or list, optional
optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
Examples
--------
>>> df4.na.fill(50).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
| 5| 50| Bob|
| 50| 50| Tom|
| 50| 50| null|
+---+------+-----+
>>> df5.na.fill(False).show()
+----+-------+-----+
| age| name| spy|
+----+-------+-----+
| 10| Alice|false|
| 5| Bob|false|
|null|Mallory| true|
+----+-------+-----+
>>> df4.na.fill({'age': 50, 'name': 'unknown'}).show()
+---+------+-------+
|age|height| name|
+---+------+-------+
| 10| 80| Alice|
| 5| null| Bob|
| 50| null| Tom|
| 50| null|unknown|
+---+------+-------+
"""
if not isinstance(value, (float, int, str, bool, dict)):
raise ValueError("value should be a float, int, string, bool or dict")
# Note that bool validates isinstance(int), but we don't want to
# convert bools to floats
if not isinstance(value, bool) and isinstance(value, int):
value = float(value)
if isinstance(value, dict):
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
elif subset is None:
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
else:
if isinstance(subset, str):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
return DataFrame(self._jdf.na().fill(value, self._jseq(subset)), self.sql_ctx)
def replace(self, to_replace, value=_NoValue, subset=None):
"""Returns a new :class:`DataFrame` replacing a value with another value.
:func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are
aliases of each other.
Values to_replace and value must have the same type and can only be numerics, booleans,
        or strings. `value` can also be None. When replacing, the new value will be cast
to the type of the existing column.
For numeric replacements all values to be replaced should have unique
floating point representation. In case of conflicts (for example with `{42: -1, 42.0: 1}`)
        an arbitrary replacement will be used.
.. versionadded:: 1.4.0
Parameters
----------
to_replace : bool, int, float, string, list or dict
Value to be replaced.
If the value is a dict, then `value` is ignored or can be omitted, and `to_replace`
must be a mapping between a value and a replacement.
value : bool, int, float, string or None, optional
The replacement value must be a bool, int, float, string or None. If `value` is a
list, `value` should be of the same length and type as `to_replace`.
If `value` is a scalar and `to_replace` is a sequence, then `value` is
used as a replacement for each item in `to_replace`.
subset : list, optional
optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
Examples
--------
>>> df4.na.replace(10, 20).show()
+----+------+-----+
| age|height| name|
+----+------+-----+
| 20| 80|Alice|
| 5| null| Bob|
|null| null| Tom|
|null| null| null|
+----+------+-----+
>>> df4.na.replace('Alice', None).show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80|null|
| 5| null| Bob|
|null| null| Tom|
|null| null|null|
+----+------+----+
>>> df4.na.replace({'Alice': None}).show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80|null|
| 5| null| Bob|
|null| null| Tom|
|null| null|null|
+----+------+----+
>>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80| A|
| 5| null| B|
|null| null| Tom|
|null| null|null|
+----+------+----+
"""
if value is _NoValue:
if isinstance(to_replace, dict):
value = None
else:
raise TypeError("value argument is required when to_replace is not a dictionary.")
# Helper functions
def all_of(types):
"""Given a type or tuple of types and a sequence of xs
check if each x is instance of type(s)
>>> all_of(bool)([True, False])
True
>>> all_of(str)(["a", 1])
False
"""
def all_of_(xs):
return all(isinstance(x, types) for x in xs)
return all_of_
all_of_bool = all_of(bool)
all_of_str = all_of(str)
all_of_numeric = all_of((float, int))
# Validate input types
valid_types = (bool, float, int, str, list, tuple)
if not isinstance(to_replace, valid_types + (dict, )):
raise ValueError(
"to_replace should be a bool, float, int, string, list, tuple, or dict. "
"Got {0}".format(type(to_replace)))
if not isinstance(value, valid_types) and value is not None \
and not isinstance(to_replace, dict):
raise ValueError("If to_replace is not a dict, value should be "
"a bool, float, int, string, list, tuple or None. "
"Got {0}".format(type(value)))
if isinstance(to_replace, (list, tuple)) and isinstance(value, (list, tuple)):
if len(to_replace) != len(value):
raise ValueError("to_replace and value lists should be of the same length. "
"Got {0} and {1}".format(len(to_replace), len(value)))
if not (subset is None or isinstance(subset, (list, tuple, str))):
raise ValueError("subset should be a list or tuple of column names, "
"column name or None. Got {0}".format(type(subset)))
# Reshape input arguments if necessary
if isinstance(to_replace, (float, int, str)):
to_replace = [to_replace]
if isinstance(to_replace, dict):
rep_dict = to_replace
if value is not None:
warnings.warn("to_replace is a dict and value is not None. value will be ignored.")
else:
if isinstance(value, (float, int, str)) or value is None:
value = [value for _ in range(len(to_replace))]
rep_dict = dict(zip(to_replace, value))
if isinstance(subset, str):
subset = [subset]
# Verify we were not passed in mixed type generics.
if not any(all_of_type(rep_dict.keys())
and all_of_type(x for x in rep_dict.values() if x is not None)
for all_of_type in [all_of_bool, all_of_str, all_of_numeric]):
raise ValueError("Mixed type replacements are not supported")
if subset is None:
return DataFrame(self._jdf.na().replace('*', rep_dict), self.sql_ctx)
else:
return DataFrame(
self._jdf.na().replace(self._jseq(subset), self._jmap(rep_dict)), self.sql_ctx)
def approxQuantile(self, col, probabilities, relativeError):
"""
Calculates the approximate quantiles of numerical columns of a
:class:`DataFrame`.
The result of this algorithm has the following deterministic bound:
If the :class:`DataFrame` has N elements and if we request the quantile at
probability `p` up to error `err`, then the algorithm will return
a sample `x` from the :class:`DataFrame` so that the *exact* rank of `x` is
close to (p * N). More precisely,
floor((p - err) * N) <= rank(x) <= ceil((p + err) * N).
This method implements a variation of the Greenwald-Khanna
        algorithm (with some speed optimizations). The algorithm was first
        presented in [[https://doi.org/10.1145/375663.375670
Space-efficient Online Computation of Quantile Summaries]]
by Greenwald and Khanna.
Note that null values will be ignored in numerical columns before calculation.
For columns only containing null values, an empty list is returned.
.. versionadded:: 2.0.0
Parameters
----------
        col : str, tuple or list
Can be a single column name, or a list of names for multiple columns.
.. versionchanged:: 2.2
Added support for multiple columns.
probabilities : list or tuple
a list of quantile probabilities
Each number must belong to [0, 1].
For example 0 is the minimum, 0.5 is the median, 1 is the maximum.
relativeError : float
The relative target precision to achieve
(>= 0). If set to zero, the exact quantiles are computed, which
could be very expensive. Note that values greater than 1 are
accepted but give the same result as 1.
Returns
-------
list
the approximate quantiles at the given probabilities. If
the input `col` is a string, the output is a list of floats. If the
input `col` is a list or tuple of strings, the output is also a
list, but each element in it is a list of floats, i.e., the output
is a list of list of floats.
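        Examples
        --------
        A rough sketch (added for illustration; the returned values depend on the data and
        the chosen `relativeError`, so the doctest is skipped):
        >>> df = spark.range(10)
        >>> df.approxQuantile("id", [0.0, 0.5, 1.0], 0.25)  # doctest: +SKIP
        [0.0, 4.0, 9.0]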
"""
if not isinstance(col, (str, list, tuple)):
raise ValueError("col should be a string, list or tuple, but got %r" % type(col))
isStr = isinstance(col, str)
if isinstance(col, tuple):
col = list(col)
elif isStr:
col = [col]
for c in col:
if not isinstance(c, str):
raise ValueError("columns should be strings, but got %r" % type(c))
col = _to_list(self._sc, col)
if not isinstance(probabilities, (list, tuple)):
raise ValueError("probabilities should be a list or tuple")
if isinstance(probabilities, tuple):
probabilities = list(probabilities)
for p in probabilities:
if not isinstance(p, (float, int)) or p < 0 or p > 1:
raise ValueError("probabilities should be numerical (float, int) in [0,1].")
probabilities = _to_list(self._sc, probabilities)
if not isinstance(relativeError, (float, int)) or relativeError < 0:
raise ValueError("relativeError should be numerical (float, int) >= 0.")
relativeError = float(relativeError)
jaq = self._jdf.stat().approxQuantile(col, probabilities, relativeError)
jaq_list = [list(j) for j in jaq]
return jaq_list[0] if isStr else jaq_list
def corr(self, col1, col2, method=None):
"""
Calculates the correlation of two columns of a :class:`DataFrame` as a double value.
Currently only supports the Pearson Correlation Coefficient.
:func:`DataFrame.corr` and :func:`DataFrameStatFunctions.corr` are aliases of each other.
.. versionadded:: 1.4.0
Parameters
----------
col1 : str
The name of the first column
col2 : str
The name of the second column
method : str, optional
The correlation method. Currently only supports "pearson"
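        Examples
        --------
        A brief sketch (added for illustration) on a perfectly linearly related pair of
        columns, where the coefficient is expected to be (approximately) 1.0:
        >>> df = spark.createDataFrame([(1, 2), (2, 4), (3, 6)], ["a", "b"])
        >>> df.corr("a", "b")  # doctest: +SKIP
        1.0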
"""
if not isinstance(col1, str):
raise ValueError("col1 should be a string.")
if not isinstance(col2, str):
raise ValueError("col2 should be a string.")
if not method:
method = "pearson"
if not method == "pearson":
raise ValueError("Currently only the calculation of the Pearson Correlation " +
"coefficient is supported.")
return self._jdf.stat().corr(col1, col2, method)
def cov(self, col1, col2):
"""
Calculate the sample covariance for the given columns, specified by their names, as a
double value. :func:`DataFrame.cov` and :func:`DataFrameStatFunctions.cov` are aliases.
.. versionadded:: 1.4.0
Parameters
----------
col1 : str
The name of the first column
col2 : str
The name of the second column
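        Examples
        --------
        A short sketch (added for illustration); the sample covariance of a column with
        itself is its sample variance, here ((1-2)^2 + (2-2)^2 + (3-2)^2) / (3-1) = 1.0:
        >>> df = spark.createDataFrame([(1.0,), (2.0,), (3.0,)], ["a"])
        >>> df.cov("a", "a")  # doctest: +SKIP
        1.0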
"""
if not isinstance(col1, str):
raise ValueError("col1 should be a string.")
if not isinstance(col2, str):
raise ValueError("col2 should be a string.")
return self._jdf.stat().cov(col1, col2)
def crosstab(self, col1, col2):
"""
Computes a pair-wise frequency table of the given columns. Also known as a contingency
table. The number of distinct values for each column should be less than 1e4. At most 1e6
non-zero pair frequencies will be returned.
The first column of each row will be the distinct values of `col1` and the column names
will be the distinct values of `col2`. The name of the first column will be `$col1_$col2`.
Pairs that have no occurrences will have zero as their counts.
:func:`DataFrame.crosstab` and :func:`DataFrameStatFunctions.crosstab` are aliases.
.. versionadded:: 1.4.0
Parameters
----------
col1 : str
The name of the first column. Distinct items will make the first item of
each row.
col2 : str
The name of the second column. Distinct items will make the column names
of the :class:`DataFrame`.
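        Examples
        --------
        A small sketch (added for illustration); the order of the value columns is not
        guaranteed, so the doctest is skipped:
        >>> df = spark.createDataFrame([("a", 1), ("a", 2), ("b", 1)], ["key", "value"])
        >>> df.crosstab("key", "value").sort("key_value").show()  # doctest: +SKIP
        +---------+---+---+
        |key_value|  1|  2|
        +---------+---+---+
        |        a|  1|  1|
        |        b|  1|  0|
        +---------+---+---+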
"""
if not isinstance(col1, str):
raise ValueError("col1 should be a string.")
if not isinstance(col2, str):
raise ValueError("col2 should be a string.")
return DataFrame(self._jdf.stat().crosstab(col1, col2), self.sql_ctx)
def freqItems(self, cols, support=None):
"""
        Finding frequent items for columns, possibly with false positives, using the
        frequent element count algorithm described in
        "https://doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou".
:func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases.
.. versionadded:: 1.4.0
Parameters
----------
cols : list or tuple
Names of the columns to calculate frequent items for as a list or tuple of
strings.
support : float, optional
The frequency with which to consider an item 'frequent'. Default is 1%.
The support must be greater than 1e-4.
Notes
-----
This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
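        Examples
        --------
        A rough sketch (added for illustration); because the algorithm may report false
        positives, the exact output is not asserted here:
        >>> df = spark.createDataFrame([(1,), (1,), (1,), (2,), (3,)], ["a"])
        >>> df.freqItems(["a"], support=0.6).collect()  # doctest: +SKIP
        [Row(a_freqItems=[1])]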
"""
if isinstance(cols, tuple):
cols = list(cols)
if not isinstance(cols, list):
raise ValueError("cols must be a list or tuple of column names as strings.")
if not support:
support = 0.01
return DataFrame(self._jdf.stat().freqItems(_to_seq(self._sc, cols), support), self.sql_ctx)
def withColumn(self, colName, col):
"""
Returns a new :class:`DataFrame` by adding a column or replacing the
existing column that has the same name.
The column expression must be an expression over this :class:`DataFrame`; attempting to add
a column from some other :class:`DataFrame` will raise an error.
.. versionadded:: 1.3.0
Parameters
----------
colName : str
string, name of the new column.
col : :class:`Column`
a :class:`Column` expression for the new column.
Notes
-----
This method introduces a projection internally. Therefore, calling it multiple
times, for instance, via loops in order to add multiple columns can generate big
plans which can cause performance issues and even `StackOverflowException`.
To avoid this, use :func:`select` with the multiple columns at once.
Examples
--------
>>> df.withColumn('age2', df.age + 2).collect()
[Row(age=2, name='Alice', age2=4), Row(age=5, name='Bob', age2=7)]
"""
assert isinstance(col, Column), "col should be Column"
return DataFrame(self._jdf.withColumn(colName, col._jc), self.sql_ctx)
def withColumnRenamed(self, existing, new):
"""Returns a new :class:`DataFrame` by renaming an existing column.
This is a no-op if schema doesn't contain the given column name.
.. versionadded:: 1.3.0
Parameters
----------
existing : str
string, name of the existing column to rename.
new : str
string, new name of the column.
Examples
--------
>>> df.withColumnRenamed('age', 'age2').collect()
[Row(age2=2, name='Alice'), Row(age2=5, name='Bob')]
"""
return DataFrame(self._jdf.withColumnRenamed(existing, new), self.sql_ctx)
def drop(self, *cols):
"""Returns a new :class:`DataFrame` that drops the specified column.
This is a no-op if schema doesn't contain the given column name(s).
.. versionadded:: 1.4.0
Parameters
----------
cols: str or :class:`Column`
a name of the column, or the :class:`Column` to drop
Examples
--------
>>> df.drop('age').collect()
[Row(name='Alice'), Row(name='Bob')]
>>> df.drop(df.age).collect()
[Row(name='Alice'), Row(name='Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df.name).collect()
[Row(age=5, height=85, name='Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df2.name).collect()
[Row(age=5, name='Bob', height=85)]
>>> df.join(df2, 'name', 'inner').drop('age', 'height').collect()
[Row(name='Bob')]
"""
if len(cols) == 1:
col = cols[0]
if isinstance(col, str):
jdf = self._jdf.drop(col)
elif isinstance(col, Column):
jdf = self._jdf.drop(col._jc)
else:
raise TypeError("col should be a string or a Column")
else:
for col in cols:
if not isinstance(col, str):
raise TypeError("each col in the param list should be a string")
jdf = self._jdf.drop(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
def toDF(self, *cols):
"""Returns a new :class:`DataFrame` that with new specified column names
Parameters
----------
cols : str
new column names
Examples
--------
>>> df.toDF('f1', 'f2').collect()
[Row(f1=2, f2='Alice'), Row(f1=5, f2='Bob')]
"""
jdf = self._jdf.toDF(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
def transform(self, func):
"""Returns a new :class:`DataFrame`. Concise syntax for chaining custom transformations.
.. versionadded:: 3.0.0
Parameters
----------
func : function
a function that takes and returns a :class:`DataFrame`.
Examples
--------
>>> from pyspark.sql.functions import col
>>> df = spark.createDataFrame([(1, 1.0), (2, 2.0)], ["int", "float"])
>>> def cast_all_to_int(input_df):
... return input_df.select([col(col_name).cast("int") for col_name in input_df.columns])
>>> def sort_columns_asc(input_df):
... return input_df.select(*sorted(input_df.columns))
>>> df.transform(cast_all_to_int).transform(sort_columns_asc).show()
+-----+---+
|float|int|
+-----+---+
| 1| 1|
| 2| 2|
+-----+---+
"""
result = func(self)
assert isinstance(result, DataFrame), "Func returned an instance of type [%s], " \
"should have been DataFrame." % type(result)
return result
def sameSemantics(self, other):
"""
Returns `True` when the logical query plans inside both :class:`DataFrame`\\s are equal and
        therefore return the same results.
.. versionadded:: 3.1.0
Notes
-----
The equality comparison here is simplified by tolerating the cosmetic differences
such as attribute names.
        This API can compare both :class:`DataFrame`\\s very quickly but can still return
        `False` on :class:`DataFrame`\\s that return the same results, for instance, from
        different plans. Such false negatives can be acceptable, for example, when this is used for caching.
This API is a developer API.
Examples
--------
>>> df1 = spark.range(10)
>>> df2 = spark.range(10)
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col1", df2.id * 2))
True
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col1", df2.id + 2))
False
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col0", df2.id * 2))
True
"""
if not isinstance(other, DataFrame):
raise ValueError("other parameter should be of DataFrame; however, got %s"
% type(other))
return self._jdf.sameSemantics(other._jdf)
def semanticHash(self):
"""
Returns a hash code of the logical query plan against this :class:`DataFrame`.
.. versionadded:: 3.1.0
Notes
-----
Unlike the standard hash code, the hash is calculated against the query plan
simplified by tolerating the cosmetic differences such as attribute names.
This API is a developer API.
Examples
--------
>>> spark.range(10).selectExpr("id as col0").semanticHash() # doctest: +SKIP
1855039936
>>> spark.range(10).selectExpr("id as col1").semanticHash() # doctest: +SKIP
1855039936
"""
return self._jdf.semanticHash()
def inputFiles(self):
"""
Returns a best-effort snapshot of the files that compose this :class:`DataFrame`.
This method simply asks each constituent BaseRelation for its respective files and
takes the union of all results. Depending on the source relations, this may not find
all input files. Duplicates are removed.
.. versionadded:: 3.1.0
Examples
--------
>>> df = spark.read.load("examples/src/main/resources/people.json", format="json")
>>> len(df.inputFiles())
1
"""
return list(self._jdf.inputFiles())
where = copy_func(
filter,
sinceversion=1.3,
doc=":func:`where` is an alias for :func:`filter`.")
# Two aliases below were added for pandas compatibility many years ago.
# There are too many differences compared to pandas and we cannot just
# make it "compatible" by adding aliases. Therefore, we stop adding such
# aliases as of Spark 3.0. Two methods below remain just
# for legacy users currently.
groupby = copy_func(
groupBy,
sinceversion=1.4,
doc=":func:`groupby` is an alias for :func:`groupBy`.")
drop_duplicates = copy_func(
dropDuplicates,
sinceversion=1.4,
doc=":func:`drop_duplicates` is an alias for :func:`dropDuplicates`.")
def writeTo(self, table):
"""
Create a write configuration builder for v2 sources.
This builder is used to configure and execute write operations.
For example, to append or create or replace existing tables.
.. versionadded:: 3.1.0
Examples
--------
>>> df.writeTo("catalog.db.table").append() # doctest: +SKIP
>>> df.writeTo( # doctest: +SKIP
... "catalog.db.table"
... ).partitionedBy("col").createOrReplace()
"""
return DataFrameWriterV2(self, table)
def _to_scala_map(sc, jm):
"""
Convert a dict into a JVM Map.
"""
return sc._jvm.PythonUtils.toScalaMap(jm)
class DataFrameNaFunctions(object):
"""Functionality for working with missing data in :class:`DataFrame`.
.. versionadded:: 1.4
"""
def __init__(self, df):
self.df = df
def drop(self, how='any', thresh=None, subset=None):
return self.df.dropna(how=how, thresh=thresh, subset=subset)
drop.__doc__ = DataFrame.dropna.__doc__
def fill(self, value, subset=None):
return self.df.fillna(value=value, subset=subset)
fill.__doc__ = DataFrame.fillna.__doc__
def replace(self, to_replace, value=_NoValue, subset=None):
return self.df.replace(to_replace, value, subset)
replace.__doc__ = DataFrame.replace.__doc__
class DataFrameStatFunctions(object):
"""Functionality for statistic functions with :class:`DataFrame`.
.. versionadded:: 1.4
"""
def __init__(self, df):
self.df = df
def approxQuantile(self, col, probabilities, relativeError):
return self.df.approxQuantile(col, probabilities, relativeError)
approxQuantile.__doc__ = DataFrame.approxQuantile.__doc__
def corr(self, col1, col2, method=None):
return self.df.corr(col1, col2, method)
corr.__doc__ = DataFrame.corr.__doc__
def cov(self, col1, col2):
return self.df.cov(col1, col2)
cov.__doc__ = DataFrame.cov.__doc__
def crosstab(self, col1, col2):
return self.df.crosstab(col1, col2)
crosstab.__doc__ = DataFrame.crosstab.__doc__
def freqItems(self, cols, support=None):
return self.df.freqItems(cols, support)
freqItems.__doc__ = DataFrame.freqItems.__doc__
def sampleBy(self, col, fractions, seed=None):
return self.df.sampleBy(col, fractions, seed)
sampleBy.__doc__ = DataFrame.sampleBy.__doc__
def _test():
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext, SparkSession
import pyspark.sql.dataframe
globs = pyspark.sql.dataframe.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['spark'] = SparkSession(sc)
globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')])\
.toDF(StructType([StructField('age', IntegerType()),
StructField('name', StringType())]))
globs['df2'] = sc.parallelize([Row(height=80, name='Tom'), Row(height=85, name='Bob')]).toDF()
globs['df3'] = sc.parallelize([Row(age=2, name='Alice'),
Row(age=5, name='Bob')]).toDF()
globs['df4'] = sc.parallelize([Row(age=10, height=80, name='Alice'),
Row(age=5, height=None, name='Bob'),
Row(age=None, height=None, name='Tom'),
Row(age=None, height=None, name=None)]).toDF()
globs['df5'] = sc.parallelize([Row(age=10, name='Alice', spy=False),
Row(age=5, name='Bob', spy=None),
Row(age=None, name='Mallory', spy=True)]).toDF()
globs['sdf'] = sc.parallelize([Row(name='Tom', time=1479441846),
Row(name='Bob', time=1479442946)]).toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.dataframe, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
nathanielvarona/airflow | tests/providers/google/cloud/transfers/test_sql_to_gcs.py | 3 | 9440 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import unittest
from typing import Dict
from unittest import mock
from unittest.mock import MagicMock, Mock
import pandas as pd
import unicodecsv as csv
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.google.cloud.transfers.sql_to_gcs import BaseSQLToGCSOperator
SQL = "SELECT * FROM test_table"
BUCKET = "TEST-BUCKET-1"
FILENAME = "test_results.csv"
TASK_ID = "TEST_TASK_ID"
SCHEMA = [
{"name": "column_a", "type": "3"},
{"name": "column_b", "type": "253"},
{"name": "column_c", "type": "10"},
]
COLUMNS = ["column_a", "column_b", "column_c"]
ROW = ["convert_type_return_value", "convert_type_return_value", "convert_type_return_value"]
CURSOR_DESCRIPTION = [
("column_a", "3", 0, 0, 0, 0, False),
("column_b", "253", 0, 0, 0, 0, False),
("column_c", "10", 0, 0, 0, 0, False),
]
TMP_FILE_NAME = "temp-file"
INPUT_DATA = [
["101", "school", "2015-01-01"],
["102", "business", "2017-05-24"],
["103", "non-profit", "2018-10-01"],
]
OUTPUT_DATA = json.dumps(
{
"column_a": "convert_type_return_value",
"column_b": "convert_type_return_value",
"column_c": "convert_type_return_value",
}
).encode("utf-8")
SCHEMA_FILE = "schema_file.json"
APP_JSON = "application/json"
OUTPUT_DF = pd.DataFrame([['convert_type_return_value'] * 3] * 3, columns=COLUMNS)
class DummySQLToGCSOperator(BaseSQLToGCSOperator):
def field_to_bigquery(self, field) -> Dict[str, str]:
return {
'name': field[0],
'type': 'STRING',
'mode': 'NULLABLE',
}
def convert_type(self, value, schema_type):
return 'convert_type_return_value'
def query(self):
pass
class TestBaseSQLToGCSOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.transfers.sql_to_gcs.NamedTemporaryFile")
@mock.patch.object(csv.writer, "writerow")
@mock.patch.object(GCSHook, "upload")
@mock.patch.object(DummySQLToGCSOperator, "query")
@mock.patch.object(DummySQLToGCSOperator, "convert_type")
def test_exec(self, mock_convert_type, mock_query, mock_upload, mock_writerow, mock_tempfile):
cursor_mock = Mock()
cursor_mock.description = CURSOR_DESCRIPTION
cursor_mock.__iter__ = Mock(return_value=iter(INPUT_DATA))
mock_query.return_value = cursor_mock
mock_convert_type.return_value = "convert_type_return_value"
mock_file = Mock()
mock_tell = Mock()
mock_tell.return_value = 3
mock_file.tell = mock_tell
mock_flush = Mock()
mock_file.flush = mock_flush
mock_close = Mock()
mock_file.close = mock_close
mock_file.name = TMP_FILE_NAME
mock_write = Mock()
mock_file.write = mock_write
mock_tempfile.return_value = mock_file
# Test CSV
operator = DummySQLToGCSOperator(
sql=SQL,
bucket=BUCKET,
filename=FILENAME,
task_id=TASK_ID,
schema_filename=SCHEMA_FILE,
approx_max_file_size_bytes=1,
export_format="csv",
gzip=True,
schema=SCHEMA,
gcp_conn_id='google_cloud_default',
)
operator.execute(context=dict())
mock_query.assert_called_once()
mock_writerow.assert_has_calls(
[
mock.call(COLUMNS),
mock.call(ROW),
mock.call(COLUMNS),
mock.call(ROW),
mock.call(COLUMNS),
mock.call(ROW),
mock.call(COLUMNS),
]
)
mock_flush.assert_has_calls([mock.call(), mock.call(), mock.call(), mock.call(), mock.call()])
csv_call = mock.call(BUCKET, FILENAME, TMP_FILE_NAME, mime_type='text/csv', gzip=True)
json_call = mock.call(BUCKET, SCHEMA_FILE, TMP_FILE_NAME, mime_type=APP_JSON, gzip=False)
upload_calls = [csv_call, csv_call, csv_call, json_call]
mock_upload.assert_has_calls(upload_calls)
mock_close.assert_has_calls([mock.call(), mock.call(), mock.call(), mock.call(), mock.call()])
mock_query.reset_mock()
mock_flush.reset_mock()
mock_upload.reset_mock()
mock_close.reset_mock()
cursor_mock.reset_mock()
cursor_mock.__iter__ = Mock(return_value=iter(INPUT_DATA))
# Test JSON
operator = DummySQLToGCSOperator(
sql=SQL, bucket=BUCKET, filename=FILENAME, task_id=TASK_ID, export_format="json", schema=SCHEMA
)
operator.execute(context=dict())
mock_query.assert_called_once()
mock_write.assert_has_calls(
[
mock.call(OUTPUT_DATA),
mock.call(b"\n"),
mock.call(OUTPUT_DATA),
mock.call(b"\n"),
mock.call(OUTPUT_DATA),
mock.call(b"\n"),
]
)
mock_flush.assert_called_once()
mock_upload.assert_called_once_with(BUCKET, FILENAME, TMP_FILE_NAME, mime_type=APP_JSON, gzip=False)
mock_close.assert_called_once()
mock_query.reset_mock()
mock_flush.reset_mock()
mock_upload.reset_mock()
mock_close.reset_mock()
cursor_mock.reset_mock()
cursor_mock.__iter__ = Mock(return_value=iter(INPUT_DATA))
# Test parquet
operator = DummySQLToGCSOperator(
sql=SQL, bucket=BUCKET, filename=FILENAME, task_id=TASK_ID, export_format="parquet", schema=SCHEMA
)
operator.execute(context=dict())
mock_query.assert_called_once()
mock_flush.assert_called_once()
mock_upload.assert_called_once_with(
BUCKET, FILENAME, TMP_FILE_NAME, mime_type='application/octet-stream', gzip=False
)
mock_close.assert_called_once()
# Test null marker
cursor_mock.__iter__ = Mock(return_value=iter(INPUT_DATA))
mock_convert_type.return_value = None
operator = DummySQLToGCSOperator(
sql=SQL,
bucket=BUCKET,
filename=FILENAME,
task_id=TASK_ID,
export_format="csv",
null_marker="NULL",
)
operator.execute(context=dict())
mock_writerow.assert_has_calls(
[
mock.call(COLUMNS),
mock.call(["NULL", "NULL", "NULL"]),
mock.call(["NULL", "NULL", "NULL"]),
mock.call(["NULL", "NULL", "NULL"]),
]
)
def test__write_local_data_files_csv(self):
op = DummySQLToGCSOperator(
sql=SQL,
bucket=BUCKET,
filename=FILENAME,
task_id=TASK_ID,
schema_filename=SCHEMA_FILE,
export_format="csv",
gzip=False,
schema=SCHEMA,
gcp_conn_id='google_cloud_default',
)
cursor = MagicMock()
cursor.__iter__.return_value = INPUT_DATA
cursor.description = CURSOR_DESCRIPTION
files = op._write_local_data_files(cursor)
file = files[0]['file_handle']
file.flush()
df = pd.read_csv(file.name)
assert df.equals(OUTPUT_DF)
def test__write_local_data_files_json(self):
op = DummySQLToGCSOperator(
sql=SQL,
bucket=BUCKET,
filename=FILENAME,
task_id=TASK_ID,
schema_filename=SCHEMA_FILE,
export_format="json",
gzip=False,
schema=SCHEMA,
gcp_conn_id='google_cloud_default',
)
cursor = MagicMock()
cursor.__iter__.return_value = INPUT_DATA
cursor.description = CURSOR_DESCRIPTION
files = op._write_local_data_files(cursor)
file = files[0]['file_handle']
file.flush()
df = pd.read_json(file.name, orient='records', lines=True)
assert df.equals(OUTPUT_DF)
def test__write_local_data_files_parquet(self):
op = DummySQLToGCSOperator(
sql=SQL,
bucket=BUCKET,
filename=FILENAME,
task_id=TASK_ID,
schema_filename=SCHEMA_FILE,
export_format="parquet",
gzip=False,
schema=SCHEMA,
gcp_conn_id='google_cloud_default',
)
cursor = MagicMock()
cursor.__iter__.return_value = INPUT_DATA
cursor.description = CURSOR_DESCRIPTION
files = op._write_local_data_files(cursor)
file = files[0]['file_handle']
file.flush()
df = pd.read_parquet(file.name)
assert df.equals(OUTPUT_DF)
| apache-2.0 |
huzq/scikit-learn | sklearn/mixture/tests/test_gaussian_mixture.py | 11 | 42047 | # Author: Wei Xue <[email protected]>
# Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import sys
import copy
import warnings
import pytest
import numpy as np
from scipy import stats, linalg
from sklearn.covariance import EmpiricalCovariance
from sklearn.datasets import make_spd_matrix
from io import StringIO
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.mixture import GaussianMixture
from sklearn.mixture._gaussian_mixture import (
_estimate_gaussian_covariances_full,
_estimate_gaussian_covariances_tied,
_estimate_gaussian_covariances_diag,
_estimate_gaussian_covariances_spherical,
_compute_precision_cholesky,
_compute_log_det_cholesky,
)
from sklearn.exceptions import ConvergenceWarning, NotFittedError
from sklearn.utils.extmath import fast_logdet
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_raise_message
from sklearn.utils._testing import assert_warns_message
from sklearn.utils._testing import ignore_warnings
COVARIANCE_TYPE = ['full', 'tied', 'diag', 'spherical']
def generate_data(n_samples, n_features, weights, means, precisions,
covariance_type):
rng = np.random.RandomState(0)
X = []
if covariance_type == 'spherical':
for _, (w, m, c) in enumerate(zip(weights, means,
precisions['spherical'])):
X.append(rng.multivariate_normal(m, c * np.eye(n_features),
int(np.round(w * n_samples))))
if covariance_type == 'diag':
for _, (w, m, c) in enumerate(zip(weights, means,
precisions['diag'])):
X.append(rng.multivariate_normal(m, np.diag(c),
int(np.round(w * n_samples))))
if covariance_type == 'tied':
for _, (w, m) in enumerate(zip(weights, means)):
X.append(rng.multivariate_normal(m, precisions['tied'],
int(np.round(w * n_samples))))
if covariance_type == 'full':
for _, (w, m, c) in enumerate(zip(weights, means,
precisions['full'])):
X.append(rng.multivariate_normal(m, c,
int(np.round(w * n_samples))))
X = np.vstack(X)
return X
class RandomData:
def __init__(self, rng, n_samples=200, n_components=2, n_features=2,
scale=50):
self.n_samples = n_samples
self.n_components = n_components
self.n_features = n_features
self.weights = rng.rand(n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.rand(n_components, n_features) * scale
self.covariances = {
'spherical': .5 + rng.rand(n_components),
'diag': (.5 + rng.rand(n_components, n_features)) ** 2,
'tied': make_spd_matrix(n_features, random_state=rng),
'full': np.array([
make_spd_matrix(n_features, random_state=rng) * .5
for _ in range(n_components)])}
self.precisions = {
'spherical': 1. / self.covariances['spherical'],
'diag': 1. / self.covariances['diag'],
'tied': linalg.inv(self.covariances['tied']),
'full': np.array([linalg.inv(covariance)
for covariance in self.covariances['full']])}
self.X = dict(zip(COVARIANCE_TYPE, [generate_data(
n_samples, n_features, self.weights, self.means, self.covariances,
covar_type) for covar_type in COVARIANCE_TYPE]))
self.Y = np.hstack([np.full(int(np.round(w * n_samples)), k,
dtype=int)
for k, w in enumerate(self.weights)])
def test_gaussian_mixture_attributes():
# test bad parameters
rng = np.random.RandomState(0)
X = rng.rand(10, 2)
n_components_bad = 0
gmm = GaussianMixture(n_components=n_components_bad)
assert_raise_message(ValueError,
"Invalid value for 'n_components': %d "
"Estimation requires at least one component"
% n_components_bad, gmm.fit, X)
# covariance_type should be in [spherical, diag, tied, full]
covariance_type_bad = 'bad_covariance_type'
gmm = GaussianMixture(covariance_type=covariance_type_bad)
assert_raise_message(ValueError,
"Invalid value for 'covariance_type': %s "
"'covariance_type' should be in "
"['spherical', 'tied', 'diag', 'full']"
% covariance_type_bad,
gmm.fit, X)
tol_bad = -1
gmm = GaussianMixture(tol=tol_bad)
assert_raise_message(ValueError,
"Invalid value for 'tol': %.5f "
"Tolerance used by the EM must be non-negative"
% tol_bad, gmm.fit, X)
reg_covar_bad = -1
gmm = GaussianMixture(reg_covar=reg_covar_bad)
assert_raise_message(ValueError,
"Invalid value for 'reg_covar': %.5f "
"regularization on covariance must be "
"non-negative" % reg_covar_bad, gmm.fit, X)
max_iter_bad = 0
gmm = GaussianMixture(max_iter=max_iter_bad)
assert_raise_message(ValueError,
"Invalid value for 'max_iter': %d "
"Estimation requires at least one iteration"
% max_iter_bad, gmm.fit, X)
n_init_bad = 0
gmm = GaussianMixture(n_init=n_init_bad)
assert_raise_message(ValueError,
"Invalid value for 'n_init': %d "
"Estimation requires at least one run"
% n_init_bad, gmm.fit, X)
init_params_bad = 'bad_method'
gmm = GaussianMixture(init_params=init_params_bad)
assert_raise_message(ValueError,
"Unimplemented initialization method '%s'"
% init_params_bad,
gmm.fit, X)
# test good parameters
n_components, tol, n_init, max_iter, reg_covar = 2, 1e-4, 3, 30, 1e-1
covariance_type, init_params = 'full', 'random'
gmm = GaussianMixture(n_components=n_components, tol=tol, n_init=n_init,
max_iter=max_iter, reg_covar=reg_covar,
covariance_type=covariance_type,
init_params=init_params).fit(X)
assert gmm.n_components == n_components
assert gmm.covariance_type == covariance_type
assert gmm.tol == tol
assert gmm.reg_covar == reg_covar
assert gmm.max_iter == max_iter
assert gmm.n_init == n_init
assert gmm.init_params == init_params
def test_check_X():
from sklearn.mixture._base import _check_X
rng = np.random.RandomState(0)
n_samples, n_components, n_features = 10, 2, 2
X_bad_dim = rng.rand(n_components - 1, n_features)
assert_raise_message(ValueError,
'Expected n_samples >= n_components '
'but got n_components = %d, n_samples = %d'
% (n_components, X_bad_dim.shape[0]),
_check_X, X_bad_dim, n_components)
X_bad_dim = rng.rand(n_components, n_features + 1)
assert_raise_message(ValueError,
'Expected the input data X have %d features, '
'but got %d features'
% (n_features, X_bad_dim.shape[1]),
_check_X, X_bad_dim, n_components, n_features)
X = rng.rand(n_samples, n_features)
assert_array_equal(X, _check_X(X, n_components, n_features))
def test_check_weights():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components = rand_data.n_components
X = rand_data.X['full']
g = GaussianMixture(n_components=n_components)
# Check bad shape
weights_bad_shape = rng.rand(n_components, 1)
g.weights_init = weights_bad_shape
assert_raise_message(ValueError,
"The parameter 'weights' should have the shape of "
"(%d,), but got %s" %
(n_components, str(weights_bad_shape.shape)),
g.fit, X)
# Check bad range
weights_bad_range = rng.rand(n_components) + 1
g.weights_init = weights_bad_range
assert_raise_message(ValueError,
"The parameter 'weights' should be in the range "
"[0, 1], but got max value %.5f, min value %.5f"
% (np.min(weights_bad_range),
np.max(weights_bad_range)),
g.fit, X)
# Check bad normalization
weights_bad_norm = rng.rand(n_components)
weights_bad_norm = weights_bad_norm / (weights_bad_norm.sum() + 1)
g.weights_init = weights_bad_norm
assert_raise_message(ValueError,
"The parameter 'weights' should be normalized, "
"but got sum(weights) = %.5f"
% np.sum(weights_bad_norm),
g.fit, X)
# Check good weights matrix
weights = rand_data.weights
g = GaussianMixture(weights_init=weights, n_components=n_components)
g.fit(X)
assert_array_equal(weights, g.weights_init)
def test_check_means():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components, n_features = rand_data.n_components, rand_data.n_features
X = rand_data.X['full']
g = GaussianMixture(n_components=n_components)
# Check means bad shape
means_bad_shape = rng.rand(n_components + 1, n_features)
g.means_init = means_bad_shape
assert_raise_message(ValueError,
"The parameter 'means' should have the shape of ",
g.fit, X)
# Check good means matrix
means = rand_data.means
g.means_init = means
g.fit(X)
assert_array_equal(means, g.means_init)
def test_check_precisions():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components, n_features = rand_data.n_components, rand_data.n_features
# Define the bad precisions for each covariance_type
precisions_bad_shape = {
'full': np.ones((n_components + 1, n_features, n_features)),
'tied': np.ones((n_features + 1, n_features + 1)),
'diag': np.ones((n_components + 1, n_features)),
'spherical': np.ones((n_components + 1))}
# Define not positive-definite precisions
precisions_not_pos = np.ones((n_components, n_features, n_features))
precisions_not_pos[0] = np.eye(n_features)
precisions_not_pos[0, 0, 0] = -1.
precisions_not_positive = {
'full': precisions_not_pos,
'tied': precisions_not_pos[0],
'diag': np.full((n_components, n_features), -1.),
'spherical': np.full(n_components, -1.)}
not_positive_errors = {
'full': 'symmetric, positive-definite',
'tied': 'symmetric, positive-definite',
'diag': 'positive',
'spherical': 'positive'}
for covar_type in COVARIANCE_TYPE:
X = RandomData(rng).X[covar_type]
g = GaussianMixture(n_components=n_components,
covariance_type=covar_type,
random_state=rng)
# Check precisions with bad shapes
g.precisions_init = precisions_bad_shape[covar_type]
assert_raise_message(ValueError,
"The parameter '%s precision' should have "
"the shape of" % covar_type,
g.fit, X)
# Check not positive precisions
g.precisions_init = precisions_not_positive[covar_type]
assert_raise_message(ValueError,
"'%s precision' should be %s"
% (covar_type, not_positive_errors[covar_type]),
g.fit, X)
# Check the correct init of precisions_init
g.precisions_init = rand_data.precisions[covar_type]
g.fit(X)
assert_array_equal(rand_data.precisions[covar_type], g.precisions_init)
def test_suffstat_sk_full():
    # compare the precision matrix computed from the
    # EmpiricalCovariance.covariance fitted on X*sqrt(resp)
    # with _estimate_gaussian_covariances_full, n_components=1
rng = np.random.RandomState(0)
n_samples, n_features = 500, 2
# special case 1, assuming data is "centered"
X = rng.rand(n_samples, n_features)
resp = rng.rand(n_samples, 1)
X_resp = np.sqrt(resp) * X
nk = np.array([n_samples])
xk = np.zeros((1, n_features))
covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
ecov = EmpiricalCovariance(assume_centered=True)
ecov.fit(X_resp)
assert_almost_equal(ecov.error_norm(covars_pred[0], norm='frobenius'), 0)
assert_almost_equal(ecov.error_norm(covars_pred[0], norm='spectral'), 0)
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred, 'full')
precs_pred = np.array([np.dot(prec, prec.T) for prec in precs_chol_pred])
precs_est = np.array([linalg.inv(cov) for cov in covars_pred])
assert_array_almost_equal(precs_est, precs_pred)
# special case 2, assuming resp are all ones
resp = np.ones((n_samples, 1))
nk = np.array([n_samples])
xk = X.mean(axis=0).reshape((1, -1))
covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
ecov = EmpiricalCovariance(assume_centered=False)
ecov.fit(X)
assert_almost_equal(ecov.error_norm(covars_pred[0], norm='frobenius'), 0)
assert_almost_equal(ecov.error_norm(covars_pred[0], norm='spectral'), 0)
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred, 'full')
precs_pred = np.array([np.dot(prec, prec.T) for prec in precs_chol_pred])
precs_est = np.array([linalg.inv(cov) for cov in covars_pred])
assert_array_almost_equal(precs_est, precs_pred)
def test_suffstat_sk_tied():
    # use the relation S_tied = sum_k (Nk * Sk) / N
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 500, 2, 2
resp = rng.rand(n_samples, n_components)
resp = resp / resp.sum(axis=1)[:, np.newaxis]
X = rng.rand(n_samples, n_features)
nk = resp.sum(axis=0)
xk = np.dot(resp.T, X) / nk[:, np.newaxis]
covars_pred_full = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
covars_pred_full = np.sum(nk[:, np.newaxis, np.newaxis] * covars_pred_full,
0) / n_samples
covars_pred_tied = _estimate_gaussian_covariances_tied(resp, X, nk, xk, 0)
ecov = EmpiricalCovariance()
ecov.covariance_ = covars_pred_full
assert_almost_equal(ecov.error_norm(covars_pred_tied, norm='frobenius'), 0)
assert_almost_equal(ecov.error_norm(covars_pred_tied, norm='spectral'), 0)
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred_tied, 'tied')
precs_pred = np.dot(precs_chol_pred, precs_chol_pred.T)
precs_est = linalg.inv(covars_pred_tied)
assert_array_almost_equal(precs_est, precs_pred)
def test_suffstat_sk_diag():
# test against 'full' case
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 500, 2, 2
resp = rng.rand(n_samples, n_components)
resp = resp / resp.sum(axis=1)[:, np.newaxis]
X = rng.rand(n_samples, n_features)
nk = resp.sum(axis=0)
xk = np.dot(resp.T, X) / nk[:, np.newaxis]
covars_pred_full = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
covars_pred_diag = _estimate_gaussian_covariances_diag(resp, X, nk, xk, 0)
ecov = EmpiricalCovariance()
for (cov_full, cov_diag) in zip(covars_pred_full, covars_pred_diag):
ecov.covariance_ = np.diag(np.diag(cov_full))
cov_diag = np.diag(cov_diag)
assert_almost_equal(ecov.error_norm(cov_diag, norm='frobenius'), 0)
assert_almost_equal(ecov.error_norm(cov_diag, norm='spectral'), 0)
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred_diag, 'diag')
assert_almost_equal(covars_pred_diag, 1. / precs_chol_pred ** 2)
def test_gaussian_suffstat_sk_spherical():
    # computing the spherical covariance is equivalent to computing the variance
    # of the one-dimensional data after flattening, n_components=1
rng = np.random.RandomState(0)
n_samples, n_features = 500, 2
X = rng.rand(n_samples, n_features)
X = X - X.mean()
resp = np.ones((n_samples, 1))
nk = np.array([n_samples])
xk = X.mean()
covars_pred_spherical = _estimate_gaussian_covariances_spherical(resp, X,
nk, xk, 0)
covars_pred_spherical2 = (np.dot(X.flatten().T, X.flatten()) /
(n_features * n_samples))
assert_almost_equal(covars_pred_spherical, covars_pred_spherical2)
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred_spherical,
'spherical')
assert_almost_equal(covars_pred_spherical, 1. / precs_chol_pred ** 2)
def test_compute_log_det_cholesky():
n_features = 2
rand_data = RandomData(np.random.RandomState(0))
for covar_type in COVARIANCE_TYPE:
covariance = rand_data.covariances[covar_type]
        if covar_type == 'full':
            predicted_det = np.array([linalg.det(cov) for cov in covariance])
        elif covar_type == 'tied':
            predicted_det = linalg.det(covariance)
        elif covar_type == 'diag':
            predicted_det = np.array([np.prod(cov) for cov in covariance])
        elif covar_type == 'spherical':
            predicted_det = covariance ** n_features
        # We compute the cholesky decomposition of the covariance matrix
        expected_det = _compute_log_det_cholesky(_compute_precision_cholesky(
            covariance, covar_type), covar_type, n_features=n_features)
        assert_array_almost_equal(expected_det, - .5 * np.log(predicted_det))
def _naive_lmvnpdf_diag(X, means, covars):
resp = np.empty((len(X), len(means)))
stds = np.sqrt(covars)
for i, (mean, std) in enumerate(zip(means, stds)):
resp[:, i] = stats.norm.logpdf(X, mean, std).sum(axis=1)
return resp
def test_gaussian_mixture_log_probabilities():
from sklearn.mixture._gaussian_mixture import _estimate_log_gaussian_prob
# test against with _naive_lmvnpdf_diag
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_samples = 500
n_features = rand_data.n_features
n_components = rand_data.n_components
means = rand_data.means
covars_diag = rng.rand(n_components, n_features)
X = rng.rand(n_samples, n_features)
log_prob_naive = _naive_lmvnpdf_diag(X, means, covars_diag)
# full covariances
precs_full = np.array([np.diag(1. / np.sqrt(x)) for x in covars_diag])
log_prob = _estimate_log_gaussian_prob(X, means, precs_full, 'full')
assert_array_almost_equal(log_prob, log_prob_naive)
# diag covariances
precs_chol_diag = 1. / np.sqrt(covars_diag)
log_prob = _estimate_log_gaussian_prob(X, means, precs_chol_diag, 'diag')
assert_array_almost_equal(log_prob, log_prob_naive)
# tied
covars_tied = np.array([x for x in covars_diag]).mean(axis=0)
precs_tied = np.diag(np.sqrt(1. / covars_tied))
log_prob_naive = _naive_lmvnpdf_diag(X, means,
[covars_tied] * n_components)
log_prob = _estimate_log_gaussian_prob(X, means, precs_tied, 'tied')
assert_array_almost_equal(log_prob, log_prob_naive)
# spherical
covars_spherical = covars_diag.mean(axis=1)
precs_spherical = 1. / np.sqrt(covars_diag.mean(axis=1))
log_prob_naive = _naive_lmvnpdf_diag(X, means,
[[k] * n_features for k in
covars_spherical])
log_prob = _estimate_log_gaussian_prob(X, means,
precs_spherical, 'spherical')
assert_array_almost_equal(log_prob, log_prob_naive)
# skip tests on weighted_log_probabilities, log_weights
def test_gaussian_mixture_estimate_log_prob_resp():
# test whether responsibilities are normalized
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=5)
n_samples = rand_data.n_samples
n_features = rand_data.n_features
n_components = rand_data.n_components
X = rng.rand(n_samples, n_features)
for covar_type in COVARIANCE_TYPE:
weights = rand_data.weights
means = rand_data.means
precisions = rand_data.precisions[covar_type]
g = GaussianMixture(n_components=n_components, random_state=rng,
weights_init=weights, means_init=means,
precisions_init=precisions,
covariance_type=covar_type)
g.fit(X)
resp = g.predict_proba(X)
assert_array_almost_equal(resp.sum(axis=1), np.ones(n_samples))
assert_array_equal(g.weights_init, weights)
assert_array_equal(g.means_init, means)
assert_array_equal(g.precisions_init, precisions)
def test_gaussian_mixture_predict_predict_proba():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
Y = rand_data.Y
g = GaussianMixture(n_components=rand_data.n_components,
random_state=rng, weights_init=rand_data.weights,
means_init=rand_data.means,
precisions_init=rand_data.precisions[covar_type],
covariance_type=covar_type)
# Check a warning message arrive if we don't do fit
assert_raise_message(NotFittedError,
"This GaussianMixture instance is not fitted "
"yet. Call 'fit' with appropriate arguments "
"before using this estimator.", g.predict, X)
g.fit(X)
Y_pred = g.predict(X)
Y_pred_proba = g.predict_proba(X).argmax(axis=1)
assert_array_equal(Y_pred, Y_pred_proba)
assert adjusted_rand_score(Y, Y_pred) > .95
@pytest.mark.filterwarnings("ignore:.*did not converge.*")
@pytest.mark.parametrize('seed, max_iter, tol', [
(0, 2, 1e-7), # strict non-convergence
(1, 2, 1e-1), # loose non-convergence
(3, 300, 1e-7), # strict convergence
(4, 300, 1e-1), # loose convergence
])
def test_gaussian_mixture_fit_predict(seed, max_iter, tol):
rng = np.random.RandomState(seed)
rand_data = RandomData(rng)
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
Y = rand_data.Y
g = GaussianMixture(n_components=rand_data.n_components,
random_state=rng, weights_init=rand_data.weights,
means_init=rand_data.means,
precisions_init=rand_data.precisions[covar_type],
covariance_type=covar_type,
max_iter=max_iter, tol=tol)
# check if fit_predict(X) is equivalent to fit(X).predict(X)
f = copy.deepcopy(g)
Y_pred1 = f.fit(X).predict(X)
Y_pred2 = g.fit_predict(X)
assert_array_equal(Y_pred1, Y_pred2)
assert adjusted_rand_score(Y, Y_pred2) > .95
def test_gaussian_mixture_fit_predict_n_init():
# Check that fit_predict is equivalent to fit.predict, when n_init > 1
X = np.random.RandomState(0).randn(1000, 5)
gm = GaussianMixture(n_components=5, n_init=5, random_state=0)
y_pred1 = gm.fit_predict(X)
y_pred2 = gm.predict(X)
assert_array_equal(y_pred1, y_pred2)
def test_gaussian_mixture_fit():
# recover the ground truth
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_features = rand_data.n_features
n_components = rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
g = GaussianMixture(n_components=n_components, n_init=20,
reg_covar=0, random_state=rng,
covariance_type=covar_type)
g.fit(X)
# needs more data to pass the test with rtol=1e-7
assert_allclose(np.sort(g.weights_), np.sort(rand_data.weights),
rtol=0.1, atol=1e-2)
arg_idx1 = g.means_[:, 0].argsort()
arg_idx2 = rand_data.means[:, 0].argsort()
assert_allclose(g.means_[arg_idx1], rand_data.means[arg_idx2],
rtol=0.1, atol=1e-2)
if covar_type == 'full':
prec_pred = g.precisions_
prec_test = rand_data.precisions['full']
elif covar_type == 'tied':
prec_pred = np.array([g.precisions_] * n_components)
prec_test = np.array([rand_data.precisions['tied']] * n_components)
elif covar_type == 'spherical':
prec_pred = np.array([np.eye(n_features) * c
for c in g.precisions_])
prec_test = np.array([np.eye(n_features) * c for c in
rand_data.precisions['spherical']])
elif covar_type == 'diag':
prec_pred = np.array([np.diag(d) for d in g.precisions_])
prec_test = np.array([np.diag(d) for d in
rand_data.precisions['diag']])
arg_idx1 = np.trace(prec_pred, axis1=1, axis2=2).argsort()
arg_idx2 = np.trace(prec_test, axis1=1, axis2=2).argsort()
for k, h in zip(arg_idx1, arg_idx2):
ecov = EmpiricalCovariance()
ecov.covariance_ = prec_test[h]
            # the accuracy depends on the amount of data and the randomness of rng
assert_allclose(ecov.error_norm(prec_pred[k]), 0, atol=0.15)
def test_gaussian_mixture_fit_best_params():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components = rand_data.n_components
n_init = 10
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
g = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng, covariance_type=covar_type)
ll = []
for _ in range(n_init):
g.fit(X)
ll.append(g.score(X))
ll = np.array(ll)
g_best = GaussianMixture(n_components=n_components,
n_init=n_init, reg_covar=0, random_state=rng,
covariance_type=covar_type)
g_best.fit(X)
assert_almost_equal(ll.min(), g_best.score(X))
def test_gaussian_mixture_fit_convergence_warning():
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=1)
n_components = rand_data.n_components
max_iter = 1
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
g = GaussianMixture(n_components=n_components, n_init=1,
max_iter=max_iter, reg_covar=0, random_state=rng,
covariance_type=covar_type)
assert_warns_message(ConvergenceWarning,
'Initialization %d did not converge. '
'Try different init parameters, '
'or increase max_iter, tol '
'or check for degenerate data.'
% max_iter, g.fit, X)
def test_multiple_init():
    # Test that multiple inits do not make the fit much worse than a single one
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 50, 5, 2
X = rng.randn(n_samples, n_features)
for cv_type in COVARIANCE_TYPE:
train1 = GaussianMixture(n_components=n_components,
covariance_type=cv_type,
random_state=0).fit(X).score(X)
train2 = GaussianMixture(n_components=n_components,
covariance_type=cv_type,
random_state=0, n_init=5).fit(X).score(X)
assert train2 >= train1
def test_gaussian_mixture_n_parameters():
# Test that the right number of parameters is estimated
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 50, 5, 2
X = rng.randn(n_samples, n_features)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in COVARIANCE_TYPE:
g = GaussianMixture(
n_components=n_components, covariance_type=cv_type,
random_state=rng).fit(X)
assert g._n_parameters() == n_params[cv_type]
def test_bic_1d_1component():
    # Test that all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
rng = np.random.RandomState(0)
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
bic_full = GaussianMixture(n_components=n_components,
covariance_type='full',
random_state=rng).fit(X).bic(X)
for covariance_type in ['tied', 'diag', 'spherical']:
bic = GaussianMixture(n_components=n_components,
covariance_type=covariance_type,
random_state=rng).fit(X).bic(X)
assert_almost_equal(bic_full, bic)
def test_gaussian_mixture_aic_bic():
# Test the aic and bic criteria
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 50, 3, 2
X = rng.randn(n_samples, n_features)
# standard gaussian entropy
sgh = 0.5 * (fast_logdet(np.cov(X.T, bias=1)) +
n_features * (1 + np.log(2 * np.pi)))
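    # Added note (a rough sketch, not part of the original test): sgh is the
    # differential entropy of a Gaussian with the empirical covariance of X,
    # so for n samples the negative log-likelihood of the best single-Gaussian
    # fit is roughly n * sgh.  With p = g._n_parameters() this gives the
    # reference values used below:
    #   AIC ~ 2 * n * sgh + 2 * p
    #   BIC ~ 2 * n * sgh + log(n) * p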
for cv_type in COVARIANCE_TYPE:
g = GaussianMixture(
n_components=n_components, covariance_type=cv_type,
random_state=rng, max_iter=200)
g.fit(X)
aic = 2 * n_samples * sgh + 2 * g._n_parameters()
bic = (2 * n_samples * sgh +
np.log(n_samples) * g._n_parameters())
bound = n_features / np.sqrt(n_samples)
assert (g.aic(X) - aic) / n_samples < bound
assert (g.bic(X) - bic) / n_samples < bound
def test_gaussian_mixture_verbose():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components = rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
g = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng, covariance_type=covar_type,
verbose=1)
h = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng, covariance_type=covar_type,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
h.fit(X)
finally:
sys.stdout = old_stdout
@pytest.mark.filterwarnings('ignore:.*did not converge.*')
@pytest.mark.parametrize("seed", (0, 1, 2))
def test_warm_start(seed):
random_state = seed
rng = np.random.RandomState(random_state)
n_samples, n_features, n_components = 500, 2, 2
X = rng.rand(n_samples, n_features)
    # Assert that warm_start gives the same result for the same total number of iterations
g = GaussianMixture(n_components=n_components, n_init=1, max_iter=2,
reg_covar=0, random_state=random_state,
warm_start=False)
h = GaussianMixture(n_components=n_components, n_init=1, max_iter=1,
reg_covar=0, random_state=random_state,
warm_start=True)
g.fit(X)
score1 = h.fit(X).score(X)
score2 = h.fit(X).score(X)
assert_almost_equal(g.weights_, h.weights_)
assert_almost_equal(g.means_, h.means_)
assert_almost_equal(g.precisions_, h.precisions_)
assert score2 > score1
# Assert that by using warm_start we can converge to a good solution
g = GaussianMixture(n_components=n_components, n_init=1,
max_iter=5, reg_covar=0, random_state=random_state,
warm_start=False, tol=1e-6)
h = GaussianMixture(n_components=n_components, n_init=1,
max_iter=5, reg_covar=0, random_state=random_state,
warm_start=True, tol=1e-6)
g.fit(X)
assert not g.converged_
h.fit(X)
    # depending on the data there is large variability in the number of
    # refits necessary to converge due to the complete randomness of the data
for _ in range(1000):
h.fit(X)
if h.converged_:
break
assert h.converged_
@ignore_warnings(category=ConvergenceWarning)
def test_convergence_detected_with_warm_start():
# We check that convergence is detected when warm_start=True
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components = rand_data.n_components
X = rand_data.X['full']
for max_iter in (1, 2, 50):
gmm = GaussianMixture(n_components=n_components, warm_start=True,
max_iter=max_iter, random_state=rng)
for _ in range(100):
gmm.fit(X)
if gmm.converged_:
break
assert gmm.converged_
assert max_iter >= gmm.n_iter_
def test_score():
covar_type = 'full'
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components = rand_data.n_components
X = rand_data.X[covar_type]
# Check the error message if we don't call fit
gmm1 = GaussianMixture(n_components=n_components, n_init=1,
max_iter=1, reg_covar=0, random_state=rng,
covariance_type=covar_type)
assert_raise_message(NotFittedError,
"This GaussianMixture instance is not fitted "
"yet. Call 'fit' with appropriate arguments "
"before using this estimator.", gmm1.score, X)
# Check score value
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
gmm1.fit(X)
gmm_score = gmm1.score(X)
gmm_score_proba = gmm1.score_samples(X).mean()
assert_almost_equal(gmm_score, gmm_score_proba)
    # Check that the score increases
gmm2 = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng,
covariance_type=covar_type).fit(X)
assert gmm2.score(X) > gmm1.score(X)
def test_score_samples():
covar_type = 'full'
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components = rand_data.n_components
X = rand_data.X[covar_type]
# Check the error message if we don't call fit
gmm = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng, covariance_type=covar_type)
assert_raise_message(NotFittedError,
"This GaussianMixture instance is not fitted "
"yet. Call 'fit' with appropriate arguments "
"before using this estimator.", gmm.score_samples, X)
gmm_score_samples = gmm.fit(X).score_samples(X)
assert gmm_score_samples.shape[0] == rand_data.n_samples
def test_monotonic_likelihood():
    # Check that each EM step without regularization monotonically improves
    # the training set likelihood
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components = rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
gmm = GaussianMixture(n_components=n_components,
covariance_type=covar_type, reg_covar=0,
warm_start=True, max_iter=1, random_state=rng,
tol=1e-7)
current_log_likelihood = -np.infty
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
# Do one training iteration at a time so we can make sure that the
# training log likelihood increases after each iteration.
for _ in range(600):
prev_log_likelihood = current_log_likelihood
current_log_likelihood = gmm.fit(X).score(X)
assert current_log_likelihood >= prev_log_likelihood
if gmm.converged_:
break
assert gmm.converged_
def test_regularisation():
    # Train the GaussianMixture on degenerate data defined by two clusters
    # with zero covariance.
rng = np.random.RandomState(0)
n_samples, n_features = 10, 5
X = np.vstack((np.ones((n_samples // 2, n_features)),
np.zeros((n_samples // 2, n_features))))
for covar_type in COVARIANCE_TYPE:
gmm = GaussianMixture(n_components=n_samples, reg_covar=0,
covariance_type=covar_type, random_state=rng)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
assert_raise_message(ValueError,
"Fitting the mixture model failed because "
"some components have ill-defined empirical "
"covariance (for instance caused by "
"singleton or collapsed samples). Try to "
"decrease the number of components, or "
"increase reg_covar.", gmm.fit, X)
gmm.set_params(reg_covar=1e-6).fit(X)
def test_property():
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components = rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
gmm = GaussianMixture(n_components=n_components,
covariance_type=covar_type, random_state=rng,
n_init=5)
gmm.fit(X)
if covar_type == 'full':
for prec, covar in zip(gmm.precisions_, gmm.covariances_):
assert_array_almost_equal(linalg.inv(prec), covar)
elif covar_type == 'tied':
assert_array_almost_equal(linalg.inv(gmm.precisions_),
gmm.covariances_)
else:
assert_array_almost_equal(gmm.precisions_, 1. / gmm.covariances_)
def test_sample():
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7, n_components=3)
n_features, n_components = rand_data.n_features, rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
gmm = GaussianMixture(n_components=n_components,
covariance_type=covar_type, random_state=rng)
        # To sample, the GaussianMixture must be fitted
assert_raise_message(NotFittedError, "This GaussianMixture instance "
"is not fitted", gmm.sample, 0)
gmm.fit(X)
assert_raise_message(ValueError, "Invalid value for 'n_samples",
gmm.sample, 0)
# Just to make sure the class samples correctly
n_samples = 20000
X_s, y_s = gmm.sample(n_samples)
for k in range(n_components):
if covar_type == 'full':
assert_array_almost_equal(gmm.covariances_[k],
np.cov(X_s[y_s == k].T), decimal=1)
elif covar_type == 'tied':
assert_array_almost_equal(gmm.covariances_,
np.cov(X_s[y_s == k].T), decimal=1)
elif covar_type == 'diag':
assert_array_almost_equal(gmm.covariances_[k],
np.diag(np.cov(X_s[y_s == k].T)),
decimal=1)
else:
assert_array_almost_equal(
gmm.covariances_[k], np.var(X_s[y_s == k] - gmm.means_[k]),
decimal=1)
means_s = np.array([np.mean(X_s[y_s == k], 0)
for k in range(n_components)])
assert_array_almost_equal(gmm.means_, means_s, decimal=1)
# Check shapes of sampled data, see
# https://github.com/scikit-learn/scikit-learn/issues/7701
assert X_s.shape == (n_samples, n_features)
for sample_size in range(1, 100):
X_s, _ = gmm.sample(sample_size)
assert X_s.shape == (sample_size, n_features)
@ignore_warnings(category=ConvergenceWarning)
def test_init():
    # Check that increasing n_init yields a solution that is at least as good
for random_state in range(15):
rand_data = RandomData(np.random.RandomState(random_state),
n_samples=50, scale=1)
n_components = rand_data.n_components
X = rand_data.X['full']
gmm1 = GaussianMixture(n_components=n_components, n_init=1,
max_iter=1, random_state=random_state).fit(X)
gmm2 = GaussianMixture(n_components=n_components, n_init=10,
max_iter=1, random_state=random_state).fit(X)
assert gmm2.lower_bound_ >= gmm1.lower_bound_
| bsd-3-clause |
KristoferHellman/gimli | doc/tutorials/modelling/develop/plot_XX_mod_fd_burgers-2d.py | 3 | 2117 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
###variable declarations
nx = 41
ny = 41
nt = 120
c = 1
dx = 2.0/(nx-1)
dy = 2.0/(ny-1)
sigma = .0009
nu = 0.001
dt = sigma*dx*dy/nu
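###note (added): dt is set from the grid spacing and viscosity through the
###small safety factor sigma, roughly in the spirit of an explicit-diffusion
###stability restriction; refining dx or dy therefore also shrinks dt.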
x = np.linspace(0,2,nx)
y = np.linspace(0,2,ny)
u = np.ones((ny,nx)) ##create an ny x nx array of 1's
v = np.ones((ny,nx))
un = np.ones((ny,nx)) ##
vn = np.ones((ny,nx))
comb = np.ones((ny,nx))
###Assign initial conditions
u[int(.5/dy):int(1/dy+1), int(.5/dx):int(1/dx+1)] = 2 ##set hat function I.C. : u(.5<=x<=1 && .5<=y<=1 ) is 2
v[int(.5/dy):int(1/dy+1), int(.5/dx):int(1/dx+1)] = 2 ##set hat function I.C. : v(.5<=x<=1 && .5<=y<=1 ) is 2
###(plot ICs)
fig = plt.figure(figsize=(11,7), dpi=100)
ax1 = fig.add_subplot(2,2,1, projection='3d')
ax2 = fig.add_subplot(2,2,2, projection='3d')
X,Y = np.meshgrid(x,y)
wire1 = ax1.plot_wireframe(X,Y,u[:], cmap=cm.coolwarm)
wire2 = ax2.plot_wireframe(X,Y,v[:], cmap=cm.coolwarm)
#ax.set_xlim(1,2)
#ax.set_ylim(1,2)
#ax.set_zlim(1,5)
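###(added note) the loop below advances the 2D viscous Burgers system
###   u_t + u*u_x + v*u_y = nu*(u_xx + u_yy)
###   v_t + u*v_x + v*v_y = nu*(v_xx + v_yy)
###using forward Euler in time, backward differences for the convective terms
###and central differences for the diffusive terms.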
for n in range(nt+1): ##loop across number of time steps
un = u.copy()
vn = v.copy()
u[1:-1,1:-1] = un[1:-1,1:-1] - dt/dx*un[1:-1,1:-1]*(un[1:-1,1:-1]-un[0:-2,1:-1])-dt/dy*vn[1:-1,1:-1]* \
(un[1:-1,1:-1]-un[1:-1,0:-2])+nu*dt/dx**2*(un[2:,1:-1]-2*un[1:-1,1:-1]+un[0:-2,1:-1])+ \
nu*dt/dy**2*(un[1:-1,2:]-2*un[1:-1,1:-1]+un[1:-1,0:-2])
v[1:-1,1:-1] = vn[1:-1,1:-1] - dt/dx*un[1:-1,1:-1]*(vn[1:-1,1:-1]-vn[0:-2,1:-1])-dt/dy*vn[1:-1,1:-1]* \
(vn[1:-1,1:-1]-vn[1:-1,0:-2])+nu*dt/dx**2*(vn[2:,1:-1]-2*vn[1:-1,1:-1]+vn[0:-2,1:-1])+ \
nu*dt/dy**2*(vn[1:-1,2:]-2*vn[1:-1,1:-1]+vn[1:-1,0:-2])
u[0,:] = 1
u[-1,:] = 1
u[:,0] = 1
u[:,-1] = 1
v[0,:] = 1
v[-1,:] = 1
v[:,0] = 1
v[:,-1] = 1
#fig = plt.figure(figsize=(11,7), dpi=100)
ax3 = fig.add_subplot(2,2,3, projection='3d')
ax4 = fig.add_subplot(2,2,4, projection='3d')
X,Y = np.meshgrid(x,y)
wire1 = ax3.plot_wireframe(X,Y,u[:])
wire2 = ax4.plot_wireframe(X,Y,v[:])
plt.show()
| gpl-3.0 |
vamsirajendra/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/ticker.py | 69 | 37420 | """
Tick locating and formatting
============================
This module contains classes to support completely configurable tick
locating and formatting. Although the locators know nothing about
major or minor ticks, they are used by the Axis class to support major
and minor tick locating and formatting. Generic tick locators and
formatters are provided, as well as domain specific custom ones.
Tick locating
-------------
The Locator class is the base class for all tick locators. The
locators handle autoscaling of the view limits based on the data
limits, and the choosing of tick locations. A useful semi-automatic
tick locator is MultipleLocator. You initialize this with a base, eg
10, and it picks axis limits and ticks that are multiples of your
base.
The Locator subclasses defined here are
:class:`NullLocator`
No ticks
:class:`FixedLocator`
Tick locations are fixed
:class:`IndexLocator`
locator for index plots (eg. where x = range(len(y)))
:class:`LinearLocator`
evenly spaced ticks from min to max
:class:`LogLocator`
logarithmically ticks from min to max
:class:`MultipleLocator`
ticks and range are a multiple of base;
either integer or float
:class:`OldAutoLocator`
    choose a MultipleLocator and dynamically reassign it for
intelligent ticking during navigation
:class:`MaxNLocator`
finds up to a max number of ticks at nice locations
:class:`AutoLocator`
:class:`MaxNLocator` with simple defaults. This is the default
tick locator for most plotting.
There are a number of locators specialized for date locations - see
the dates module
You can define your own locator by deriving from Locator. You must
override the __call__ method, which returns a sequence of locations,
and you will probably want to override the autoscale method to set the
view limits from the data limits.
If you want to override the default locator, use one of the above or a
custom locator and pass it to the x or y axis instance. The relevant
methods are::
ax.xaxis.set_major_locator( xmajorLocator )
ax.xaxis.set_minor_locator( xminorLocator )
ax.yaxis.set_major_locator( ymajorLocator )
ax.yaxis.set_minor_locator( yminorLocator )
The default minor locator is the NullLocator, i.e., no minor ticks are on by
default.
Tick formatting
---------------
Tick formatting is controlled by classes derived from Formatter. The
formatter operates on a single tick value and returns a string to the
axis.
:class:`NullFormatter`
no labels on the ticks
:class:`FixedFormatter`
set the strings manually for the labels
:class:`FuncFormatter`
user defined function sets the labels
:class:`FormatStrFormatter`
use a sprintf format string
:class:`ScalarFormatter`
default formatter for scalars; autopick the fmt string
:class:`LogFormatter`
formatter for log axes
You can derive your own formatter from the Formatter base class by
simply overriding the ``__call__`` method. The formatter class has access
to the axis view and data limits.
To control the major and minor tick label formats, use one of the
following methods::
ax.xaxis.set_major_formatter( xmajorFormatter )
ax.xaxis.set_minor_formatter( xminorFormatter )
ax.yaxis.set_major_formatter( ymajorFormatter )
ax.yaxis.set_minor_formatter( yminorFormatter )
See :ref:`pylab_examples-major_minor_demo1` for an example of setting
major and minor ticks. See the :mod:`matplotlib.dates` module for
more information and examples of using date locators and formatters.
"""
from __future__ import division
import math
import numpy as np
from matplotlib import rcParams
from matplotlib import cbook
from matplotlib import transforms as mtransforms
class TickHelper:
axis = None
class DummyAxis:
def __init__(self):
self.dataLim = mtransforms.Bbox.unit()
self.viewLim = mtransforms.Bbox.unit()
def get_view_interval(self):
return self.viewLim.intervalx
def set_view_interval(self, vmin, vmax):
self.viewLim.intervalx = vmin, vmax
def get_data_interval(self):
return self.dataLim.intervalx
def set_data_interval(self, vmin, vmax):
self.dataLim.intervalx = vmin, vmax
def set_axis(self, axis):
self.axis = axis
def create_dummy_axis(self):
if self.axis is None:
self.axis = self.DummyAxis()
def set_view_interval(self, vmin, vmax):
self.axis.set_view_interval(vmin, vmax)
def set_data_interval(self, vmin, vmax):
self.axis.set_data_interval(vmin, vmax)
def set_bounds(self, vmin, vmax):
self.set_view_interval(vmin, vmax)
self.set_data_interval(vmin, vmax)
class Formatter(TickHelper):
"""
Convert the tick location to a string
"""
# some classes want to see all the locs to help format
# individual ones
locs = []
def __call__(self, x, pos=None):
        'Return the format for tick val x at position pos; pos=None indicates unspecified'
        raise NotImplementedError('Derived must override')
def format_data(self,value):
return self.__call__(value)
def format_data_short(self,value):
'return a short string version'
return self.format_data(value)
def get_offset(self):
return ''
def set_locs(self, locs):
self.locs = locs
def fix_minus(self, s):
"""
some classes may want to replace a hyphen for minus with the
proper unicode symbol as described `here
<http://sourceforge.net/tracker/index.php?func=detail&aid=1962574&group_id=80706&atid=560720>`_.
The default is to do nothing
        Note, if you use this method, eg in :meth:`format_data` or
        :meth:`__call__`, you probably don't want to use it for
        :meth:`format_data_short` since the toolbar uses this for
        interactive coord reporting and I doubt we can expect GUIs
across platforms will handle the unicode correctly. So for
now the classes that override :meth:`fix_minus` should have an
explicit :meth:`format_data_short` method
"""
return s
class NullFormatter(Formatter):
'Always return the empty string'
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
return ''
class FixedFormatter(Formatter):
'Return fixed strings for tick labels'
def __init__(self, seq):
"""
seq is a sequence of strings. For positions `i<len(seq)` return
*seq[i]* regardless of *x*. Otherwise return ''
"""
self.seq = seq
self.offset_string = ''
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
if pos is None or pos>=len(self.seq): return ''
else: return self.seq[pos]
def get_offset(self):
return self.offset_string
def set_offset_string(self, ofs):
self.offset_string = ofs
class FuncFormatter(Formatter):
"""
User defined function for formatting
"""
def __init__(self, func):
self.func = func
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
return self.func(x, pos)
class FormatStrFormatter(Formatter):
"""
Use a format string to format the tick
"""
def __init__(self, fmt):
self.fmt = fmt
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
return self.fmt % x
class OldScalarFormatter(Formatter):
"""
Tick location is a plain old number.
"""
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
xmin, xmax = self.axis.get_view_interval()
d = abs(xmax - xmin)
return self.pprint_val(x,d)
def pprint_val(self, x, d):
#if the number is not too big and it's an int, format it as an
#int
if abs(x)<1e4 and x==int(x): return '%d' % x
if d < 1e-2: fmt = '%1.3e'
elif d < 1e-1: fmt = '%1.3f'
elif d > 1e5: fmt = '%1.1e'
elif d > 10 : fmt = '%1.1f'
elif d > 1 : fmt = '%1.2f'
else: fmt = '%1.3f'
s = fmt % x
#print d, x, fmt, s
tup = s.split('e')
if len(tup)==2:
mantissa = tup[0].rstrip('0').rstrip('.')
sign = tup[1][0].replace('+', '')
exponent = tup[1][1:].lstrip('0')
s = '%se%s%s' %(mantissa, sign, exponent)
else:
s = s.rstrip('0').rstrip('.')
return s
class ScalarFormatter(Formatter):
"""
Tick location is a plain old number. If useOffset==True and the data range
is much smaller than the data average, then an offset will be determined
such that the tick labels are meaningful. Scientific notation is used for
data < 1e-3 or data >= 1e4.
"""
def __init__(self, useOffset=True, useMathText=False):
# useOffset allows plotting small data ranges with large offsets:
# for example: [1+1e-9,1+2e-9,1+3e-9]
# useMathText will render the offset and scientific notation in mathtext
self._useOffset = useOffset
self._usetex = rcParams['text.usetex']
self._useMathText = useMathText
self.offset = 0
self.orderOfMagnitude = 0
self.format = ''
self._scientific = True
self._powerlimits = rcParams['axes.formatter.limits']
def fix_minus(self, s):
'use a unicode minus rather than hyphen'
if rcParams['text.usetex'] or not rcParams['axes.unicode_minus']: return s
else: return s.replace('-', u'\u2212')
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
if len(self.locs)==0:
return ''
else:
s = self.pprint_val(x)
return self.fix_minus(s)
def set_scientific(self, b):
'''True or False to turn scientific notation on or off
see also :meth:`set_powerlimits`
'''
self._scientific = bool(b)
def set_powerlimits(self, lims):
'''
Sets size thresholds for scientific notation.
e.g. ``xaxis.set_powerlimits((-3, 4))`` sets the pre-2007 default in
which scientific notation is used for numbers less than
1e-3 or greater than 1e4.
See also :meth:`set_scientific`.
'''
assert len(lims) == 2, "argument must be a sequence of length 2"
self._powerlimits = lims
def format_data_short(self,value):
'return a short formatted string representation of a number'
return '%1.3g'%value
def format_data(self,value):
'return a formatted string representation of a number'
s = self._formatSciNotation('%1.10e'% value)
return self.fix_minus(s)
def get_offset(self):
"""Return scientific notation, plus offset"""
if len(self.locs)==0: return ''
s = ''
if self.orderOfMagnitude or self.offset:
offsetStr = ''
sciNotStr = ''
if self.offset:
offsetStr = self.format_data(self.offset)
if self.offset > 0: offsetStr = '+' + offsetStr
if self.orderOfMagnitude:
if self._usetex or self._useMathText:
sciNotStr = self.format_data(10**self.orderOfMagnitude)
else:
sciNotStr = '1e%d'% self.orderOfMagnitude
if self._useMathText:
if sciNotStr != '':
sciNotStr = r'\times\mathdefault{%s}' % sciNotStr
s = ''.join(('$',sciNotStr,r'\mathdefault{',offsetStr,'}$'))
elif self._usetex:
if sciNotStr != '':
sciNotStr = r'\times%s' % sciNotStr
s = ''.join(('$',sciNotStr,offsetStr,'$'))
else:
s = ''.join((sciNotStr,offsetStr))
return self.fix_minus(s)
def set_locs(self, locs):
'set the locations of the ticks'
self.locs = locs
if len(self.locs) > 0:
vmin, vmax = self.axis.get_view_interval()
d = abs(vmax-vmin)
if self._useOffset: self._set_offset(d)
self._set_orderOfMagnitude(d)
self._set_format()
def _set_offset(self, range):
# offset of 20,001 is 20,000, for example
locs = self.locs
if locs is None or not len(locs) or range == 0:
self.offset = 0
return
ave_loc = np.mean(locs)
        if ave_loc: # don't want to take log10(0)
ave_oom = math.floor(math.log10(np.mean(np.absolute(locs))))
range_oom = math.floor(math.log10(range))
if np.absolute(ave_oom-range_oom) >= 3: # four sig-figs
if ave_loc < 0:
self.offset = math.ceil(np.max(locs)/10**range_oom)*10**range_oom
else:
self.offset = math.floor(np.min(locs)/10**(range_oom))*10**(range_oom)
else: self.offset = 0
def _set_orderOfMagnitude(self,range):
# if scientific notation is to be used, find the appropriate exponent
        # if using a numerical offset, find the exponent after applying the offset
if not self._scientific:
self.orderOfMagnitude = 0
return
locs = np.absolute(self.locs)
if self.offset: oom = math.floor(math.log10(range))
else:
if locs[0] > locs[-1]: val = locs[0]
else: val = locs[-1]
if val == 0: oom = 0
else: oom = math.floor(math.log10(val))
if oom <= self._powerlimits[0]:
self.orderOfMagnitude = oom
elif oom >= self._powerlimits[1]:
self.orderOfMagnitude = oom
else:
self.orderOfMagnitude = 0
def _set_format(self):
# set the format string to format all the ticklabels
# The floating point black magic (adding 1e-15 and formatting
# to 8 digits) may warrant review and cleanup.
locs = (np.asarray(self.locs)-self.offset) / 10**self.orderOfMagnitude+1e-15
sigfigs = [len(str('%1.8f'% loc).split('.')[1].rstrip('0')) \
for loc in locs]
sigfigs.sort()
self.format = '%1.' + str(sigfigs[-1]) + 'f'
if self._usetex:
self.format = '$%s$' % self.format
elif self._useMathText:
self.format = '$\mathdefault{%s}$' % self.format
def pprint_val(self, x):
xp = (x-self.offset)/10**self.orderOfMagnitude
if np.absolute(xp) < 1e-8: xp = 0
return self.format % xp
def _formatSciNotation(self, s):
# transform 1e+004 into 1e4, for example
tup = s.split('e')
try:
significand = tup[0].rstrip('0').rstrip('.')
sign = tup[1][0].replace('+', '')
exponent = tup[1][1:].lstrip('0')
if self._useMathText or self._usetex:
if significand == '1':
# reformat 1x10^y as 10^y
significand = ''
if exponent:
exponent = '10^{%s%s}'%(sign, exponent)
if significand and exponent:
return r'%s{\times}%s'%(significand, exponent)
else:
return r'%s%s'%(significand, exponent)
else:
s = ('%se%s%s' %(significand, sign, exponent)).rstrip('e')
return s
except IndexError, msg:
return s
class LogFormatter(Formatter):
"""
Format values for log axis;
if attribute *decadeOnly* is True, only the decades will be labelled.
"""
def __init__(self, base=10.0, labelOnlyBase = True):
"""
*base* is used to locate the decade tick,
which will be the only one to be labeled if *labelOnlyBase*
        is ``True``
"""
self._base = base+0.0
self.labelOnlyBase=labelOnlyBase
self.decadeOnly = True
def base(self,base):
'change the *base* for labeling - warning: should always match the base used for :class:`LogLocator`'
self._base=base
def label_minor(self,labelOnlyBase):
'switch on/off minor ticks labeling'
self.labelOnlyBase=labelOnlyBase
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
vmin, vmax = self.axis.get_view_interval()
d = abs(vmax - vmin)
b=self._base
if x == 0.0:
return '0'
sign = np.sign(x)
# only label the decades
fx = math.log(abs(x))/math.log(b)
isDecade = self.is_decade(fx)
if not isDecade and self.labelOnlyBase: s = ''
elif x>10000: s= '%1.0e'%x
elif x<1: s = '%1.0e'%x
else : s = self.pprint_val(x,d)
if sign == -1:
s = '-%s' % s
return self.fix_minus(s)
def format_data(self,value):
self.labelOnlyBase = False
value = cbook.strip_math(self.__call__(value))
self.labelOnlyBase = True
return value
def format_data_short(self,value):
'return a short formatted string representation of a number'
return '%1.3g'%value
def is_decade(self, x):
n = self.nearest_long(x)
return abs(x-n)<1e-10
def nearest_long(self, x):
if x==0: return 0L
elif x>0: return long(x+0.5)
else: return long(x-0.5)
def pprint_val(self, x, d):
#if the number is not too big and it's an int, format it as an
#int
if abs(x)<1e4 and x==int(x): return '%d' % x
if d < 1e-2: fmt = '%1.3e'
elif d < 1e-1: fmt = '%1.3f'
elif d > 1e5: fmt = '%1.1e'
elif d > 10 : fmt = '%1.1f'
elif d > 1 : fmt = '%1.2f'
else: fmt = '%1.3f'
s = fmt % x
#print d, x, fmt, s
tup = s.split('e')
if len(tup)==2:
mantissa = tup[0].rstrip('0').rstrip('.')
sign = tup[1][0].replace('+', '')
exponent = tup[1][1:].lstrip('0')
s = '%se%s%s' %(mantissa, sign, exponent)
else:
s = s.rstrip('0').rstrip('.')
return s
class LogFormatterExponent(LogFormatter):
"""
Format values for log axis; using ``exponent = log_base(value)``
"""
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05)
d = abs(vmax-vmin)
b=self._base
if x == 0:
return '0'
sign = np.sign(x)
# only label the decades
fx = math.log(abs(x))/math.log(b)
isDecade = self.is_decade(fx)
if not isDecade and self.labelOnlyBase: s = ''
#if 0: pass
elif fx>10000: s= '%1.0e'%fx
#elif x<1: s = '$10^{%d}$'%fx
#elif x<1: s = '10^%d'%fx
elif fx<1: s = '%1.0e'%fx
else : s = self.pprint_val(fx,d)
if sign == -1:
s = '-%s' % s
return self.fix_minus(s)
class LogFormatterMathtext(LogFormatter):
"""
Format values for log axis; using ``exponent = log_base(value)``
"""
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
b = self._base
# only label the decades
if x == 0:
return '$0$'
sign = np.sign(x)
fx = math.log(abs(x))/math.log(b)
isDecade = self.is_decade(fx)
usetex = rcParams['text.usetex']
if sign == -1:
sign_string = '-'
else:
sign_string = ''
if not isDecade and self.labelOnlyBase: s = ''
elif not isDecade:
if usetex:
s = r'$%s%d^{%.2f}$'% (sign_string, b, fx)
else:
s = '$\mathdefault{%s%d^{%.2f}}$'% (sign_string, b, fx)
else:
if usetex:
s = r'$%s%d^{%d}$'% (sign_string, b, self.nearest_long(fx))
else:
s = r'$\mathdefault{%s%d^{%d}}$'% (sign_string, b, self.nearest_long(fx))
return s
class Locator(TickHelper):
"""
Determine the tick locations;
Note, you should not use the same locator between different :class:`~matplotlib.axis.Axis`
because the locator stores references to the Axis data and view
limits
"""
def __call__(self):
'Return the locations of the ticks'
raise NotImplementedError('Derived must override')
def view_limits(self, vmin, vmax):
"""
select a scale for the range from vmin to vmax
        Normally this will be overridden.
"""
return mtransforms.nonsingular(vmin, vmax)
def autoscale(self):
'autoscale the view limits'
return self.view_limits(*self.axis.get_view_interval())
def pan(self, numsteps):
        'Pan by numsteps ticks (can be positive or negative)'
ticks = self()
numticks = len(ticks)
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05)
if numticks>2:
step = numsteps*abs(ticks[0]-ticks[1])
else:
d = abs(vmax-vmin)
step = numsteps*d/6.
vmin += step
vmax += step
self.axis.set_view_interval(vmin, vmax, ignore=True)
def zoom(self, direction):
"Zoom in/out on axis; if direction is >0 zoom in, else zoom out"
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05)
interval = abs(vmax-vmin)
step = 0.1*interval*direction
self.axis.set_view_interval(vmin + step, vmax - step, ignore=True)
def refresh(self):
'refresh internal information based on current lim'
pass
class IndexLocator(Locator):
"""
Place a tick on every multiple of some base number of points
plotted, eg on every 5th point. It is assumed that you are doing
index plotting; ie the axis is 0, len(data). This is mainly
useful for x ticks.
"""
def __init__(self, base, offset):
'place ticks on the i-th data points where (i-offset)%base==0'
self._base = base
self.offset = offset
def __call__(self):
'Return the locations of the ticks'
dmin, dmax = self.axis.get_data_interval()
return np.arange(dmin + self.offset, dmax+1, self._base)
class FixedLocator(Locator):
"""
Tick locations are fixed. If nbins is not None,
the array of possible positions will be subsampled to
keep the number of ticks <= nbins +1.
"""
def __init__(self, locs, nbins=None):
self.locs = locs
self.nbins = nbins
if self.nbins is not None:
self.nbins = max(self.nbins, 2)
def __call__(self):
'Return the locations of the ticks'
if self.nbins is None:
return self.locs
step = max(int(0.99 + len(self.locs) / float(self.nbins)), 1)
return self.locs[::step]
class NullLocator(Locator):
"""
No ticks
"""
def __call__(self):
'Return the locations of the ticks'
return []
class LinearLocator(Locator):
"""
Determine the tick locations
The first time this function is called it will try to set the
number of ticks to make a nice tick partitioning. Thereafter the
number of ticks will be fixed so that interactive navigation will
be nice
"""
def __init__(self, numticks = None, presets=None):
"""
        Use *presets* to set locs: a dict mapping (vmin, vmax) -> locs.
"""
self.numticks = numticks
if presets is None:
self.presets = {}
else:
self.presets = presets
def __call__(self):
'Return the locations of the ticks'
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05)
if vmax<vmin:
vmin, vmax = vmax, vmin
if (vmin, vmax) in self.presets:
return self.presets[(vmin, vmax)]
if self.numticks is None:
self._set_numticks()
if self.numticks==0: return []
ticklocs = np.linspace(vmin, vmax, self.numticks)
return ticklocs
def _set_numticks(self):
self.numticks = 11 # todo; be smart here; this is just for dev
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
if vmax<vmin:
vmin, vmax = vmax, vmin
if vmin==vmax:
vmin-=1
vmax+=1
exponent, remainder = divmod(math.log10(vmax - vmin), 1)
if remainder < 0.5:
exponent -= 1
scale = 10**(-exponent)
vmin = math.floor(scale*vmin)/scale
vmax = math.ceil(scale*vmax)/scale
return mtransforms.nonsingular(vmin, vmax)
def closeto(x,y):
if abs(x-y)<1e-10: return True
else: return False
class Base:
'this solution has some hacks to deal with floating point inaccuracies'
def __init__(self, base):
assert(base>0)
self._base = base
def lt(self, x):
'return the largest multiple of base < x'
d,m = divmod(x, self._base)
if closeto(m,0) and not closeto(m/self._base,1):
return (d-1)*self._base
return d*self._base
def le(self, x):
'return the largest multiple of base <= x'
d,m = divmod(x, self._base)
if closeto(m/self._base,1): # was closeto(m, self._base)
#looks like floating point error
return (d+1)*self._base
return d*self._base
def gt(self, x):
'return the smallest multiple of base > x'
d,m = divmod(x, self._base)
if closeto(m/self._base,1):
#looks like floating point error
return (d+2)*self._base
return (d+1)*self._base
def ge(self, x):
'return the smallest multiple of base >= x'
d,m = divmod(x, self._base)
if closeto(m,0) and not closeto(m/self._base,1):
return d*self._base
return (d+1)*self._base
def get_base(self):
return self._base
class MultipleLocator(Locator):
"""
    Set a tick on every integer that is a multiple of base in the
view interval
"""
def __init__(self, base=1.0):
self._base = Base(base)
def __call__(self):
'Return the locations of the ticks'
vmin, vmax = self.axis.get_view_interval()
if vmax<vmin:
vmin, vmax = vmax, vmin
vmin = self._base.ge(vmin)
base = self._base.get_base()
n = (vmax - vmin + 0.001*base)//base
locs = vmin + np.arange(n+1) * base
return locs
def view_limits(self, dmin, dmax):
"""
Set the view limits to the nearest multiples of base that
contain the data
"""
vmin = self._base.le(dmin)
vmax = self._base.ge(dmax)
if vmin==vmax:
vmin -=1
vmax +=1
return mtransforms.nonsingular(vmin, vmax)
def scale_range(vmin, vmax, n = 1, threshold=100):
dv = abs(vmax - vmin)
maxabsv = max(abs(vmin), abs(vmax))
if maxabsv == 0 or dv/maxabsv < 1e-12:
return 1.0, 0.0
meanv = 0.5*(vmax+vmin)
if abs(meanv)/dv < threshold:
offset = 0
elif meanv > 0:
ex = divmod(math.log10(meanv), 1)[0]
offset = 10**ex
else:
ex = divmod(math.log10(-meanv), 1)[0]
offset = -10**ex
ex = divmod(math.log10(dv/n), 1)[0]
scale = 10**ex
return scale, offset
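# Worked example (added): scale_range(0.0, 1234.0) has dv = 1234 and a mean of
# 617, so abs(meanv)/dv = 0.5 is below the threshold and offset = 0; the scale
# is 10**floor(log10(1234)) = 1000.0, i.e. the function returns (1000.0, 0).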
class MaxNLocator(Locator):
"""
Select no more than N intervals at nice locations.
"""
def __init__(self, nbins = 10, steps = None,
trim = True,
integer=False,
symmetric=False):
self._nbins = int(nbins)
self._trim = trim
self._integer = integer
self._symmetric = symmetric
if steps is None:
self._steps = [1, 1.5, 2, 2.5, 3, 4, 5, 6, 8, 10]
else:
if int(steps[-1]) != 10:
steps = list(steps)
steps.append(10)
self._steps = steps
if integer:
self._steps = [n for n in self._steps if divmod(n,1)[1] < 0.001]
def bin_boundaries(self, vmin, vmax):
nbins = self._nbins
scale, offset = scale_range(vmin, vmax, nbins)
if self._integer:
scale = max(1, scale)
vmin -= offset
vmax -= offset
raw_step = (vmax-vmin)/nbins
scaled_raw_step = raw_step/scale
for step in self._steps:
if step < scaled_raw_step:
continue
step *= scale
best_vmin = step*divmod(vmin, step)[0]
best_vmax = best_vmin + step*nbins
if (best_vmax >= vmax):
break
if self._trim:
extra_bins = int(divmod((best_vmax - vmax), step)[0])
nbins -= extra_bins
return (np.arange(nbins+1) * step + best_vmin + offset)
def __call__(self):
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05)
return self.bin_boundaries(vmin, vmax)
def view_limits(self, dmin, dmax):
if self._symmetric:
maxabs = max(abs(dmin), abs(dmax))
dmin = -maxabs
dmax = maxabs
dmin, dmax = mtransforms.nonsingular(dmin, dmax, expander = 0.05)
return np.take(self.bin_boundaries(dmin, dmax), [0,-1])
def decade_down(x, base=10):
'floor x to the nearest lower decade'
lx = math.floor(math.log(x)/math.log(base))
return base**lx
def decade_up(x, base=10):
'ceil x to the nearest higher decade'
lx = math.ceil(math.log(x)/math.log(base))
return base**lx
def is_decade(x,base=10):
lx = math.log(x)/math.log(base)
return lx==int(lx)
class LogLocator(Locator):
"""
Determine the tick locations for log axes
"""
def __init__(self, base=10.0, subs=[1.0]):
"""
place ticks on the location= base**i*subs[j]
"""
self.base(base)
self.subs(subs)
self.numticks = 15
def base(self,base):
"""
        set the base of the log scaling (major tick every base**i, i integer)
"""
self._base=base+0.0
def subs(self,subs):
"""
        set the minor ticks for the log scaling at every base**i*subs[j]
"""
if subs is None:
self._subs = None # autosub
else:
self._subs = np.asarray(subs)+0.0
def _set_numticks(self):
self.numticks = 15 # todo; be smart here; this is just for dev
def __call__(self):
'Return the locations of the ticks'
b=self._base
vmin, vmax = self.axis.get_view_interval()
if vmin <= 0.0:
vmin = self.axis.get_minpos()
if vmin <= 0.0:
raise ValueError(
"Data has no positive values, and therefore can not be log-scaled.")
vmin = math.log(vmin)/math.log(b)
vmax = math.log(vmax)/math.log(b)
if vmax<vmin:
vmin, vmax = vmax, vmin
numdec = math.floor(vmax)-math.ceil(vmin)
if self._subs is None: # autosub
if numdec>10: subs = np.array([1.0])
elif numdec>6: subs = np.arange(2.0, b, 2.0)
else: subs = np.arange(2.0, b)
else:
subs = self._subs
stride = 1
while numdec/stride+1 > self.numticks:
stride += 1
decades = np.arange(math.floor(vmin),
math.ceil(vmax)+stride, stride)
        if len(subs) > 1 or (len(subs) == 1 and subs[0] != 1.0):
ticklocs = []
for decadeStart in b**decades:
ticklocs.extend( subs*decadeStart )
else:
ticklocs = b**decades
return np.array(ticklocs)
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
if vmax<vmin:
vmin, vmax = vmax, vmin
minpos = self.axis.get_minpos()
if minpos<=0:
raise ValueError(
"Data has no positive values, and therefore can not be log-scaled.")
if vmin <= minpos:
vmin = minpos
if not is_decade(vmin,self._base): vmin = decade_down(vmin,self._base)
if not is_decade(vmax,self._base): vmax = decade_up(vmax,self._base)
if vmin==vmax:
vmin = decade_down(vmin,self._base)
vmax = decade_up(vmax,self._base)
result = mtransforms.nonsingular(vmin, vmax)
return result
class SymmetricalLogLocator(Locator):
"""
Determine the tick locations for log axes
"""
def __init__(self, transform, subs=[1.0]):
"""
place ticks on the location= base**i*subs[j]
"""
self._transform = transform
self._subs = subs
self.numticks = 15
def _set_numticks(self):
self.numticks = 15 # todo; be smart here; this is just for dev
def __call__(self):
'Return the locations of the ticks'
b = self._transform.base
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = self._transform.transform((vmin, vmax))
if vmax<vmin:
vmin, vmax = vmax, vmin
numdec = math.floor(vmax)-math.ceil(vmin)
if self._subs is None:
if numdec>10: subs = np.array([1.0])
elif numdec>6: subs = np.arange(2.0, b, 2.0)
else: subs = np.arange(2.0, b)
else:
subs = np.asarray(self._subs)
stride = 1
while numdec/stride+1 > self.numticks:
stride += 1
decades = np.arange(math.floor(vmin), math.ceil(vmax)+stride, stride)
if len(subs) > 1 or subs[0] != 1.0:
ticklocs = []
for decade in decades:
ticklocs.extend(subs * (np.sign(decade) * b ** np.abs(decade)))
else:
ticklocs = np.sign(decades) * b ** np.abs(decades)
return np.array(ticklocs)
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
b = self._transform.base
if vmax<vmin:
vmin, vmax = vmax, vmin
if not is_decade(abs(vmin), b):
if vmin < 0:
vmin = -decade_up(-vmin, b)
else:
vmin = decade_down(vmin, b)
if not is_decade(abs(vmax), b):
if vmax < 0:
vmax = -decade_down(-vmax, b)
else:
vmax = decade_up(vmax, b)
if vmin == vmax:
if vmin < 0:
vmin = -decade_up(-vmin, b)
vmax = -decade_down(-vmax, b)
else:
vmin = decade_down(vmin, b)
vmax = decade_up(vmax, b)
result = mtransforms.nonsingular(vmin, vmax)
return result
class AutoLocator(MaxNLocator):
def __init__(self):
MaxNLocator.__init__(self, nbins=9, steps=[1, 2, 5, 10])
class OldAutoLocator(Locator):
"""
On autoscale this class picks the best MultipleLocator to set the
view limits and the tick locs.
"""
def __init__(self):
self._locator = LinearLocator()
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def refresh(self):
'refresh internal information based on current lim'
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05)
d = abs(vmax-vmin)
self._locator = self.get_locator(d)
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
d = abs(vmax-vmin)
self._locator = self.get_locator(d)
return self._locator.view_limits(vmin, vmax)
def get_locator(self, d):
'pick the best locator based on a distance'
d = abs(d)
if d<=0:
locator = MultipleLocator(0.2)
else:
try: ld = math.log10(d)
except OverflowError:
raise RuntimeError('AutoLocator illegal data interval range')
fld = math.floor(ld)
base = 10**fld
#if ld==fld: base = 10**(fld-1)
#else: base = 10**fld
if d >= 5*base : ticksize = base
elif d >= 2*base : ticksize = base/2.0
else : ticksize = base/5.0
locator = MultipleLocator(ticksize)
return locator
__all__ = ('TickHelper', 'Formatter', 'FixedFormatter',
'NullFormatter', 'FuncFormatter', 'FormatStrFormatter',
'ScalarFormatter', 'LogFormatter', 'LogFormatterExponent',
'LogFormatterMathtext', 'Locator', 'IndexLocator',
'FixedLocator', 'NullLocator', 'LinearLocator',
'LogLocator', 'AutoLocator', 'MultipleLocator',
'MaxNLocator', )
| agpl-3.0 |
phbradley/tcr-dist | read_random_tcr_distances.py | 1 | 18308 | from basic import *
import numpy as np
import parse_tsv
from scipy.stats import gaussian_kde
import util
#from tcr_distances_blosum
import tcr_distances
import html_colors
from all_genes import all_genes
with Parser(locals()) as p:
#p.str('organism').required()
p.str('clones_file').required()
p.str('file_tag').default('')
p.str('outfile_prefix')
p.str('organism').required()
p.float('nbrdist_rescale').default(1.0) # --float_arg 9.6
p.str('single_nbrdist_tag_for_plotting').default('wtd_nbrdist10')
p.flag('include_non_wtd') # --flag_arg (no argument passed)
p.flag('show') # --flag_arg (no argument passed)
p.flag('paper_supp') # --flag_arg (no argument passed)
p.flag('exclude_xr') # --flag_arg (no argument passed)
import matplotlib
matplotlib.rcParams['mathtext.default'] = 'regular'
if not show: matplotlib.use('Agg')
import matplotlib.pyplot as plt
def greek_seg( seg ):
return seg.replace( 'A', r'$\alpha$' ).replace( 'B', r'$\beta$' )
fake_chains = util.detect_fake_chains( clones_file )
## don't exclude xr guys just for discrimination vs random
##
ABs = ['A','B','AB']
## read random distances
random_nbrdists_file = '{}_random_nbrdists{}.tsv'.format( clones_file[:-4],file_tag )
assert exists(random_nbrdists_file)
outlogfile = '{}_random_aucs.log'.format( clones_file[:-4] )
print 'making:',outlogfile
outlog = open( outlogfile,'w')
if outfile_prefix is None:
outfile_prefix = random_nbrdists_file[:-4]
infields = []
rand_nbrdists = []
for line in open( random_nbrdists_file,'r'):
if not infields:
infields = line[:-1].split('\t')
rand_nbrdist_tags = [ x for x in infields if ( 'rank' not in x and 'nbrdist' in x ) ]
else:
l = parse_tsv_line( line[:-1], infields )
nbrdists = dict( ( ( x, l[x]) for x in rand_nbrdist_tags ) )
rand_nbrdists.append( nbrdists )
clones_file_with_nbrdists = '{}_nbrdists.tsv'.format( clones_file[:-4] )
header = open( clones_file_with_nbrdists,'r').readline()[:-1].split('\t')
nbrdist_tags = [ x for x in header if x in rand_nbrdist_tags and ( 'wtd' in x or include_non_wtd ) ]
nbrdist_tags.sort()
num_nbrdist_tags = len(nbrdist_tags)
Log('parsing {} for {} nbrdist_tags'.format( clones_file_with_nbrdists, num_nbrdist_tags ) )
tcr_fields = nbrdist_tags + [ 'va_genes','vb_genes','cdr3a','cdr3b']
all_tcrs = parse_tsv.parse_tsv_file( clones_file_with_nbrdists, ['epitope'], tcr_fields )
## look for cross-reactive tcrs
for e in all_tcrs:
new_tcrs = []
for l in all_tcrs[e]:
va_reps = frozenset( ( all_genes[organism][x].rep for x in l[-4].split(';') ) )
vb_reps = frozenset( ( all_genes[organism][x].rep for x in l[-3].split(';') ) )
new_tcrs.append( [ (nbrdist_rescale * float(x)) for x in l[:num_nbrdist_tags] ] +
[ va_reps, vb_reps, l[-2], l[-1], False ] ) ## add X-reactive flag
all_tcrs[e] = new_tcrs
va_genes_index = num_nbrdist_tags
vb_genes_index = num_nbrdist_tags+1
cdr3a_index = num_nbrdist_tags+2
cdr3b_index = num_nbrdist_tags+3
def same_tcr( a, b ):
return ( a[cdr3a_index] == b[cdr3a_index] and
a[cdr3b_index] == b[cdr3b_index] and
(not a[va_genes_index].isdisjoint( b[va_genes_index])) and
(not a[vb_genes_index].isdisjoint( b[vb_genes_index]) ) )
if exclude_xr:
print 'finding X-reactive'
for e1 in all_tcrs:
for e2 in all_tcrs:
if e1==e2: continue
for a in all_tcrs[e1]:
for b in all_tcrs[e2]:
if same_tcr(a,b):
print 'X-react:',e1,a[cdr3a_index],a[cdr3b_index],e2,b[cdr3a_index],b[cdr3b_index]
a[-1] = True
b[-1] = True
epitopes = all_tcrs.keys()
epitopes.sort()
all_aucs_random = {}
all_aucs_others = {}
nbrdist_tags_for_plotting = []
for epitope in epitopes:
for chains in ABs:
prefix = '{}_{}_'.format(epitope,chains)
for ii_nbrdist_tag, nbrdist_tag in enumerate( nbrdist_tags ):
if not nbrdist_tag.startswith(prefix): continue
nbrdist_tag_suffix = nbrdist_tag[len(prefix):]
nbrdist_tag_suffix_w_chains = chains+'_'+nbrdist_tag[len(prefix):]
if nbrdist_tag_suffix not in all_aucs_others:
all_aucs_others[nbrdist_tag_suffix] = []
all_aucs_random[nbrdist_tag_suffix] = []
nbrdist_tags_for_plotting.append( nbrdist_tag_suffix )
if nbrdist_tag_suffix_w_chains not in all_aucs_others:
all_aucs_others[nbrdist_tag_suffix_w_chains] = []
all_aucs_random[nbrdist_tag_suffix_w_chains] = []
## sort the nbrdist_tags_for_plotting
l = []
for tag in nbrdist_tags_for_plotting:
if 'nbrdist' in tag:
num = int( tag[ tag.index('nbrdist')+7:] )
if num<0:
l.append( ( -1*num, tag, 'nbrdist{}'.format(-1*num)) )
else:
l.append( ( 100*num,tag, 'nbrdist{}P'.format(num)))
else:
assert 'dens' in tag
sdev = int( tag[ tag.index('dens')+4:] )
l.append( ( 10000*sdev, tag, 'nbrdens{}'.format(sdev)))
l.sort()
nbrdist_tags_for_plotting = [x[1] for x in l] ## now sorted from most focused to most averaged
nbrdist_labels_for_plotting = [x[2] for x in l] ## now sorted from most focused to most averaged
#print 'nbrdist_tags_for_plotting:',nbrdist_tags_for_plotting
#print 'nbrdist_labels_for_plotting:',nbrdist_tags_for_plotting
nrows = len(epitopes)
ncols = len(nbrdist_tags_for_plotting)
nrows_single = len(epitopes)
ncols_single = 6
top_margin_inches = 1.25
bottom_margin_inches = 0.5
left_margin_inches = 1.0
right_margin_inches = 1.0
plot_height_inches = 2.0 * nrows
plot_width_inches = 3.0 * ncols
fig_height = top_margin_inches + plot_height_inches + bottom_margin_inches
fig_width = left_margin_inches + plot_width_inches + right_margin_inches
fig_width_summary = 12.0
top_margin = float( plot_height_inches + bottom_margin_inches ) / fig_height
bottom_margin = float( bottom_margin_inches ) / fig_height
left_margin = float( left_margin_inches ) / fig_width
right_margin = float( left_margin_inches + plot_width_inches ) / fig_width
assert single_nbrdist_tag_for_plotting in nbrdist_tags_for_plotting
## these hold dats for the single_nbrdist_tag_for_plotting nbrdist_tag
save_epitope_nbrdists = {}
save_epitope_rocs = {}
for ii_epitope, epitope in enumerate(epitopes):
other_epitopes = [ x for x in epitopes if x != epitope ]
for ii_chains,chains in enumerate( ABs ):
prefix = '{}_{}_'.format(epitope,chains)
for ii_nbrdist_tag, nbrdist_tag in enumerate( nbrdist_tags ):
if not nbrdist_tag.startswith(prefix): continue
nbrdist_tag_suffix = nbrdist_tag[len(prefix):]
nbrdist_tag_suffix_w_chains = chains+'_'+nbrdist_tag[len(prefix):]
if nbrdist_tag_suffix not in all_aucs_others:
all_aucs_others[nbrdist_tag_suffix] = []
all_aucs_random[nbrdist_tag_suffix] = []
if nbrdist_tag_suffix_w_chains not in all_aucs_others:
all_aucs_others[nbrdist_tag_suffix_w_chains] = []
all_aucs_random[nbrdist_tag_suffix_w_chains] = []
positive_nbrdists = [ x[ ii_nbrdist_tag ] for x in all_tcrs[ epitope ] if not x[-1] ]
negative_nbrdists_random = [ nbrdist_rescale*float( x[ nbrdist_tag ] ) for x in rand_nbrdists ]
sign_factor = -1.0 if 'dens' in nbrdist_tag else 1.0
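            ## added note: 'dens' measures are presumably densities (larger =
            ## closer to the repertoire) while nbrdist measures are distances
            ## (smaller = closer), so the sign is flipped before the ROC/AUC.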
auc_random, xvals_random, yvals_random = tcr_distances.compute_auc( positive_nbrdists, negative_nbrdists_random,
sign_factor = sign_factor )
outlog.write( 'auc_random {:7.3f} {:6d} {:6d} {:2s} {} {}\n'\
.format( auc_random, len(positive_nbrdists), len(negative_nbrdists_random),
chains, epitope, nbrdist_tag_suffix ) )
all_aucs_random[nbrdist_tag_suffix].append( auc_random )
all_aucs_random[nbrdist_tag_suffix_w_chains].append( auc_random )
## now versus TCRs from other epitopes
negative_nbrdists_others = [ x[ ii_nbrdist_tag ] for e2 in other_epitopes for x in all_tcrs[e2] if not x[-1] ]
auc_others, xvals_others, yvals_others = tcr_distances.compute_auc( positive_nbrdists, negative_nbrdists_others,
sign_factor = sign_factor )
outlog.write( 'auc_others {:7.3f} {:6d} {:6d} {:2s} {} {}\n'\
.format( auc_others, len(positive_nbrdists), len(negative_nbrdists_others),
chains, epitope, nbrdist_tag_suffix ) )
all_aucs_others[nbrdist_tag_suffix].append( auc_others )
all_aucs_others[nbrdist_tag_suffix_w_chains].append( auc_others )
## plotting ############################################################################################
# which plotno?
assert nbrdist_tag_suffix in nbrdist_tags_for_plotting
ii_nbrdist_tag_suffix = nbrdist_tags_for_plotting.index( nbrdist_tag_suffix )
plotno = ii_epitope * ncols + ii_nbrdist_tag_suffix + 1
## the kde-smoothed distributions
plt.figure(2*ii_chains+1,figsize=(fig_width,fig_height))
plt.subplot( nrows,ncols,plotno )
mn = min( positive_nbrdists + negative_nbrdists_random + negative_nbrdists_others )
mx = max( positive_nbrdists + negative_nbrdists_random + negative_nbrdists_others )
if chains in fake_chains:
continue ## the next line will fail if all positive_nbrdists are 0
positive_density = gaussian_kde( positive_nbrdists )
negative_density_random = gaussian_kde( negative_nbrdists_random )
if other_epitopes: negative_density_others = gaussian_kde( negative_nbrdists_others )
#line_style = ['--',':','-'][ ii_chains ]
line_style = '-'
xs = np.linspace( mn, mx, 100 )
ys0 = positive_density(xs)
if other_epitopes: ys1 = negative_density_others(xs)
ys2 = negative_density_random(xs)
plt.plot( xs, ys0, line_style, c='r' )
if other_epitopes: plt.plot( xs, ys1, line_style, c='g' )
plt.plot( xs, ys2, line_style, c='b' )
plt.title('{} {}'.format(epitope, nbrdist_labels_for_plotting[ ii_nbrdist_tag_suffix ] ) )
plt.suptitle('Smoothed {} {} distributions\nRed= epitope-specific TCRs\ngreen= TCRs from other epitopes\nblue= random TCRs'.format(chains, 'nbrdens' if 'dens' in nbrdist_tag else 'nbrdist' ),y=1.0)
plt.subplots_adjust( hspace = 0.3, bottom = bottom_margin, right = right_margin,
top = top_margin, left = left_margin )
if 'dens' in nbrdist_tag:
ymn,ymx = plt.ylim()
ymx = min( ymx, 4*max(ys0) )
plt.ylim( (ymn,ymx) )
plt.figure(2*ii_chains+2,figsize=(fig_width,fig_height))
plt.subplot( nrows,ncols,plotno )
#line_style = ['--',':','-'][ ii_chains ]
line_style = '-'
if other_epitopes:
plt.plot( xvals_others, yvals_others, line_style, c='g',label='others {}'.format(int(100*auc_others) ))
plt.plot( xvals_random, yvals_random, line_style, c='b',label='random {}'.format(int(100*auc_random) ))
plt.title('{} {}'.format(epitope, nbrdist_labels_for_plotting[ ii_nbrdist_tag_suffix ] ) )
plt.suptitle('{} ROC curves (AUROCs in legend)\nRed= epitope-specific TCRs\ngreen= TCRs from other epitopes\nblue= random TCRs'.format(chains),y=1.0)
plt.subplots_adjust( hspace = 0.3, bottom = bottom_margin, right = right_margin, top = top_margin,
left = left_margin )
plt.legend(loc='lower right',fontsize='small')
if nbrdist_tag_suffix == single_nbrdist_tag_for_plotting:
save_epitope_rocs[ epitope ] = ( xvals_random, yvals_random, auc_random )
save_epitope_nbrdists[ epitope ] = positive_nbrdists
## now put everything into a single plot
plt.figure(7,figsize=(fig_width_summary,fig_height))
plotno = ii_epitope * ncols_single + ii_chains + 1
plt.subplot( nrows_single, ncols_single, plotno )
## density:
plt.plot( xs, ys0, line_style, c='r' )
if other_epitopes: plt.plot( xs, ys1, line_style, c='g' )
plt.plot( xs, ys2, line_style, c='b' )
plt.yticks([],[])
if paper_supp:
locs,labels = plt.xticks()
print 'locs,labels:',locs,labels
newlocs,newlabels = [],[]
for loc in locs:
if abs(loc/100.0 - int(loc/100.0))<1e-3:
newlocs.append(loc)
newlabels.append(str(int(loc+1e-3)))
plt.xticks(newlocs,newlabels)
plt.title('{} {}'.format(epitope,greek_seg(chains)))
if 'dens' in nbrdist_tag:
ymn,ymx = plt.ylim()
ymx = min( ymx, 4*max(ys0) )
plt.ylim( (ymn,ymx) )
plotno = ii_epitope * ncols_single + ii_chains + 4
plt.subplot( nrows_single, ncols_single, plotno )
## ROC:
if other_epitopes:
plt.plot( xvals_others, yvals_others, line_style, c='g',label='others {}'.format(int(100*auc_others) ))
plt.plot( xvals_random, yvals_random, line_style, c='b',label='random {}'.format(int(100*auc_random) ))
plt.title('{} {}'.format(epitope,greek_seg(chains)))
plt.legend(loc='lower right',fontsize='small')
if paper_supp: ## special ticks
plt.xticks( [0,.2,.4,.6,.8,1.0], ['0','.2','.4','.6','.8','1'] )
plt.yticks( [0,.2,.4,.6,.8,1.0], ['0','.2','.4','.6','.8','1'] )
## suptitle
if paper_supp:
title = 'Smoothed NN-distance histograms (left columns) and ROC curves (right columns, AUROCs given in legend)\nRed= epitope-specific TCRs, green= TCRs from other epitopes, blue= random background TCRs'
plt.suptitle( title, y=( top_margin + 4.0 )/5.0 )
else:
plt.suptitle('Smoothed {} histograms (left cols) and ROC curves (right cols, AUROCs in legend)\nRed= epitope-specific TCRs, green= TCRs from other epitopes, blue= random TCRs'\
.format( nbrdist_labels_for_plotting[ ii_nbrdist_tag_suffix ] ), y=1.0)
plt.subplots_adjust( hspace = 0.3, bottom = bottom_margin, right = 0.98, top = top_margin, left = 0.05 )
for nbrdist_tag_suffix in sorted( all_aucs_others.keys()):
l_random = all_aucs_random[nbrdist_tag_suffix]
l_others = all_aucs_others[nbrdist_tag_suffix]
outlog.write( 'avg_auc_random {:7.3f} {}\n'.format( sum( l_random ) / len( l_random ), nbrdist_tag_suffix ) )
outlog.write( 'avg_auc_others {:7.3f} {}\n'.format( sum( l_others ) / len( l_others ), nbrdist_tag_suffix ) )
outlog.close()
for ii_roc in range(2):
for ii_chains,chains in enumerate(ABs):
figno = 2*ii_chains + ii_roc + 1
figtype = 'roc' if ii_roc else 'nbrdists'
pngfile = '{}_{}_{}.png'.format(outfile_prefix, figtype, chains )
print 'making:',pngfile
plt.figure(figno)
plt.savefig(pngfile)
if ii_roc==0:
util.readme(pngfile, """KDE-smoothed histograms of different {} nbrdist measures, comparing each epitope-specific repertoire (red)
to TCRs from the other repertoires (green) and to random TCRs (blue).<br><br>
""".format(chains))
else:
util.readme( pngfile, """ROC curves showing true- (y-axis) versus false-positives (x-axis) as the sorting metric increases. Legends give the area under the curve (AUROC) values, ranging from 100 (perfect discrimination, curve goes straight
up then straight across), to 50 (random), to 0 (total failure, all negatives come before all positives, curve goes straight across then straight up).<br><br>
""".format(chains))
figno = 7
pngfile = '{}_summary.png'.format(outfile_prefix)
plt.figure(figno)
print 'making:',pngfile
plt.savefig(pngfile)
util.readme( pngfile, """KDE-smoothed nbrdist10P histograms for alpha (col 1), beta (col 2), and alpha-beta (col 3). ROC curves for
the same in columns 4-6.<br><br>""")
## make a figure showing all the nbrdist distributions (left col) and all the ROC curves
figno = 10 ## hope this is larger than the others
plt.figure(figno,figsize=(12,8))
plt.subplot(121) ## the nbrdist distributions
mn = 0
mx = max( ( max(x) for x in save_epitope_nbrdists.values() ) )
for epitope,color in zip( epitopes, html_colors.get_rank_colors_no_lights(len(epitopes))):
nbrdists = save_epitope_nbrdists[epitope]
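# gaussian_kde gives a smoothed density of this epitope's nbrdist values; every
# epitope is evaluated on the same [mn, mx] grid so the superimposed curves are comparable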
positive_density = gaussian_kde( nbrdists )
xs = np.linspace( mn, mx, 100 )
ys0 = positive_density(xs)
plt.plot( xs,ys0,c=color,label='{} ({:.1f})'.format(epitope,sum(nbrdists)/len(nbrdists)) )
plt.legend(fontsize=9,frameon=False,loc='best')
plt.subplot(122) ## now the ROC curves
for epitope,color in zip( epitopes, html_colors.get_rank_colors_no_lights(len(epitopes))):
xvals,yvals,auc = save_epitope_rocs[epitope]
plt.plot( xvals,yvals,c=color,label='{} ({:.3f})'.format(epitope,auc))
plt.xlim((0,1.0))
plt.ylim((0,1.0))
plt.legend(fontsize=9,frameon=False,loc='best')
pngfile = '{}_nbrdist_roc_superpositions.png'.format(outfile_prefix)
print 'making:',pngfile
plt.savefig(pngfile)
util.readme( pngfile, """
Superimposed KDE-smoothed NNdistance ({}) distributions (left) and ROC curves (right) for all epitopes (paired-chain analyses).<br><br>
""".format(single_nbrdist_tag_for_plotting))
if show:
plt.show()
| mit |
kellyschrock/ardupilot | Tools/LogAnalyzer/tests/TestOptFlow.py | 32 | 14968 | from LogAnalyzer import Test,TestResult
import DataflashLog
from math import sqrt
import numpy as np
import matplotlib.pyplot as plt
class TestFlow(Test):
'''test optical flow sensor scale factor calibration'''
#
# Use the following procedure to log the calibration data. It is assumed that the optical flow sensor has been
# correctly aligned, is focussed and the test is performed over a textured surface with adequate lighting.
# Note that the strobing effect from non-incandescent artificial lighting can produce poor optical flow measurements.
#
# 1) Set LOG_DISARMED and FLOW_TYPE to 10 and verify that ATT and OF messages are being logged onboard
# 2) Place on level ground, apply power and wait for EKF to complete attitude alignment
# 3) Keeping the copter level, lift it to shoulder height and rock between +-20 and +-30 degrees
# in roll about an axis that passes through the flow sensor lens assembly. The time taken to rotate from
# maximum left roll to maximum right roll should be about 1 second.
# 4) Repeat 3) about the pitch axis
# 5) Holding the copter level, lower it to the ground and remove power
# 6) Transfer the logfile from the sdcard.
# 7) Open a terminal and cd to the ardupilot/Tools/LogAnalyzer directory
# 8) Run the analysis by entering 'python LogAnalyzer.py <log file name including full path>'
# 9) Check the OpticalFlow test status printed to the screen. The analysis plots are saved to
# flow_calibration.pdf and the recommended scale factors to flow_calibration.param
def __init__(self):
Test.__init__(self)
self.name = "OpticalFlow"
def run(self, logdata, verbose):
self.result = TestResult()
self.result.status = TestResult.StatusType.GOOD
def FAIL():
self.result.status = TestResult.StatusType.FAIL
def WARN():
if self.result.status != TestResult.StatusType.FAIL:
self.result.status = TestResult.StatusType.WARN
try:
# tuning parameters used by the algorithm
tilt_threshold = 15 # roll and pitch threshold used to start and stop calibration (deg)
quality_threshold = 124 # minimum flow quality required for data to be used by the curve fit (N/A)
min_rate_threshold = 0.0 # if the gyro rate is less than this, the data will not be used by the curve fit (rad/sec)
max_rate_threshold = 2.0 # if the gyro rate is greater than this, the data will not be used by the curve fit (rad/sec)
param_std_threshold = 5.0 # maximum allowable 1-std uncertainty in scaling parameter (scale factor * 1000)
param_abs_threshold = 200 # max/min allowable scale factor parameter. Values of FLOW_FXSCALER and FLOW_FYSCALER outside the range of +-param_abs_threshold indicate a sensor configuration problem.
min_num_points = 100 # minimum number of points required for a curve fit - this is necessary, but not sufficient condition - the standard deviation estimate of the fit gradient is also important.
# get the existing scale parameters
flow_fxscaler = logdata.parameters["FLOW_FXSCALER"]
flow_fyscaler = logdata.parameters["FLOW_FYSCALER"]
# load required optical flow data
if "OF" in logdata.channels:
flowX = np.zeros(len(logdata.channels["OF"]["flowX"].listData))
for i in range(len(logdata.channels["OF"]["flowX"].listData)):
(line, flowX[i]) = logdata.channels["OF"]["flowX"].listData[i]
bodyX = np.zeros(len(logdata.channels["OF"]["bodyX"].listData))
for i in range(len(logdata.channels["OF"]["bodyX"].listData)):
(line, bodyX[i]) = logdata.channels["OF"]["bodyX"].listData[i]
flowY = np.zeros(len(logdata.channels["OF"]["flowY"].listData))
for i in range(len(logdata.channels["OF"]["flowY"].listData)):
(line, flowY[i]) = logdata.channels["OF"]["flowY"].listData[i]
bodyY = np.zeros(len(logdata.channels["OF"]["bodyY"].listData))
for i in range(len(logdata.channels["OF"]["bodyY"].listData)):
(line, bodyY[i]) = logdata.channels["OF"]["bodyY"].listData[i]
flow_time_us = np.zeros(len(logdata.channels["OF"]["TimeUS"].listData))
for i in range(len(logdata.channels["OF"]["TimeUS"].listData)):
(line, flow_time_us[i]) = logdata.channels["OF"]["TimeUS"].listData[i]
flow_qual = np.zeros(len(logdata.channels["OF"]["Qual"].listData))
for i in range(len(logdata.channels["OF"]["Qual"].listData)):
(line, flow_qual[i]) = logdata.channels["OF"]["Qual"].listData[i]
else:
FAIL()
self.result.statusMessage = "FAIL: no optical flow data\n"
return
# load required attitude data
if "ATT" in logdata.channels:
Roll = np.zeros(len(logdata.channels["ATT"]["Roll"].listData))
for i in range(len(logdata.channels["ATT"]["Roll"].listData)):
(line, Roll[i]) = logdata.channels["ATT"]["Roll"].listData[i]
Pitch = np.zeros(len(logdata.channels["ATT"]["Pitch"].listData))
for i in range(len(logdata.channels["ATT"]["Pitch"].listData)):
(line, Pitch[i]) = logdata.channels["ATT"]["Pitch"].listData[i]
att_time_us = np.zeros(len(logdata.channels["ATT"]["TimeUS"].listData))
for i in range(len(logdata.channels["ATT"]["TimeUS"].listData)):
(line, att_time_us[i]) = logdata.channels["ATT"]["TimeUS"].listData[i]
else:
FAIL()
self.result.statusMessage = "FAIL: no attitude data\n"
return
# calculate the start time for the roll calibration
startTime = int(0)
startRollIndex = int(0)
for i in range(len(Roll)):
if abs(Roll[i]) > tilt_threshold:
startTime = att_time_us[i]
break
for i in range(len(flow_time_us)):
if flow_time_us[i] > startTime:
startRollIndex = i
break
# calculate the end time for the roll calibration
endTime = int(0)
endRollIndex = int(0)
for i in range(len(Roll)-1,-1,-1):
if abs(Roll[i]) > tilt_threshold:
endTime = att_time_us[i]
break
for i in range(len(flow_time_us)-1,-1,-1):
if flow_time_us[i] < endTime:
endRollIndex = i
break
# check we have enough roll data points
if (endRollIndex - startRollIndex <= min_num_points):
FAIL()
self.result.statusMessage = "FAIL: insufficient roll data pointsa\n"
return
# resample roll test data excluding data before first movement and after last movement
# also exclude data where there is insufficient angular rate
flowX_resampled = []
bodyX_resampled = []
flowX_time_us_resampled = []
for i in range(len(Roll)):
if (i >= startRollIndex) and (i <= endRollIndex) and (abs(bodyX[i]) > min_rate_threshold) and (abs(bodyX[i]) < max_rate_threshold) and (flow_qual[i] > quality_threshold):
flowX_resampled.append(flowX[i])
bodyX_resampled.append(bodyX[i])
flowX_time_us_resampled.append(flow_time_us[i])
# calculate the start time for the pitch calibration
startTime = 0
startPitchIndex = int(0)
for i in range(len(Pitch)):
if abs(Pitch[i]) > tilt_threshold:
startTime = att_time_us[i]
break
for i in range(len(flow_time_us)):
if flow_time_us[i] > startTime:
startPitchIndex = i
break
# calculate the end time for the pitch calibration
endTime = 0
endPitchIndex = int(0)
for i in range(len(Pitch)-1,-1,-1):
if abs(Pitch[i]) > tilt_threshold:
endTime = att_time_us[i]
break
for i in range(len(flow_time_us)-1,-1,-1):
if flow_time_us[i] < endTime:
endPitchIndex = i
break
# check we have enough pitch data points
if (endPitchIndex - startPitchIndex <= min_num_points):
FAIL()
self.result.statusMessage = "FAIL: insufficient pitch data pointsa\n"
return
# resample pitch test data excluding data before first movement and after last movement
# also exclude data where there is insufficient or too much angular rate
flowY_resampled = []
bodyY_resampled = []
flowY_time_us_resampled = []
for i in range(len(Roll)):
if (i >= startPitchIndex) and (i <= endPitchIndex) and (abs(bodyY[i]) > min_rate_threshold) and (abs(bodyY[i]) < max_rate_threshold) and (flow_qual[i] > quality_threshold):
flowY_resampled.append(flowY[i])
bodyY_resampled.append(bodyY[i])
flowY_time_us_resampled.append(flow_time_us[i])
# fit a straight line to the flow vs body rate data and calculate the scale factor parameter required to achieve a slope of 1
coef_flow_x , cov_x = np.polyfit(bodyX_resampled,flowX_resampled,1,rcond=None, full=False, w=None, cov=True)
coef_flow_y , cov_y = np.polyfit(bodyY_resampled,flowY_resampled,1,rcond=None, full=False, w=None, cov=True)
# taking the existing scale factor parameters into account, calculate the parameter values required to achieve a unity slope
flow_fxscaler_new = int(1000 * (((1 + 0.001 * float(flow_fxscaler))/coef_flow_x[0] - 1)))
flow_fyscaler_new = int(1000 * (((1 + 0.001 * float(flow_fyscaler))/coef_flow_y[0] - 1)))
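# Worked example with hypothetical numbers (a sketch, not taken from a real log): assuming the
# logged flow already includes the existing scaler, a fitted slope of 0.8 with FLOW_FXSCALER = 0
# gives flow_fxscaler_new = int(1000 * ((1 + 0.001*0) / 0.8 - 1)) = 250, i.e. the flow output
# needs a further 25% boost to bring the flow-vs-gyro slope back to unity.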
# Do a sanity check on the scale factor variance
if sqrt(cov_x[0][0]) > param_std_threshold or sqrt(cov_y[0][0]) > param_std_threshold:
FAIL()
self.result.statusMessage = "FAIL: inaccurate fit - poor quality or insufficient data\nFLOW_FXSCALER 1STD = %u\nFLOW_FYSCALER 1STD = %u\n" % (round(1000*sqrt(cov_x[0][0])),round(1000*sqrt(cov_y[0][0])))
# Do a sanity check on the scale factors
if abs(flow_fxscaler_new) > param_abs_threshold or abs(flow_fyscaler_new) > param_abs_threshold:
FAIL()
self.result.statusMessage = "FAIL: required scale factors are excessive\nFLOW_FXSCALER=%i\nFLOW_FYSCALER=%i\n" % (flow_fxscaler,flow_fyscaler)
# display recommended scale factors
self.result.statusMessage = "Set FLOW_FXSCALER to %i\nSet FLOW_FYSCALER to %i\n\nCal plots saved to flow_calibration.pdf\nCal parameters saved to flow_calibration.param\n\nFLOW_FXSCALER 1STD = %u\nFLOW_FYSCALER 1STD = %u\n" % (flow_fxscaler_new,flow_fyscaler_new,round(1000*sqrt(cov_x[0][0])),round(1000*sqrt(cov_y[0][0])))
# calculate fit display data
body_rate_display = [-max_rate_threshold,max_rate_threshold]
fit_coef_x = np.poly1d(coef_flow_x)
flowX_display = fit_coef_x(body_rate_display)
fit_coef_y = np.poly1d(coef_flow_y)
flowY_display = fit_coef_y(body_rate_display)
# plot and save calibration test points to PDF
from matplotlib.backends.backend_pdf import PdfPages
output_plot_filename = "flow_calibration.pdf"
pp = PdfPages(output_plot_filename)
plt.figure(1,figsize=(20,13))
plt.subplot(2,1,1)
plt.plot(bodyX_resampled,flowX_resampled,'b', linestyle=' ', marker='o',label="test points")
plt.plot(body_rate_display,flowX_display,'r',linewidth=2.5,label="linear fit")
plt.title('X axis flow rate vs gyro rate')
plt.ylabel('flow rate (rad/s)')
plt.xlabel('gyro rate (rad/sec)')
plt.grid()
plt.legend(loc='upper left')
# draw plots
plt.subplot(2,1,2)
plt.plot(bodyY_resampled,flowY_resampled,'b', linestyle=' ', marker='o',label="test points")
plt.plot(body_rate_display,flowY_display,'r',linewidth=2.5,label="linear fit")
plt.title('Y axis flow rate vs gyro rate')
plt.ylabel('flow rate (rad/s)')
plt.xlabel('gyro rate (rad/sec)')
plt.grid()
plt.legend(loc='upper left')
pp.savefig()
plt.figure(2,figsize=(20,13))
plt.subplot(2,1,1)
plt.plot(flow_time_us,flowX,'b',label="flow rate - all")
plt.plot(flow_time_us,bodyX,'r',label="gyro rate - all")
plt.plot(flowX_time_us_resampled,flowX_resampled,'c', linestyle=' ', marker='o',label="flow rate - used")
plt.plot(flowX_time_us_resampled,bodyX_resampled,'m', linestyle=' ', marker='o',label="gyro rate - used")
plt.title('X axis flow and body rate vs time')
plt.ylabel('rate (rad/s)')
plt.xlabel('time (usec)')
plt.grid()
plt.legend(loc='upper left')
# draw plots
plt.subplot(2,1,2)
plt.plot(flow_time_us,flowY,'b',label="flow rate - all")
plt.plot(flow_time_us,bodyY,'r',label="gyro rate - all")
plt.plot(flowY_time_us_resampled,flowY_resampled,'c', linestyle=' ', marker='o',label="flow rate - used")
plt.plot(flowY_time_us_resampled,bodyY_resampled,'m', linestyle=' ', marker='o',label="gyro rate - used")
plt.title('Y axis flow and body rate vs time')
plt.ylabel('rate (rad/s)')
plt.xlabel('time (usec)')
plt.grid()
plt.legend(loc='upper left')
pp.savefig()
# close the pdf file
pp.close()
# close all figures
plt.close("all")
# write correction parameters to file
test_results_filename = "flow_calibration.param"
file = open(test_results_filename,"w")
file.write("FLOW_FXSCALER"+" "+str(flow_fxscaler_new)+"\n")
file.write("FLOW_FYSCALER"+" "+str(flow_fyscaler_new)+"\n")
file.close()
except KeyError as e:
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = str(e) + ' not found'
| gpl-3.0 |
chugunovyar/factoryForBuild | env/lib/python2.7/site-packages/matplotlib/delaunay/triangulate.py | 21 | 10191 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
import warnings
import numpy as np
from matplotlib._delaunay import delaunay
from .interpolate import LinearInterpolator, NNInterpolator
from matplotlib.cbook import warn_deprecated
warn_deprecated('1.4',
name='matplotlib.delaunay',
alternative='matplotlib.tri.Triangulation',
obj_type='module')
__all__ = ['Triangulation', 'DuplicatePointWarning']
class DuplicatePointWarning(RuntimeWarning):
"""Duplicate points were passed in to the triangulation routine.
"""
class Triangulation(object):
"""A Delaunay triangulation of points in a plane.
Triangulation(x, y)
x, y -- the coordinates of the points as 1-D arrays of floats
Let us make the following definitions:
npoints = number of points input
nedges = number of edges in the triangulation
ntriangles = number of triangles in the triangulation
point_id = an integer identifying a particular point (specifically, an
index into x and y), range(0, npoints)
edge_id = an integer identifying a particular edge, range(0, nedges)
triangle_id = an integer identifying a particular triangle
range(0, ntriangles)
Attributes: (all should be treated as read-only to maintain consistency)
x, y -- the coordinates of the points as 1-D arrays of floats.
circumcenters -- (ntriangles, 2) array of floats giving the (x,y)
coordinates of the circumcenters of each triangle (indexed by a
triangle_id).
edge_db -- (nedges, 2) array of point_id's giving the points forming
each edge in no particular order; indexed by an edge_id.
triangle_nodes -- (ntriangles, 3) array of point_id's giving the points
forming each triangle in counter-clockwise order; indexed by a
triangle_id.
triangle_neighbors -- (ntriangles, 3) array of triangle_id's giving the
neighboring triangle; indexed by a triangle_id.
The value can also be -1 meaning that that edge is on the convex hull
of the points and there is no neighbor on that edge. The values are
ordered such that triangle_neighbors[tri, i] corresponds with the edge
*opposite* triangle_nodes[tri, i]. As such, these neighbors are also
in counter-clockwise order.
hull -- list of point_id's giving the nodes which form the convex hull
of the point set. This list is sorted in counter-clockwise order.
Duplicate points.
If there are no duplicate points, Triangulation stores the specified
x and y arrays and there is no difference between the client's and
Triangulation's understanding of point indices used in edge_db,
triangle_nodes and hull.
If there are duplicate points, they are removed from the stored
self.x and self.y as the underlying delaunay code cannot deal with
duplicates. len(self.x) is therefore equal to len(x) minus the
number of duplicate points. Triangulation's edge_db, triangle_nodes
and hull refer to point indices in self.x and self.y, for internal
consistency within Triangulation and the corresponding Interpolator
classes. Client code must take care to deal with this in one of
two ways:
1. Ignore the x,y it specified in Triangulation's constructor and
use triangulation.x and triangulation.y instead, as these are
consistent with edge_db, triangle_nodes and hull.
2. If using the x,y the client specified then edge_db,
triangle_nodes and hull should be passed through the function
to_client_point_indices() first.
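A hedged sketch of option 2 with hypothetical coordinates (the last point
duplicates an earlier one, so the stored arrays shrink to four points):
>>> import numpy as np
>>> x = [0.0, 1.0, 0.0, 1.0, 1.0]
>>> y = [0.0, 0.0, 1.0, 1.0, 1.0]
>>> tri = Triangulation(x, y) # emits a DuplicatePointWarning
>>> hull_client = tri.to_client_point_indices(np.asarray(tri.hull))
hull_client now indexes into the original five-element x and y lists.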
"""
def __init__(self, x, y):
self.x = np.asarray(x, dtype=np.float64)
self.y = np.asarray(y, dtype=np.float64)
if self.x.shape != self.y.shape or len(self.x.shape) != 1:
raise ValueError("x,y must be equal-length 1-D arrays")
self.old_shape = self.x.shape
duplicates = self._get_duplicate_point_indices()
if len(duplicates) > 0:
warnings.warn(
"Input data contains duplicate x,y points; some values are "
"ignored.",
DuplicatePointWarning,
)
# self.j_unique is the array of non-duplicate indices, in
# increasing order.
self.j_unique = np.delete(np.arange(len(self.x)), duplicates)
self.x = self.x[self.j_unique]
self.y = self.y[self.j_unique]
else:
self.j_unique = None
# If there are duplicate points, need a map of point indices used
# by delaunay to those used by client. If there are no duplicate
# points then the map is not needed. Either way, the map is
# conveniently the same as j_unique, so share it.
self._client_point_index_map = self.j_unique
self.circumcenters, self.edge_db, self.triangle_nodes, \
self.triangle_neighbors = delaunay(self.x, self.y)
self.hull = self._compute_convex_hull()
def _get_duplicate_point_indices(self):
"""Return array of indices of x,y points that are duplicates of
previous points. Indices are in no particular order.
"""
# Indices of sorted x,y points.
j_sorted = np.lexsort(keys=(self.x, self.y))
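# A sorted point is a duplicate when it shares both x and y with the point just
# before it in sorted order; the leading False keeps the first sorted point.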
mask_duplicates = np.hstack([
False,
(np.diff(self.x[j_sorted]) == 0) &
(np.diff(self.y[j_sorted]) == 0),
])
# Array of duplicate point indices, in no particular order.
return j_sorted[mask_duplicates]
def _compute_convex_hull(self):
"""Extract the convex hull from the triangulation information.
The output will be a list of point_id's in counter-clockwise order
forming the convex hull of the data set.
"""
border = (self.triangle_neighbors == -1)
edges = {}
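# Each border edge (triangle_neighbors == -1) is stored as start_node -> end_node;
# the node order inherited from the counter-clockwise triangle_nodes keeps the
# hull edges oriented counter-clockwise as well.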
edges.update(dict(zip(self.triangle_nodes[border[:, 0]][:, 1],
self.triangle_nodes[border[:, 0]][:, 2])))
edges.update(dict(zip(self.triangle_nodes[border[:, 1]][:, 2],
self.triangle_nodes[border[:, 1]][:, 0])))
edges.update(dict(zip(self.triangle_nodes[border[:, 2]][:, 0],
self.triangle_nodes[border[:, 2]][:, 1])))
# Take an arbitrary starting point and its subsequent node
hull = list(edges.popitem())
while edges:
hull.append(edges.pop(hull[-1]))
# hull[-1] == hull[0], so remove hull[-1]
hull.pop()
return hull
def to_client_point_indices(self, array):
"""Converts any array of point indices used within this class to
refer to point indices within the (x,y) arrays specified in the
constructor before duplicates were removed.
"""
if self._client_point_index_map is not None:
return self._client_point_index_map[array]
else:
return array
def linear_interpolator(self, z, default_value=np.nan):
"""Get an object which can interpolate within the convex hull by
assigning a plane to each triangle.
z -- an array of floats giving the known function values at each point
in the triangulation.
"""
z = np.asarray(z, dtype=np.float64)
if z.shape != self.old_shape:
raise ValueError("z must be the same shape as x and y")
if self.j_unique is not None:
z = z[self.j_unique]
return LinearInterpolator(self, z, default_value)
def nn_interpolator(self, z, default_value=np.nan):
"""Get an object which can interpolate within the convex hull by
the natural neighbors method.
z -- an array of floats giving the known function values at each point
in the triangulation.
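A hedged usage sketch (tri and z are hypothetical; the grid-slice indexing
shown below mirrors the historical griddata usage, with the y-range slice
given first and a complex step meaning "number of samples"):
>>> interp = tri.nn_interpolator(z)
>>> zi = interp[0.0:1.0:100j, 0.0:1.0:100j] # 100x100 grid over y and x in [0, 1]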
"""
z = np.asarray(z, dtype=np.float64)
if z.shape != self.old_shape:
raise ValueError("z must be the same shape as x and y")
if self.j_unique is not None:
z = z[self.j_unique]
return NNInterpolator(self, z, default_value)
def prep_extrapolator(self, z, bbox=None):
if bbox is None:
bbox = (self.x[0], self.x[0], self.y[0], self.y[0])
minx, maxx, miny, maxy = np.asarray(bbox, np.float64)
minx = min(minx, np.minimum.reduce(self.x))
miny = min(miny, np.minimum.reduce(self.y))
maxx = max(maxx, np.maximum.reduce(self.x))
maxy = max(maxy, np.maximum.reduce(self.y))
M = max((maxx - minx) / 2, (maxy - miny) / 2)
midx = (minx + maxx) / 2.0
midy = (miny + maxy) / 2.0
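# Place three pseudo-points well outside the data's bounding box (3*M from its
# centre) so the augmented triangulation encloses any point a caller is likely
# to extrapolate to.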
xp, yp = np.array([[midx + 3 * M, midx, midx - 3 * M],
[midy, midy + 3 * M, midy - 3 * M]])
x1 = np.hstack((self.x, xp))
y1 = np.hstack((self.y, yp))
newtri = self.__class__(x1, y1)
# do a least-squares fit to a plane to make pseudo-data
xy1 = np.ones((len(self.x), 3), np.float64)
xy1[:, 0] = self.x
xy1[:, 1] = self.y
from numpy.dual import lstsq
c, res, rank, s = lstsq(xy1, z)
zp = np.hstack((z, xp * c[0] + yp * c[1] + c[2]))
return newtri, zp
def nn_extrapolator(self, z, bbox=None, default_value=np.nan):
newtri, zp = self.prep_extrapolator(z, bbox)
return newtri.nn_interpolator(zp, default_value)
def linear_extrapolator(self, z, bbox=None, default_value=np.nan):
newtri, zp = self.prep_extrapolator(z, bbox)
return newtri.linear_interpolator(zp, default_value)
def node_graph(self):
"""Return a graph of node_id's pointing to node_id's.
The arcs of the graph correspond to the edges in the triangulation.
{node_id: set([node_id, ...]), ...}
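A hedged usage sketch (tri is a hypothetical Triangulation instance):
>>> g = tri.node_graph()
>>> g[0] # set of node_ids sharing an edge with node 0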
"""
g = {}
for i, j in self.edge_db:
s = g.setdefault(i, set())
s.add(j)
s = g.setdefault(j, set())
s.add(i)
return g
| gpl-3.0 |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/numpy/fft/fftpack.py | 22 | 45592 | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
from numpy.core import (array, asarray, zeros, swapaxes, shape, conjugate,
take, sqrt)
from . import fftpack_lite as fftpack
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache=_fft_cache):
a = asarray(a)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified."
% n)
try:
# Thread-safety note: We rely on list.pop() here to atomically
# retrieve-and-remove a wsave from the cache. This ensures that no
# other thread can get the same wsave while we're using it.
wsave = fft_cache.setdefault(n, []).pop()
except (IndexError):
wsave = init_function(n)
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0, n)
a = a[index]
else:
index = [slice(None)]*len(s)
index[axis] = slice(0, s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
a = z
if axis != -1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
if axis != -1:
r = swapaxes(r, axis, -1)
# As soon as we put wsave back into the cache, another thread could pick it
# up and start using it, so we must not do this until after we're
# completely done using it ourselves.
fft_cache[n].append(wsave)
return r
def _unitary(norm):
if norm not in (None, "ortho"):
raise ValueError("Invalid norm value %s, should be None or \"ortho\"."
% norm)
return norm is not None
def fft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation.
"""
a = asarray(a).astype(complex, copy=False)
if n is None:
n = a.shape[axis]
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
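# norm="ortho" scales the forward transform by 1/sqrt(n); ifft applies the same
# factor, so the forward/inverse pair is unitary.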
if _unitary(norm):
output *= 1 / sqrt(n)
return output
def ifft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e.,
* ``a[0]`` should contain the zero frequency term,
* ``a[1:n//2]`` should contain the positive-frequency terms,
* ``a[n//2 + 1:]`` should contain the negative-frequency terms, in
increasing order starting from the most negative frequency.
For an even number of input points, ``A[n//2]`` represents the sum of
the values at the positive and negative Nyquist frequencies, as the two
are aliased together. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
...
>>> plt.legend(('real', 'imaginary'))
...
>>> plt.show()
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
def rfft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermitian-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n//2 + 1``.
When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains
the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
If `n` is even, ``A[-1]`` contains the term representing both positive
and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
the largest positive frequency (fs/2*(n-1)/n), and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf,
_real_fft_cache)
if _unitary(norm):
output *= 1 / sqrt(a.shape[axis])
return output
def irfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermitian-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermitian-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
_real_fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
def hfft(a, n=None, axis=-1, norm=None):
"""
Compute the FFT of a signal which has Hermitian symmetry (real spectrum).
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> signal = np.array([1, 2, 3, 4, 3, 2])
>>> np.fft.fft(signal)
array([ 15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j])
>>> np.fft.hfft(signal[:4]) # Input first half of signal
array([ 15., -4., 0., -1., 0., -4.])
>>> np.fft.hfft(signal, 6) # Input entire signal and truncate
array([ 15., -4., 0., -1., 0., -4.])
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
return irfft(conjugate(a), n, axis) * (sqrt(n) if unitary else n)
def ihfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse FFT of a signal which has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT.
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
>>> np.fft.ifft(spectrum)
array([ 1.+0.j, 2.-0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.-0.j])
>>> np.fft.ihfft(spectrum)
array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = conjugate(rfft(a, n, axis))
return output * (1 / (sqrt(n) if unitary else n))
def _cook_nd_args(a, s=None, axes=None, invreal=0):
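# Normalize the (s, axes) pair: default axes to the last len(s) axes, default s to the
# input shape along those axes, and, for the inverse real transforms (invreal set and no
# explicit s), recover the full last-axis length as 2*(m-1).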
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = list(range(-len(s), 0))
if len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if invreal and shapeless:
s[-1] = (a.shape[axes[-1]] - 1) * 2
return s, axes
def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None):
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
itl = list(range(len(axes)))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii], norm=norm)
return a
def fftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform.
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the transform over that axis is
performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
fftshift : Shifts zero-frequency terms to centre of array
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
... 2 * np.pi * np.arange(200) / 34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, fft, norm)
def ifftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, ifft, norm)
def fft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional discrete Fourier Transform
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 50.0 +0.j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5+17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 +4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 -4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5-17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ]])
"""
return _raw_fftnd(a, s, axes, fft, norm)
def ifft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, ifft, norm)
def rfftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be ``s[-1]//2+1``,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
array([[[ 8.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
array([[[ 4.+0.j, 0.+0.j],
[ 4.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1], norm)
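    # The remaining (leading) axes are transformed with the ordinary complex
    # FFT; only the last axis uses the real-input transform above.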
for ii in range(len(axes)-1):
a = fft(a, s[ii], axes[ii], norm)
return a
def rfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional FFT of a real array.
Parameters
----------
a : array
Input array, taken to be real.
s : sequence of ints, optional
Shape of the FFT.
axes : sequence of ints, optional
Axes over which to compute the FFT.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the real 2-D FFT.
See Also
--------
rfftn : Compute the N-dimensional discrete Fourier Transform for real
input.
Notes
-----
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
"""
return rfftn(a, s, axes, norm)
def irfftn(a, s=None, axes=None, norm=None):
"""
Compute the inverse of the N-dimensional FFT of real input.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
and for the same reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
with zeros. If `s` is not given, the shape of the input along the
axes specified by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
    rfftn : The forward n-dimensional FFT of real input,
        of which `irfftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
array([[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
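    # Invert the complex transforms over the leading axes first; the final
    # irfft over the last axis recovers the real-valued output.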
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii], norm)
a = irfft(a, s[-1], axes[-1], norm)
return a
def irfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input array
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
"""
return irfftn(a, s, axes, norm)
| mit |
trmznt/fatools | fatools/lib/fautil/cmds.py | 2 | 10256 | # provide commands for Fragment Analysis (FA)
from fatools.lib import params
from fatools.lib.utils import cerr, cout, cverr, cexit, tokenize, detect_buffer, set_verbosity
import argparse, yaml, csv, os, sys
from io import StringIO
def init_argparser(parser=None):
p = parser if parser else argparse.ArgumentParser('facmd')
p.add_argument('--sqldb', default=False,
help = 'SQLite3 database filename')
p.add_argument('--file', default=None,
help = "Comma-separated FSA filenames (optional)")
p.add_argument('--fsdb', default=None,
help = 'Filesystem-based database')
p.add_argument('--infile', default=None,
help = 'Tab-delimited or CSV manifest file')
# command in ascending order
p.add_argument('--clear', default=False, action='store_true',
help = 'clear (or remove) all peaks from FSA file')
p.add_argument('--align', default=False, action='store_true',
help = 'scan ladder channel, preannotate and align with size standards')
p.add_argument('--call', default=False, action='store_true',
help = 'scan non-ladder channels, preannotate peaks and determine their sizes')
p.add_argument('--bin', default=False, action='store_true',
help = 'bin non-ladder peaks')
p.add_argument('--annotate', default=False, action='store_true',
help = 'annotate non-ladder peaks')
p.add_argument('--plot', default=False, action='store_true',
help = 'plot normalized trace')
p.add_argument('--split-plot', action='store_true',
help='plot dye separately')
p.add_argument('--dendogram', default=False, action='store_true',
help = 'plot dendograms of ladders and alleles')
p.add_argument('--ladder-plot', action='store_true',
help='report and plot ladder alignment for assessment purposes')
# semi-mandatory
p.add_argument('--panel', default="",
            help = 'comma-separated panel code(s) (prebuilt options: GS600LIZ, GS500LIZ)')
p.add_argument('--marker', default="",
help = 'comma-separated marker code(s)')
p.add_argument('--panelfile', default="",
help = 'YAML panel file')
p.add_argument('--markerfile', default="",
help = "YAML marker file")
# options
p.add_argument('--score', default=1.0, type=float,
help = 'minimum alignment score threshold to be plotted')
p.add_argument('--rss', default=-1, type=float,
help = 'maximum rss threshold to be plotted')
p.add_argument('--cluster', default=0, type=int,
help = 'number of cluster for hierarchical clustering alignment')
p.add_argument('--verbose', default=0, type=int,
help = 'show verbosity')
p.add_argument('--cache-path',
help='store cache in other location (defaults to home)')
p.add_argument('--no-cache', default=False, action='store_true',
help = 'do not use caches')
p.add_argument('--plot-file',
help='save --plot or --split-plot result into a file')
p.add_argument('--outfile',
help = 'output filename')
p.add_argument('--commit', default=False, action='store_true',
help = 'commit to database')
return p
def main(args):
if args.verbose != 0:
set_verbosity(args.verbose)
dbh = None
if args.file or args.infile:
cverr(4, 'D: opening FSA file(s)')
fsa_list = open_fsa(args)
elif dbh is None:
cverr(4, 'D: connecting to database')
dbh = get_dbhandler(args)
fsa_list = get_fsa_list(args, dbh)
cerr('I: obtained %d FSA' % len(fsa_list))
if args.commit:
with transaction.manager:
do_facmd(args, fsa_list, dbh)
cerr('** COMMIT to database **')
elif dbh:
cerr('WARNING ** running without database COMMIT! All changes will be discarded!')
if not ( args.test or args.y ):
keys = input('Do you want to continue [y/n]? ')
if not keys.lower().strip().startswith('y'):
sys.exit(1)
do_facmds(args, fsa_list, dbh)
else:
do_facmds(args, fsa_list)
def do_facmds(args, fsa_list, dbh=None):
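    """Run the requested FA commands in pipeline order and report how many
    were executed."""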
executed = 0
if args.clear:
do_clear( args, fsa_list, dbh )
executed += 1
if args.align:
do_align( args, fsa_list, dbh )
executed += 1
if args.call:
do_call( args, fsa_list, dbh )
executed += 1
if args.plot or args.split_plot or args.ladder_plot:
do_plot(args, fsa_list, dbh)
executed += 1
if args.dendogram:
do_dendogram( args, fsa_list, dbh)
executed += 1
if executed == 0:
cerr('W: please provide a relevant command')
else:
cerr('I: executed %d command(s)' % executed)
def do_clear( args, fsa_list, dbh ):
pass
def do_align( args, fsa_list, dbh ):
cerr('I: Aligning size standards...')
for (fsa, sample_code) in fsa_list:
cverr(3, 'D: aligning FSA %s' % fsa.filename)
fsa.align(params.Params())
def do_call( args, fsa_list, dbh ):
cerr('I: Calling non-ladder peaks...')
for (fsa, sample_code) in fsa_list:
cverr(3, 'D: calling FSA %s' % fsa.filename)
fsa.call(params.Params(), args.marker)
def do_plot(args, fsa_list, dbh):
cerr('I: Creating plot...')
from fatools.lib.fautil import plot
plot.plot(args, fsa_list, dbh)
def do_dendogram( args, fsa_list, dbh ):
from fatools.lib.fautil import hclustalign
from matplotlib import pyplot as plt
for (fsa, sample_code) in fsa_list:
c = fsa.get_ladder_channel()
c.scan(params.Params()) # scan first if necessary
ladder = fsa.panel.get_ladder()
peaks = c.get_alleles()
#initial_pair, P, L = hclustalign.hclust_align(peaks, ladder)
P = hclustalign.generate_tree( [ (n.rtime, 0) for n in peaks ] )
L = hclustalign.generate_tree( [ (e, 0) for e in ladder['sizes'] ] )
clusters = hclustalign.fcluster(L.z, args.cluster or ladder['k'], criterion="maxclust")
print(clusters)
clusters = hclustalign.fcluster(P.z, args.cluster or ladder['k'], criterion="maxclust")
print(clusters)
plt.figure()
plt.subplot(121)
hclustalign.dendrogram(L.z, leaf_rotation=90, leaf_font_size=8,
labels = [ x[0] for x in L.p ])
plt.subplot(122)
hclustalign.dendrogram(P.z, leaf_rotation=90, leaf_font_size=8,
labels = [ x[0] for x in P.p ])
plt.show()
def open_fsa( args ):
""" open FSA file(s) and prepare fsa instances
requires: args.file, args.panel, args.panelfile
"""
from fatools.lib.fileio.models import Marker, Panel, FSA
if not args.panel:
cexit('ERR: using FSA file(s) requires --panel argument!')
if not args.panelfile:
cerr('WARN: using default built-in panels')
Panel.upload(params.default_panels)
else:
with open(args.panelfile) as f:
# open a YAML file that describe panel sets
Panel.upload(yaml.load(f))
if not args.markerfile:
Marker.upload(params.default_markers)
else:
raise NotImplementedError()
panel = Panel.get_panel(args.panel)
fsa_list = []
index = 1
# prepare caching
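    # Scanned channel data are cached under <home>/.fatools_caches/channels
    # unless --no-cache is given; --cache-path relocates the cache root.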
cache_path = None
if not args.no_cache:
cache_path = os.path.join(os.path.expanduser('~'), '.fatools_caches', 'channels')
if args.cache_path is not None:
cache_path = os.path.join(args.cache_path, '.fatools_caches', 'channels')
if not os.path.exists(cache_path):
os.makedirs(cache_path)
if args.file:
for fsa_filename in args.file.split(','):
fsa_filename = fsa_filename.strip()
fsa = FSA.from_file(fsa_filename, panel, cache=not args.no_cache,
cache_path=cache_path)
# yield (fsa, str(i))
fsa_list.append( (fsa, str(index)) )
index += 1
elif args.infile:
with open(args.infile) as f:
buf, delim = detect_buffer( f.read() )
inrows = csv.DictReader( StringIO(buf), delimiter=delim )
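            # Each manifest row needs a FILENAME column; OPTIONS, PANEL and
            # SAMPLE are optional and fall back to the command-line arguments
            # or a running index.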
line = 1
index = 1
for r in inrows:
line += 1
fsa_filename = r['FILENAME'].strip()
if fsa_filename.startswith('#'):
continue
if r.get('OPTIONS', None):
options = tokenize( r['OPTIONS'] )
else:
options = None
panel_code = r.get('PANEL', None) or args.panel
panel = Panel.get_panel(panel_code)
fsa = FSA.from_file(fsa_filename, panel, options, cache=not args.no_cache,
cache_path=cache_path)
if 'SAMPLE' in inrows.fieldnames:
# yield (fsa, r['SAMPLE'])
fsa_list.append( (fsa, r['SAMPLE']) )
else:
# yield (fsa, str(index))
fsa_list.append( (fsa, str(index)) )
index += 1
return fsa_list
def get_fsa_list( args, dbh ):
"""
get fsa instance from database based on parameters in args
"""
if not args.batch:
cexit('ERR: using database requires --batch argument!', 1)
batch = dbh.get_batch( args.batch )
if not batch:
cexit('ERR: batch %s not found!' % args.batch, 1)
samples = []
if args.sample:
samples = args.sample.split(',')
fsas = []
if args.fsa:
        fsas = args.fsa.split(',')
panels = []
if args.panel:
panels = args.panel.split(',')
markers = []
if args.marker:
        markers = dbh.get_markers(args.marker.split(','))
fsa_list = []
for sample in batch.samples:
if samples and sample.code not in samples: continue
for assay in sample.assays:
            if fsas and assay.filename not in fsas: continue
if panels and assay.panel.code not in panels: continue
fsa_list.append( (assay, sample.code) )
    cerr('I: number of assays to be processed: %d' % len(fsa_list))
return fsa_list
| lgpl-3.0 |
dynaryu/Wind_multipliers | tests/test_topographic/test_findpeaks.py | 1 | 5370 | """
Title: test_findpeaks.py
Author: Tina Yang, [email protected]
CreationDate: 2014-05-01
Description: Unit testing module for findpeaks function in findpeaks.py
Version: $Rev$
$Id$
"""
import sys
import os.path
import unittest
import numpy as np
from numpy.testing import assert_almost_equal
from matplotlib import pyplot
import logging as log
from test_all_topo_engineered_data import test_line, expect_results
from inspect import getfile, currentframe
def escarpment_factor(profile, ridge, valley, data_spacing):
"""
Calculate escarpment factor
"""
max_escarp = 3
min_escarp = 0.5
nrow = np.size(profile)
H = profile[ridge] - profile[valley]
Lu = abs(ridge - valley) * data_spacing / 2
slope = H / (2 * Lu)
    beta_ind = int(np.minimum(nrow - 1, np.floor(ridge + (2 * Lu / data_spacing))))
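    # beta_ind is the grid point roughly 2*Lu downwind of the ridge,
    # clipped to the end of the profile.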
H_r2beta = profile[ridge] - profile[beta_ind]
D_r2beta = (beta_ind - ridge) * data_spacing
if D_r2beta > 0: # D_r2beta can be 0, 25, 50, ...
slope_r2mL2 = H_r2beta/D_r2beta
# when a symmetrical ridge slope_r2mL2=slope so escarp_factor=1
# If slope_r2mL2=0, escarp_factor=2.5
escarp_factor = 2.5 - 1.5 * slope_r2mL2 / slope
if escarp_factor < min_escarp:
escarp_factor = min_escarp
elif escarp_factor > max_escarp:
escarp_factor = max_escarp
else: # the ridge is on the end
slope_r2mL2 = 999
escarp_factor = 1
return H, slope, slope_r2mL2, escarp_factor
class TestFindpeaks(unittest.TestCase):
def setUp(self):
self.data_spacing = 25
def test_findpeaks(self):
cmd_folder = os.path.realpath(
os.path.abspath(os.path.split(
getfile(currentframe()))[0]))
parent = os.path.abspath(os.path.join(cmd_folder, os.pardir))
grandparent = os.path.abspath(os.path.join(parent, os.pardir))
if grandparent not in sys.path:
sys.path.insert(0, grandparent)
from topographic.findpeaks import findpeaks, findvalleys
        # test for each scenario
for p in range(1, len(test_line)+1):
#for p in range(3, 4):
print '\ntest ' + str(p) + ' ...'
nrow = np.size(test_line[p])
# take the largest integer of each element of the data line
fwd_line = np.floor(test_line[p])
# Get the indices of the ridges & valleys
ridge_ind = findpeaks(fwd_line) # relative ind
valley_ind = findvalleys(fwd_line) # relative ind
print ridge_ind
print valley_ind
nrow = np.size(ridge_ind)
H = np.ones((nrow, 1), dtype=float)
slope = np.ones((nrow, 1), dtype=float)
downwind_slope = np.ones((nrow, 1), dtype=float)
escarp_factor = np.ones((nrow, 1), dtype=float)
if np.size(ridge_ind) == 0: # the DEM is completely flat
log.debug( "Flat line" )
# the DEM is downward slope all the time
elif np.size(ridge_ind) == 1 and ridge_ind[0] == 0:
log.debug( "Downward slope" )
else: # 2 general cases, calculate m, works as Mh.m
if ridge_ind[0] == 0: # (1) down up down up ....
for i in range(1, np.size(ridge_ind)):
H[i], slope[i], downwind_slope[i], escarp_factor[i] = \
escarpment_factor(fwd_line, ridge_ind[i],
valley_ind[i-1],
self.data_spacing)
else: # (2) up dowm up dowm ....
for i in range(0, np.size(ridge_ind)):
H[i], slope[i], downwind_slope[i], escarp_factor[i] = \
escarpment_factor(fwd_line, ridge_ind[i],
valley_ind[i], self.data_spacing)
hill_no = np.size(ridge_ind)
# import pdb
# pdb.set_trace()
scripts_result = np.concatenate([[hill_no], H.flatten(),
slope.flatten(),
downwind_slope.flatten(),
escarp_factor.flatten()])
print scripts_result
print expect_results[p]
#plot the line profile
# point_no = len(test_line[p])
# x = np.arange(point_no)
# y = test_line[p]
# pyplot.plot(x, y, 'g')
# pyplot.show()
assert_almost_equal(scripts_result, expect_results[p], decimal=2,
err_msg='',verbose=True)
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
probcomp/bayeslite | src/bql.py | 1 | 47612 | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BQL execution.
This module implements the main dispatcher for executing different
kinds of BQL phrases. Queries, as in ``SELECT``, ``ESTIMATE``, and so
on, are compiled into SQL; commands, as in ``CREATE TABLE``,
``INSERT``, and the rest of the DDL/DML (Data Definition/Modelling
language) are executed directly.
"""
import itertools
import apsw
import bayeslite.ast as ast
import bayeslite.bqlfn as bqlfn
import bayeslite.compiler as compiler
import bayeslite.core as core
import bayeslite.txn as txn
from bayeslite.exception import BQLError
from bayeslite.guess import bayesdb_guess_stattypes
from bayeslite.read_csv import bayesdb_read_csv_file
from bayeslite.sqlite3_util import sqlite3_quote_name
from bayeslite.util import casefold
from bayeslite.util import cursor_value
def execute_phrase(bdb, phrase, bindings=()):
"""Execute the BQL AST phrase `phrase` and return a cursor of results."""
if isinstance(phrase, ast.Parametrized):
n_numpar = phrase.n_numpar
nampar_map = phrase.nampar_map
phrase = phrase.phrase
assert 0 < n_numpar
else:
n_numpar = 0
nampar_map = None
# Ignore extraneous bindings. XXX Bad idea?
if ast.is_query(phrase):
# Compile the query in the transaction in case we need to
# execute subqueries to determine column lists. Compiling is
# a quick tree descent, so this should be fast.
out = compiler.Output(n_numpar, nampar_map, bindings)
with bdb.savepoint():
compiler.compile_query(bdb, phrase, out)
winders, unwinders = out.getwindings()
return execute_wound(bdb, winders, unwinders, out.getvalue(),
out.getbindings())
if isinstance(phrase, ast.Begin):
txn.bayesdb_begin_transaction(bdb)
return empty_cursor(bdb)
if isinstance(phrase, ast.Rollback):
txn.bayesdb_rollback_transaction(bdb)
return empty_cursor(bdb)
if isinstance(phrase, ast.Commit):
txn.bayesdb_commit_transaction(bdb)
return empty_cursor(bdb)
if isinstance(phrase, ast.CreateTabAs):
assert ast.is_query(phrase.query)
with bdb.savepoint():
if core.bayesdb_has_table(bdb, phrase.name):
if phrase.ifnotexists:
return empty_cursor(bdb)
else:
raise BQLError(bdb,
'Name already defined as table: %s' %
(repr(phrase.name),))
out = compiler.Output(n_numpar, nampar_map, bindings)
qt = sqlite3_quote_name(phrase.name)
temp = 'TEMP ' if phrase.temp else ''
ifnotexists = 'IF NOT EXISTS ' if phrase.ifnotexists else ''
out.write('CREATE %sTABLE %s%s AS ' % (temp, ifnotexists, qt))
compiler.compile_query(bdb, phrase.query, out)
winders, unwinders = out.getwindings()
with compiler.bayesdb_wind(bdb, winders, unwinders):
bdb.sql_execute(out.getvalue(), out.getbindings())
return empty_cursor(bdb)
if isinstance(phrase, ast.CreateTabCsv):
with bdb.savepoint():
table_exists = core.bayesdb_has_table(bdb, phrase.name)
if table_exists:
if phrase.ifnotexists:
return empty_cursor(bdb)
else:
raise BQLError(bdb, 'Table already exists: %s' %
(repr(phrase.name),))
bayesdb_read_csv_file(
bdb, phrase.name, phrase.csv, header=True, create=True)
return empty_cursor(bdb)
if isinstance(phrase, ast.DropTab):
with bdb.savepoint():
sql = 'SELECT COUNT(*) FROM bayesdb_population WHERE tabname = ?'
cursor = bdb.sql_execute(sql, (phrase.name,))
if 0 < cursor_value(cursor):
raise BQLError(bdb, 'Table still in use by populations: %s' %
(repr(phrase.name),))
bdb.sql_execute('DELETE FROM bayesdb_column WHERE tabname = ?',
(phrase.name,))
ifexists = 'IF EXISTS ' if phrase.ifexists else ''
qt = sqlite3_quote_name(phrase.name)
return bdb.sql_execute('DROP TABLE %s%s' % (ifexists, qt))
if isinstance(phrase, ast.AlterTab):
with bdb.savepoint():
table = phrase.table
if not core.bayesdb_has_table(bdb, table):
raise BQLError(bdb, 'No such table: %s' % (repr(table),))
for cmd in phrase.commands:
if isinstance(cmd, ast.AlterTabRenameTab):
# If the names differ only in case, we have to do
# some extra work because SQLite will reject the
# table rename. Note that we may even have table
# == cmd.name here, but if the stored table name
# differs in case from cmd.name, we want to update
# it anyway.
if casefold(table) == casefold(cmd.name):
# Go via a temporary table.
temp = table + '_temp'
while core.bayesdb_has_table(bdb, temp):
temp += '_temp'
rename_table(bdb, table, temp)
rename_table(bdb, temp, cmd.name)
else:
# Make sure nothing else has this name and
# rename it.
if core.bayesdb_has_table(bdb, cmd.name):
raise BQLError(bdb,
'Name already defined as table: %s'
% (repr(cmd.name),))
rename_table(bdb, table, cmd.name)
# If table has implicit population, rename it too.
if core.bayesdb_table_has_implicit_population(
bdb, cmd.name):
populations = \
core.bayesdb_table_populations(bdb, cmd.name)
assert len(populations) == 1
population_name = core.bayesdb_population_name(
bdb, populations[0])
qt = sqlite3_quote_name(cmd.name)
qp = sqlite3_quote_name(population_name)
bdb.execute('ALTER POPULATION %s RENAME TO %s'
% (qp, qt))
# Remember the new name for subsequent commands.
table = cmd.name
elif isinstance(cmd, ast.AlterTabRenameCol):
# XXX Need to deal with this in the compiler.
raise NotImplementedError('Renaming columns'
' not yet implemented.')
# Make sure the old name exist and the new name does not.
old_folded = casefold(cmd.old)
new_folded = casefold(cmd.new)
if old_folded != new_folded:
if not core.bayesdb_table_has_column(bdb, table,
cmd.old):
raise BQLError(bdb, 'No such column in table %s'
': %s' %
(repr(table), repr(cmd.old)))
if core.bayesdb_table_has_column(bdb, table, cmd.new):
raise BQLError(bdb, 'Column already exists'
' in table %s: %s' %
(repr(table), repr(cmd.new)))
# Update bayesdb_column. Everything else refers
# to columns by (tabname, colno) pairs rather than
# by names.
update_column_sql = '''
UPDATE bayesdb_column SET name = :new
WHERE tabname = :table AND name = :old
'''
total_changes = bdb._sqlite3.totalchanges()
bdb.sql_execute(update_column_sql, {
'table': table,
'old': cmd.old,
'new': cmd.new,
})
assert bdb._sqlite3.totalchanges() - total_changes == 1
# ...except backends may have the (case-folded) name cached.
if old_folded != new_folded:
populations_sql = '''
SELECT id FROM bayesdb_population WHERE tabname = ?
'''
cursor = bdb.sql_execute(populations_sql, (table,))
generators = [
core.bayesdb_population_generators(
bdb, population_id)
for (population_id,) in cursor
]
for generator_id in set(generators):
backend = core.bayesdb_generator_backend(bdb,
generator_id)
backend.rename_column(bdb, generator_id,
old_folded, new_folded)
else:
assert False, 'Invalid alter table command: %s' % \
(cmd,)
return empty_cursor(bdb)
if isinstance(phrase, ast.GuessSchema):
if not core.bayesdb_has_table(bdb, phrase.table):
raise BQLError(bdb, 'No such table : %s' % phrase.table)
out = compiler.Output(0, {}, {})
with bdb.savepoint():
qt = sqlite3_quote_name(phrase.table)
temptable = bdb.temp_table_name()
qtt = sqlite3_quote_name(temptable)
cursor = bdb.sql_execute('SELECT * FROM %s' % (qt,))
column_names = [d[0] for d in cursor.description]
rows = cursor.fetchall()
stattypes = bayesdb_guess_stattypes(column_names, rows)
distinct_value_counts = [
len(set([row[i] for row in rows]))
for i in range(len(column_names))
]
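            # Emit one row per column (guessed stattype, number of distinct
            # values, and the reason for the guess) via a temp table that is
            # dropped when the returned cursor is unwound.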
out.winder('''
CREATE TEMP TABLE %s (
column TEXT,
stattype TEXT,
num_distinct INTEGER,
reason TEXT
)
''' % (qtt,), ())
for cn, st, ct in zip(column_names, stattypes, distinct_value_counts):
out.winder('''
INSERT INTO %s VALUES (?, ?, ?, ?)
''' % (qtt), (cn, st[0], ct, st[1]))
out.write('SELECT * FROM %s' % (qtt,))
out.unwinder('DROP TABLE %s' % (qtt,), ())
winders, unwinders = out.getwindings()
return execute_wound(
bdb, winders, unwinders, out.getvalue(), out.getbindings())
if isinstance(phrase, ast.CreatePop):
with bdb.savepoint():
_create_population(bdb, phrase)
return empty_cursor(bdb)
if isinstance(phrase, ast.DropPop):
with bdb.savepoint():
if not core.bayesdb_has_population(bdb, phrase.name):
if phrase.ifexists:
return empty_cursor(bdb)
raise BQLError(bdb, 'No such population: %r' % (phrase.name,))
population_id = core.bayesdb_get_population(bdb, phrase.name)
generator_ids = core.bayesdb_population_generators(
bdb, population_id)
if generator_ids:
generators = [core.bayesdb_generator_name(bdb, gid)
for gid in generator_ids]
raise BQLError(bdb, 'Population %r still has generators: %r' %
(phrase.name, generators))
# XXX helpful error checking if generators still exist
# XXX check change counts
bdb.sql_execute('''
DELETE FROM bayesdb_variable WHERE population_id = ?
''', (population_id,))
bdb.sql_execute('''
DELETE FROM bayesdb_population WHERE id = ?
''', (population_id,))
return empty_cursor(bdb)
if isinstance(phrase, ast.AlterPop):
with bdb.savepoint():
population = phrase.population
if not core.bayesdb_has_population(bdb, population):
raise BQLError(bdb, 'No such population: %s' %
(repr(population),))
population_id = core.bayesdb_get_population(bdb, population)
for cmd in phrase.commands:
if isinstance(cmd, ast.AlterPopRenamePop):
table = core.bayesdb_population_table(bdb, population_id)
# Prevent renaming of implicit population directly, unless
# being called by ast.AlterTabRenameTab in which case the
# table name and population name will not be matching.
if core.bayesdb_population_is_implicit(bdb, population_id) \
and casefold(population) == casefold(table):
                        raise BQLError(bdb, 'Cannot rename implicit '
'population %s; rename base table instead'
% (population,))
# Make sure nothing else has this name.
if casefold(population) != casefold(cmd.name):
if core.bayesdb_has_population(bdb, cmd.name):
raise BQLError(bdb,
'Name already defined as population' ': %s'
% (repr(cmd.name),))
# Update bayesdb_population. Everything else
# refers to it by id.
update_generator_sql = '''
UPDATE bayesdb_population SET name = ? WHERE id = ?
'''
total_changes = bdb._sqlite3.totalchanges()
bdb.sql_execute(update_generator_sql,
(cmd.name, population_id))
assert bdb._sqlite3.totalchanges() - total_changes == 1
# If population has implicit generator, rename it too.
if core.bayesdb_population_has_implicit_generator(
bdb, population_id):
generators = core.bayesdb_population_generators(
bdb, population_id)
assert len(generators) == 1
generator_name = core.bayesdb_generator_name(
bdb, generators[0])
qp = sqlite3_quote_name(cmd.name)
qg = sqlite3_quote_name(generator_name)
bdb.execute('ALTER GENERATOR %s RENAME TO %s'
% (qg, qp,))
# Remember the new name for subsequent commands.
population = cmd.name
elif isinstance(cmd, ast.AlterPopAddVar):
# Ensure column exists in base table.
table = core.bayesdb_population_table(bdb, population_id)
if not core.bayesdb_table_has_column(
bdb, table, cmd.name):
raise BQLError(bdb,
'No such variable in base table: %s'
% (cmd.name))
# Ensure variable not already in population.
if core.bayesdb_has_variable(
bdb, population_id, None, cmd.name):
raise BQLError(bdb,
'Variable already in population: %s'
% (cmd.name))
                    # Ensure there is at least one observation in the column.
qt = sqlite3_quote_name(table)
qc = sqlite3_quote_name(cmd.name)
cursor = bdb.sql_execute(
'SELECT COUNT(*) FROM %s WHERE %s IS NOT NULL' %
(qt, qc))
if cursor_value(cursor) == 0:
raise BQLError(bdb,
'Cannot add variable without any values: %s'
% (cmd.name))
# If stattype is None, guess.
if cmd.stattype is None:
cursor = bdb.sql_execute(
'SELECT %s FROM %s' % (qc, qt))
rows = cursor.fetchall()
[stattype, reason] = bayesdb_guess_stattypes(
[cmd.name], rows)[0]
# Fail if trying to model a key.
if stattype == 'key':
raise BQLError(bdb,
'Values in column %s appear to be keys.'
% (cmd.name,))
# Fail if cannot determine a stattype.
elif stattype == 'ignore':
raise BQLError(bdb,
'Failed to determine a stattype for %s, '
'please specify one manually.' % (cmd.name,))
# If user specified stattype, ensure it exists.
elif not core.bayesdb_has_stattype(bdb, cmd.stattype):
raise BQLError(bdb,
'Invalid stattype: %s' % (cmd.stattype))
else:
stattype = cmd.stattype
# Check that strings are not being modeled as numerical.
if stattype == 'numerical' \
and _column_contains_string(bdb, table, cmd.name):
raise BQLError(bdb,
'Numerical column contains string values: %r '
% (qc,))
with bdb.savepoint():
# Add the variable to the population.
core.bayesdb_add_variable(
bdb, population_id, cmd.name, stattype)
colno = core.bayesdb_variable_number(
bdb, population_id, None, cmd.name)
# Add the variable to each (initialized) generator in
# the population.
generator_ids = filter(
lambda g: core.bayesdb_generator_modelnos(bdb, g),
core.bayesdb_population_generators(
bdb, population_id),
)
for generator_id in generator_ids:
backend = core.bayesdb_generator_backend(
bdb, generator_id)
backend.add_column(bdb, generator_id, colno)
elif isinstance(cmd, ast.AlterPopStatType):
# Check the no generators are defined for this population.
generators = core.bayesdb_population_generators(
bdb, population_id)
if generators:
raise BQLError(bdb,
'Cannot update statistical types for population '
'%s, it has generators: %s'
% (repr(population), repr(generators),))
# Check all the variables are in the population.
unknown = [
c for c in cmd.names if not
core.bayesdb_has_variable(bdb, population_id, None, c)
]
if unknown:
raise BQLError(bdb,
'No such variables in population: %s'
% (repr(unknown)))
# Check the statistical type is valid.
if not core.bayesdb_has_stattype(bdb, cmd.stattype):
raise BQLError(bdb,
'Invalid statistical type: %r'
% (repr(cmd.stattype),))
# Check that strings are not being modeled as numerical.
if cmd.stattype == 'numerical':
table = core.bayesdb_population_table(
bdb, population_id)
numerical_string_vars = [
col for col in cmd.names
if _column_contains_string(bdb, table, col)
]
if numerical_string_vars:
raise BQLError(bdb,
'Columns with string values modeled as '
'numerical: %r' % (numerical_string_vars,))
# Perform the stattype update.
colnos = [
core.bayesdb_variable_number(
bdb, population_id, None, c) for c in cmd.names
]
qcolnos = ','.join('%d' % (colno,) for colno in colnos)
update_stattype_sql = '''
UPDATE bayesdb_variable SET stattype = ?
WHERE population_id = ? AND colno IN (%s)
''' % (qcolnos,)
bdb.sql_execute(
update_stattype_sql,
(casefold(cmd.stattype), population_id,))
else:
assert False, 'Invalid ALTER POPULATION command: %s' % \
(repr(cmd),)
return empty_cursor(bdb)
if isinstance(phrase, ast.CreateGen):
# Find the population.
if not core.bayesdb_has_population(bdb, phrase.population):
raise BQLError(bdb, 'No such population: %r' %
(phrase.population,))
population_id = core.bayesdb_get_population(bdb, phrase.population)
# Find the backend, or use the default.
backend_name = phrase.backend
if phrase.backend is None:
backend_name = 'cgpm'
if backend_name not in bdb.backends:
raise BQLError(bdb, 'No such backend: %s' %
(repr(backend_name),))
backend = bdb.backends[backend_name]
        # Retrieve the (possibly implicit) generator name.
generator_name = phrase.name or phrase.population
implicit = 1 if phrase.name is None else 0
with bdb.savepoint():
if core.bayesdb_has_generator(bdb, population_id, generator_name):
if not phrase.ifnotexists:
raise BQLError(
bdb, 'Name already defined as generator: %s' %
(repr(generator_name),))
else:
# Insert a record into bayesdb_generator and get the
# assigned id.
bdb.sql_execute('''
INSERT INTO bayesdb_generator
(name, population_id, backend, implicit)
VALUES (?, ?, ?, ?)
''', (generator_name, population_id, backend.name(), implicit))
generator_id = core.bayesdb_get_generator(
bdb, population_id, generator_name)
# Do any backend-specific initialization.
backend.create_generator(bdb, generator_id, phrase.schema)
# All done. Nothing to return.
return empty_cursor(bdb)
if isinstance(phrase, ast.DropGen):
with bdb.savepoint():
if not core.bayesdb_has_generator(bdb, None, phrase.name):
if phrase.ifexists:
return empty_cursor(bdb)
raise BQLError(bdb, 'No such generator: %s' %
(repr(phrase.name),))
generator_id = core.bayesdb_get_generator(bdb, None, phrase.name)
backend = core.bayesdb_generator_backend(bdb, generator_id)
# Backend-specific destruction.
backend.drop_generator(bdb, generator_id)
# Drop latent variables, models, and, finally, generator.
drop_columns_sql = '''
DELETE FROM bayesdb_variable WHERE generator_id = ?
'''
bdb.sql_execute(drop_columns_sql, (generator_id,))
drop_model_sql = '''
DELETE FROM bayesdb_generator_model WHERE generator_id = ?
'''
bdb.sql_execute(drop_model_sql, (generator_id,))
drop_generator_sql = '''
DELETE FROM bayesdb_generator WHERE id = ?
'''
bdb.sql_execute(drop_generator_sql, (generator_id,))
return empty_cursor(bdb)
if isinstance(phrase, ast.AlterGen):
with bdb.savepoint():
generator = phrase.generator
if not core.bayesdb_has_generator(bdb, None, generator):
raise BQLError(bdb, 'No such generator: %s' %
(repr(generator),))
generator_id = core.bayesdb_get_generator(bdb, None, generator)
cmds_generic = []
for cmd in phrase.commands:
if isinstance(cmd, ast.AlterGenRenameGen):
population_id = core.bayesdb_generator_population(
bdb, generator_id)
population = core.bayesdb_population_name(
bdb, population_id)
# Prevent renaming of implicit generator directly, unless
# being called by ast.AlterPopRenamePop in which case the
# population name and generator name will not be matching.
if core.bayesdb_population_is_implicit(bdb, generator_id) \
and casefold(generator) == casefold(population):
raise BQLError(bdb, 'Cannot rename implicit '
'generator; rename base population instead')
# Disable modelnos with AlterGenRenameGen.
if phrase.modelnos is not None:
raise BQLError(bdb, 'Cannot specify models for RENAME')
# Make sure nothing else has this name.
if casefold(generator) != casefold(cmd.name):
if core.bayesdb_has_generator(bdb, None, cmd.name):
raise BQLError(bdb, 'Name already defined'
' as generator: %s' %
(repr(cmd.name),))
# Update bayesdb_generator. Everything else
# refers to it by id.
update_generator_sql = '''
UPDATE bayesdb_generator SET name = ? WHERE id = ?
'''
total_changes = bdb._sqlite3.totalchanges()
bdb.sql_execute(update_generator_sql,
(cmd.name, generator_id))
assert bdb._sqlite3.totalchanges() - total_changes == 1
# Remember the new name for subsequent commands.
generator = cmd.name
elif isinstance(cmd, ast.AlterGenGeneric):
cmds_generic.append(cmd.command)
else:
assert False, 'Invalid ALTER GENERATOR command: %s' % \
(repr(cmd),)
if cmds_generic:
modelnos = phrase.modelnos
modelnos_invalid = None if modelnos is None else [
modelno for modelno in modelnos if not
core.bayesdb_generator_has_model(bdb, generator_id, modelno)
]
if modelnos_invalid:
raise BQLError(bdb,
'No such models in generator %s: %s' %
(repr(phrase.generator), repr(modelnos)))
# Call generic alternations on the backend.
backend = core.bayesdb_generator_backend(bdb, generator_id)
backend.alter(bdb, generator_id, modelnos, cmds_generic)
return empty_cursor(bdb)
if isinstance(phrase, ast.InitModels):
if not core.bayesdb_has_generator(bdb, None, phrase.generator):
raise BQLError(bdb, 'No such generator: %s' %
(phrase.generator,))
generator_id = core.bayesdb_get_generator(bdb, None, phrase.generator)
modelnos = range(phrase.nmodels)
with bdb.savepoint():
# Find the model numbers. Omit existing ones for
# ifnotexists; reject existing ones otherwise.
if phrase.ifnotexists:
modelnos = set(modelno for modelno in modelnos
if not core.bayesdb_generator_has_model(bdb, generator_id,
modelno))
else:
existing = set(modelno for modelno in modelnos
if core.bayesdb_generator_has_model(bdb, generator_id,
modelno))
if 0 < len(existing):
raise BQLError(bdb, 'Generator %s already has models: %s' %
(repr(phrase.generator), sorted(existing)))
# Stop now if there's nothing to initialize.
if len(modelnos) == 0:
return
# Create the bayesdb_generator_model records.
modelnos = sorted(modelnos)
insert_model_sql = '''
INSERT INTO bayesdb_generator_model
(generator_id, modelno)
VALUES (:generator_id, :modelno)
'''
for modelno in modelnos:
bdb.sql_execute(insert_model_sql, {
'generator_id': generator_id,
'modelno': modelno,
})
# Do backend-specific initialization.
backend = core.bayesdb_generator_backend(bdb, generator_id)
backend.initialize_models(bdb, generator_id, modelnos)
return empty_cursor(bdb)
if isinstance(phrase, ast.AnalyzeModels):
# WARNING: It is the backend's responsibility to work in a
# transaction.
#
# WARNING: It is the backend's responsibility to update the
# iteration count in bayesdb_generator_model records.
#
# We do this so that the backend can save incremental
# progress in case of ^C in the middle.
#
# XXX Put these warning somewhere more appropriate.
if not core.bayesdb_has_generator(bdb, None, phrase.generator):
raise BQLError(bdb, 'No such generator: %s' %
(phrase.generator,))
generator_id = core.bayesdb_get_generator(bdb, None, phrase.generator)
backend = core.bayesdb_generator_backend(bdb, generator_id)
# XXX Should allow parameters for iterations and ckpt/iter.
backend.analyze_models(bdb, generator_id,
modelnos=phrase.modelnos,
iterations=phrase.iterations,
max_seconds=phrase.seconds,
ckpt_iterations=phrase.ckpt_iterations,
ckpt_seconds=phrase.ckpt_seconds,
program=phrase.program)
return empty_cursor(bdb)
if isinstance(phrase, ast.DropModels):
with bdb.savepoint():
generator_id = core.bayesdb_get_generator(
bdb, None, phrase.generator)
backend = core.bayesdb_generator_backend(bdb, generator_id)
modelnos = None
if phrase.modelnos is not None:
lookup_model_sql = '''
SELECT COUNT(*) FROM bayesdb_generator_model
WHERE generator_id = :generator_id
AND modelno = :modelno
'''
modelnos = sorted(list(phrase.modelnos))
for modelno in modelnos:
cursor = bdb.sql_execute(lookup_model_sql, {
'generator_id': generator_id,
'modelno': modelno,
})
if cursor_value(cursor) == 0:
raise BQLError(bdb, 'No such model'
' in generator %s: %s' %
(repr(phrase.generator), repr(modelno)))
backend.drop_models(bdb, generator_id, modelnos=modelnos)
if modelnos is None:
drop_models_sql = '''
DELETE FROM bayesdb_generator_model WHERE generator_id = ?
'''
bdb.sql_execute(drop_models_sql, (generator_id,))
else:
drop_model_sql = '''
DELETE FROM bayesdb_generator_model
WHERE generator_id = :generator_id
AND modelno = :modelno
'''
for modelno in modelnos:
bdb.sql_execute(drop_model_sql, {
'generator_id': generator_id,
'modelno': modelno,
})
return empty_cursor(bdb)
if isinstance(phrase, ast.Regress):
# Retrieve the population.
if not core.bayesdb_has_population(bdb, phrase.population):
raise BQLError(bdb, 'No such population: %r' % (phrase.population,))
population_id = core.bayesdb_get_population(bdb, phrase.population)
# Retrieve the generator
generator_id = None
if phrase.generator:
if not core.bayesdb_has_generator(bdb, population_id,
phrase.generator):
raise BQLError(bdb,
'No such generator: %r' % (phrase.generator,))
generator_id = core.bayesdb_get_generator(
bdb, population_id, phrase.generator)
# Retrieve the target variable.
if not core.bayesdb_has_variable(
bdb, population_id, None, phrase.target):
raise BQLError(bdb, 'No such variable: %r' % (phrase.target,))
colno_target = core.bayesdb_variable_number(
bdb, population_id, None, phrase.target)
stattype = core.bayesdb_variable_stattype(bdb, population_id,
generator_id, colno_target)
if stattype != 'numerical':
raise BQLError(bdb,
'Target variable is not numerical: %r' % (phrase.target,))
# Build the given variables.
if any(isinstance(col, ast.SelColAll) for col in phrase.givens):
# Using * is not allowed to be mixed with other variables.
if len(phrase.givens) > 1:
raise BQLError(bdb, 'Cannot use (*) with other givens.')
colno_givens = core.bayesdb_variable_numbers(
bdb, population_id, None)
else:
if any(isinstance(col, ast.SelColSub) for col in phrase.givens):
# Subexpression needs special compiling.
out = compiler.Output(n_numpar, nampar_map, bindings)
bql_compiler = compiler.BQLCompiler_None()
givens = compiler.expand_select_columns(
bdb, phrase.givens, True, bql_compiler, out)
else:
givens = phrase.givens
colno_givens = [
core.bayesdb_variable_number(
bdb, population_id, None, given.expression.column)
for given in givens
]
# Build the arguments to bqlfn.bayesdb_simulate.
colno_givens_unique = set(
            colno for colno in colno_givens if colno != colno_target
)
if len(colno_givens_unique) == 0:
raise BQLError(bdb, 'No matching given columns.')
constraints = []
colnos = [colno_target] + list(colno_givens_unique)
nsamp = 100 if phrase.nsamp is None else phrase.nsamp.value.value
modelnos = None if phrase.modelnos is None else str(phrase.modelnos)
rows = bqlfn.bayesdb_simulate(
bdb, population_id, generator_id, modelnos, constraints,
colnos, numpredictions=nsamp)
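        # The regression is fit on simulated joint samples of the target and
        # given variables, not on the raw rows of the base table.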
# Retrieve the stattypes.
stattypes = [
core.bayesdb_variable_stattype(
bdb, population_id, generator_id, colno_given)
for colno_given in colno_givens_unique
]
# Separate the target values from the given values.
target_values = [row[0] for row in rows]
given_values = [row[1:] for row in rows]
given_names = [
core.bayesdb_variable_name(bdb, population_id, generator_id, given)
for given in colno_givens_unique
]
# Compute the coefficients. The import to regress_ols is here since the
# feature depends on pandas + sklearn, so avoid module-wide import.
from bayeslite.regress import regress_ols
coefficients = regress_ols(
target_values, given_values, given_names, stattypes)
# Store the results in a winder.
temptable = bdb.temp_table_name()
qtt = sqlite3_quote_name(temptable)
out = compiler.Output(0, {}, {})
out.winder('''
CREATE TEMP TABLE %s (variable TEXT, coefficient REAL);
''' % (qtt,), ())
for variable, coef in coefficients:
out.winder('''
INSERT INTO %s VALUES (?, ?)
''' % (qtt), (variable, coef,))
out.write('SELECT * FROM %s ORDER BY variable' % (qtt,))
out.unwinder('DROP TABLE %s' % (qtt,), ())
winders, unwinders = out.getwindings()
return execute_wound(
bdb, winders, unwinders, out.getvalue(), out.getbindings())
assert False # XXX
def _create_population(bdb, phrase):
    # Retrieve the (possibly implicit) population name.
population_name = phrase.name or phrase.table
implicit = 1 if phrase.name is None else 0
# Handle IF NOT EXISTS.
if core.bayesdb_has_population(bdb, population_name):
if phrase.ifnotexists:
return
else:
raise BQLError(bdb, 'Name already defined as population: %r' %
(population_name,))
# Make sure the bayesdb_column table knows all the columns of the
# underlying table.
core.bayesdb_table_guarantee_columns(bdb, phrase.table)
# Retrieve all columns from the base table. The user is required to provide
# a strategy for each single variable, either MODEL, IGNORE, or GUESS.
base_table_columns = core.bayesdb_table_column_names(bdb, phrase.table)
# Create the population record and get the assigned id.
bdb.sql_execute('''
INSERT INTO bayesdb_population (name, tabname, implicit)
VALUES (?, ?, ?)
''', (population_name, phrase.table, implicit))
population_id = core.bayesdb_get_population(bdb, population_name)
# Extract the population column names and stattypes as pairs.
pop_model_vars = list(itertools.chain.from_iterable(
[[(name, s.stattype) for name in s.names]
for s in phrase.schema if isinstance(s, ast.PopModelVars)]))
# Extract the ignored columns.
pop_ignore_vars = list(itertools.chain.from_iterable(
[[(name, 'ignore') for name in s.names]
for s in phrase.schema if isinstance(s, ast.PopIgnoreVars)]))
# Extract the columns to guess.
pop_guess = list(itertools.chain.from_iterable(
[s.names for s in phrase.schema if isinstance(s, ast.PopGuessVars)]))
if '*' in pop_guess:
# Do not allow * to coincide with other variables.
if len(pop_guess) > 1:
raise BQLError(
bdb, 'Cannot use wildcard GUESS with variables names: %r'
% (pop_guess, ))
# Retrieve all variables in the base table.
avoid = set(casefold(t[0]) for t in pop_model_vars + pop_ignore_vars)
pop_guess = [t for t in base_table_columns if casefold(t) not in avoid]
# Perform the guessing.
if pop_guess:
qt = sqlite3_quote_name(phrase.table)
qcns = ','.join(map(sqlite3_quote_name, pop_guess))
cursor = bdb.sql_execute('SELECT %s FROM %s' % (qcns, qt))
rows = cursor.fetchall()
# XXX This function returns a stattype called `key`, which we will add
# to the pop_ignore_vars.
pop_guess_stattypes = bayesdb_guess_stattypes(pop_guess, rows)
pop_guess_vars = zip(pop_guess, [st[0] for st in pop_guess_stattypes])
        migrate = [(col, st) for col, st in pop_guess_vars if st == 'key']
for col, st in migrate:
pop_guess_vars.remove((col, st))
pop_ignore_vars.append((col, 'ignore'))
else:
pop_guess_vars = []
# Ensure no string-valued variables are being modeled as numerical.
numerical_string_vars = [
var for var, stattype in pop_model_vars
if stattype == 'numerical'
and _column_contains_string(bdb, phrase.table, var)
]
if numerical_string_vars:
raise BQLError(bdb,
'Column(s) with string values modeled as numerical: %r'
% (numerical_string_vars, ))
# Pool all the variables and statistical types together.
pop_all_vars = pop_model_vars + pop_ignore_vars + pop_guess_vars
# Check that everyone in the population is modeled.
# `known` contains all the variables for which a policy is known.
known = [casefold(t[0]) for t in pop_all_vars]
not_found = [t for t in base_table_columns if casefold(t) not in known]
if not_found:
raise BQLError(
bdb, 'Cannot determine a modeling policy for variables: %r'
% (not_found, ))
# Check
# - for duplicates,
# - for nonexistent columns,
# - for invalid statistical types.
seen_variables = set()
duplicates = set()
missing = set()
invalid = set()
stattype_sql = '''
SELECT COUNT(*) FROM bayesdb_stattype WHERE name = :stattype
'''
for nm, st in pop_all_vars:
name = casefold(nm)
stattype = casefold(st)
if name in seen_variables:
duplicates.add(name)
continue
if not core.bayesdb_table_has_column(bdb, phrase.table, nm):
missing.add(name)
continue
cursor = bdb.sql_execute(stattype_sql, {'stattype': stattype})
if cursor_value(cursor) == 0 and stattype != 'ignore':
invalid.add(stattype)
continue
seen_variables.add(nm)
# XXX Would be nice to report these simultaneously.
if missing:
raise BQLError(bdb, 'No such columns in table %r: %r' %
(phrase.table, list(missing)))
if duplicates:
raise BQLError(bdb, 'Duplicate column names: %r' % (list(duplicates),))
if invalid:
raise BQLError(bdb, 'Invalid statistical types: %r' % (list(invalid),))
# Insert variable records.
for nm, st in pop_all_vars:
name = casefold(nm)
stattype = casefold(st)
if stattype == 'ignore':
continue
core.bayesdb_add_variable(bdb, population_id, name, stattype)
def _column_contains_string(bdb, table, column):
qt = sqlite3_quote_name(table)
qc = sqlite3_quote_name(column)
rows = bdb.sql_execute('SELECT %s FROM %s' % (qc, qt))
return any(isinstance(r[0], unicode) for r in rows)
def rename_table(bdb, old, new):
assert core.bayesdb_has_table(bdb, old)
assert not core.bayesdb_has_table(bdb, new)
# Rename the SQL table.
qo = sqlite3_quote_name(old)
qn = sqlite3_quote_name(new)
rename_sql = 'ALTER TABLE %s RENAME TO %s' % (qo, qn)
bdb.sql_execute(rename_sql)
# Update bayesdb_column to use the new name.
update_columns_sql = '''
UPDATE bayesdb_column SET tabname = ? WHERE tabname = ?
'''
bdb.sql_execute(update_columns_sql, (new, old))
# Update bayesdb_population to use the new name.
update_populations_sql = '''
UPDATE bayesdb_population SET tabname = ? WHERE tabname = ?
'''
bdb.sql_execute(update_populations_sql, (new, old))
def empty_cursor(bdb):
return None
def execute_wound(bdb, winders, unwinders, sql, bindings):
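    """Execute `sql` bracketed by winder/unwinder statements.

    Winders (e.g. CREATE TEMP TABLE) run before the query; unwinders run
    when the returned WoundCursor is reclaimed, or immediately on error.
    """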
if len(winders) == 0 and len(unwinders) == 0:
return bdb.sql_execute(sql, bindings)
with bdb.savepoint():
for (wsql, wbindings) in winders:
bdb.sql_execute(wsql, wbindings)
try:
return WoundCursor(bdb, bdb.sql_execute(sql, bindings), unwinders)
except:
for (usql, ubindings) in unwinders:
bdb.sql_execute(usql, ubindings)
raise
class BayesDBCursor(object):
"""Cursor for a BQL or SQL query from a BayesDB."""
def __init__(self, bdb, cursor):
self._bdb = bdb
self._cursor = cursor
# XXX Must save the description early because apsw discards it
# after we have iterated over all rows -- or if there are no
# rows, discards it immediately!
try:
self._description = cursor.description
except apsw.ExecutionCompleteError:
self._description = []
else:
assert self._description is not None
if self._description is None:
self._description = []
def __iter__(self):
return self
def next(self):
return self._cursor.next()
def fetchone(self):
return self._cursor.fetchone()
def fetchvalue(self):
return cursor_value(self)
def fetchmany(self, size=1):
with txn.bayesdb_caching(self._bdb):
return self._cursor.fetchmany(size=size)
def fetchall(self):
with txn.bayesdb_caching(self._bdb):
return self._cursor.fetchall()
@property
def connection(self):
return self._bdb
@property
def lastrowid(self):
return self._bdb.last_insert_rowid()
@property
def description(self):
return self._description
class WoundCursor(BayesDBCursor):
def __init__(self, bdb, cursor, unwinders):
self._unwinders = unwinders
super(WoundCursor, self).__init__(bdb, cursor)
def __del__(self):
del self._cursor
# If the database is still open, we need to undo the effects
# of the cursor when done. But the effects are (intended to
# be) in-memory only, so otherwise, if the database is closed,
# we need not do anything.
#
# XXX Name the question of whether it's closed a little less
# kludgily. (But that might encourage people outside to
# depend on that, which is not such a great idea.)
if self._bdb._sqlite3 is not None:
for sql, bindings in reversed(self._unwinders):
self._bdb.sql_execute(sql, bindings)
# Apparently object doesn't have a __del__ method.
#super(WoundCursor, self).__del__()
| apache-2.0 |
adamgreenhall/scikit-learn | examples/ensemble/plot_partial_dependence.py | 249 | 4456 | """
========================
Partial Dependence Plots
========================
Partial dependence plots show the dependence between the target function [1]_
and a set of 'target' features, marginalizing over the
values of all other features (the complement features). Due to the limits
of human perception the size of the target feature set must be small (usually,
one or two) thus the target features are usually chosen among the most
important features
(see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`).
This example shows how to obtain partial dependence plots from a
:class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California
housing dataset. The example is taken from [HTF2009]_.
The plot shows four one-way and one two-way partial dependence plots.
The target variables for the one-way PDP are:
median income (`MedInc`), avg. occupants per household (`AveOccup`),
median house age (`HouseAge`), and avg. rooms per household (`AveRooms`).
We can clearly see that the median house price shows a linear relationship
with the median income (top left) and that the house price drops when the
avg. occupants per household increases (top middle).
The top right plot shows that the house age in a district does not have
a strong influence on the (median) house price; nor does the average
number of rooms per household.
The tick marks on the x-axis represent the deciles of the feature values
in the training data.
Partial dependence plots with two target features enable us to visualize
interactions among them. The two-way partial dependence plot shows the
dependence of median house price on joint values of house age and avg.
occupants per household. We can clearly see an interaction between the
two features:
For an avg. occupancy greater than two, the house price is nearly independent
of the house age, whereas for values less than two there is a strong dependence
on age.
.. [HTF2009] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning Ed. 2", Springer, 2009.
.. [1] For classification you can think of it as the regression score before
the link function.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.datasets.california_housing import fetch_california_housing
# fetch California housing dataset
cal_housing = fetch_california_housing()
# split 80/20 train-test
X_train, X_test, y_train, y_test = train_test_split(cal_housing.data,
cal_housing.target,
test_size=0.2,
random_state=1)
names = cal_housing.feature_names
print('_' * 80)
print("Training GBRT...")
clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,
learning_rate=0.1, loss='huber',
random_state=1)
clf.fit(X_train, y_train)
print("done.")
print('_' * 80)
print('Convenience plot with ``partial_dependence_plots``')
print
features = [0, 5, 1, 2, (5, 1)]
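# Indices refer to cal_housing.feature_names: 0=MedInc, 5=AveOccup,
# 1=HouseAge, 2=AveRooms; the (5, 1) pair requests a two-way plot.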
fig, axs = plot_partial_dependence(clf, X_train, features, feature_names=names,
n_jobs=3, grid_resolution=50)
fig.suptitle('Partial dependence of house value on nonlocation features\n'
'for the California housing dataset')
plt.subplots_adjust(top=0.9) # tight_layout causes overlap with suptitle
print('_' * 80)
print('Custom 3d plot via ``partial_dependence``')
print
fig = plt.figure()
target_feature = (1, 5)
pdp, (x_axis, y_axis) = partial_dependence(clf, target_feature,
X=X_train, grid_resolution=50)
XX, YY = np.meshgrid(x_axis, y_axis)
Z = pdp.T.reshape(XX.shape).T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu)
ax.set_xlabel(names[target_feature[0]])
ax.set_ylabel(names[target_feature[1]])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle('Partial dependence of house value on median age and '
'average occupancy')
plt.subplots_adjust(top=0.9)
plt.show()
| bsd-3-clause |
stylianos-kampakis/scikit-learn | sklearn/feature_extraction/text.py | 110 | 50157 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
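# Illustrative sketch (not in the original source): both helpers map accented
# characters to their unaccented counterparts where possible, e.g.
#
#     strip_accents_unicode(u'\xe9t\xe9')   # u'été' -> u'ete'
#     strip_accents_ascii(u'\xe9t\xe9')     # u'été' -> u'ete'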
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
        # hundred nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
check_is_fitted(self, 'vocabulary_', msg=msg),
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items
        (strings or bytes) that are analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary: boolean, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items
        (strings or bytes) that are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
Only applies if ``analyzer == 'word'``.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
        If float, the parameter represents a proportion of documents; if
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
        If float, the parameter represents a proportion of documents; if
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df of min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or less
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
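# Illustrative usage sketch for CountVectorizer (hypothetical corpus, not part
# of the original module):
#
#     cv = CountVectorizer()
#     X = cv.fit_transform(['the quick brown fox', 'the lazy dog'])
#     # X has shape (2, 6): one column per term in the learned vocabulary;
#     # cv.get_feature_names() lists the terms in column order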
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
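# Worked illustration of the idf formula implemented in ``fit`` above
# (numbers are for illustration only): with smooth_idf=True and n_samples=4,
# a term seen in df=1 document gets idf = log((4 + 1) / (1 + 1)) + 1 ~= 1.92,
# while a term seen in every document gets idf = log(5 / 5) + 1 = 1.0, so
# ubiquitous terms are down-weighted but never zeroed out.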
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items
        (strings or bytes) that are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
        If float, the parameter represents a proportion of documents; if
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
        If float, the parameter represents a proportion of documents; if
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
        Tokenize the documents and count the occurrences of tokens and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
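# Illustrative usage sketch for TfidfVectorizer (hypothetical corpus, not part
# of the original module): it is equivalent to CountVectorizer followed by
# TfidfTransformer.
#
#     tv = TfidfVectorizer()
#     X = tv.fit_transform(['the quick brown fox', 'the lazy dog'])
#     # rows of X are l2-normalized tf-idf vectors; tv.idf_ holds the learned
#     # per-term idf weights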
| bsd-3-clause |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/pandas/tests/types/test_generic.py | 7 | 2255 | # -*- coding: utf-8 -*-
import nose
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.types import generic as gt
_multiprocess_can_split_ = True
class TestABCClasses(tm.TestCase):
tuples = [[1, 2, 2], ['red', 'blue', 'red']]
multi_index = pd.MultiIndex.from_arrays(tuples, names=('number', 'color'))
datetime_index = pd.to_datetime(['2000/1/1', '2010/1/1'])
timedelta_index = pd.to_timedelta(np.arange(5), unit='s')
period_index = pd.period_range('2000/1/1', '2010/1/1/', freq='M')
categorical = pd.Categorical([1, 2, 3], categories=[2, 3, 1])
categorical_df = pd.DataFrame({"values": [1, 2, 3]}, index=categorical)
df = pd.DataFrame({'names': ['a', 'b', 'c']}, index=multi_index)
sparse_series = pd.Series([1, 2, 3]).to_sparse()
sparse_array = pd.SparseArray(np.random.randn(10))
def test_abc_types(self):
self.assertIsInstance(pd.Index(['a', 'b', 'c']), gt.ABCIndex)
self.assertIsInstance(pd.Int64Index([1, 2, 3]), gt.ABCInt64Index)
self.assertIsInstance(pd.Float64Index([1, 2, 3]), gt.ABCFloat64Index)
self.assertIsInstance(self.multi_index, gt.ABCMultiIndex)
self.assertIsInstance(self.datetime_index, gt.ABCDatetimeIndex)
self.assertIsInstance(self.timedelta_index, gt.ABCTimedeltaIndex)
self.assertIsInstance(self.period_index, gt.ABCPeriodIndex)
self.assertIsInstance(self.categorical_df.index,
gt.ABCCategoricalIndex)
self.assertIsInstance(pd.Index(['a', 'b', 'c']), gt.ABCIndexClass)
self.assertIsInstance(pd.Int64Index([1, 2, 3]), gt.ABCIndexClass)
self.assertIsInstance(pd.Series([1, 2, 3]), gt.ABCSeries)
self.assertIsInstance(self.df, gt.ABCDataFrame)
self.assertIsInstance(self.df.to_panel(), gt.ABCPanel)
self.assertIsInstance(self.sparse_series, gt.ABCSparseSeries)
self.assertIsInstance(self.sparse_array, gt.ABCSparseArray)
self.assertIsInstance(self.categorical, gt.ABCCategorical)
self.assertIsInstance(pd.Period('2012', freq='A-DEC'), gt.ABCPeriod)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
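# Illustrative note (not part of the original test module): the ABC classes
# exercised above allow isinstance() checks against pandas objects without
# referencing the concrete classes, e.g.
#
#     from pandas.types import generic as gt
#     isinstance(pd.Series([1, 2, 3]), gt.ABCSeries)   # True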
| apache-2.0 |
qifeigit/scikit-learn | examples/applications/plot_outlier_detection_housing.py | 243 | 5577 | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding some biased
estimation of the data structure, but yet accurate to some extent.
The One-Class SVM algorithm, in contrast, does not assume any parametric form
of the data distribution.
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrating on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Anyway, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)
# Author: Virgile Fritsch <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data
X1 = load_boston()['data'][:, [8, 10]] # two clusters
X2 = load_boston()['data'][:, [5, 12]] # "banana"-shaped
# Define "classifiers" to be used
classifiers = {
"Empirical Covariance": EllipticEnvelope(support_fraction=1.,
contamination=0.261),
"Robust Covariance (Minimum Covariance Determinant)":
EllipticEnvelope(contamination=0.261),
"OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}
# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
plt.figure(1)
clf.fit(X1)
Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
legend1[clf_name] = plt.contour(
xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
plt.figure(2)
clf.fit(X2)
Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
Z2 = Z2.reshape(xx2.shape)
legend2[clf_name] = plt.contour(
xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])
legend1_values_list = list( legend1.values() )
legend1_keys_list = list( legend1.keys() )
# Plot the results (= shape of the data points cloud)
plt.figure(1) # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
xycoords="data", textcoords="data",
xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
legend1_values_list[1].collections[0],
legend1_values_list[2].collections[0]),
(legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")
legend2_values_list = list( legend2.values() )
legend2_keys_list = list( legend2.keys() )
plt.figure(2) # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
legend2_values_list[1].collections[0],
legend2_values_list[2].collections[0]),
           (legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
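# Illustrative extension (not in the original example): each fitted estimator
# can also label individual points, returning +1 for inliers and -1 for
# outliers via ``predict``, e.g.
#
#     flags = classifiers["OCSVM"].fit(X1).predict(X1)
#     print("flagged %d of %d points as outliers" % ((flags == -1).sum(),
#                                                    len(X1)))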
| bsd-3-clause |
pauljxtan/mathsci | examples/integrate_lorenz_attractor.py | 2 | 1350 | #!/usr/bin/env python
"""
Integrating the Lorenz attractor.
"""
from matplotlib import pyplot
from mpl_toolkits.mplot3d import Axes3D
from souffle.math import chaos, odeint
def main():
# Set integration function
f = chaos.lorenz_attractor
# Length of timestep
dt = 0.01
# Initial time
t0 = 0.0
# Initial coordinates
X0 = [0.01, 0.01, 0.01]
# Integration constants
sigma = 10.0
beta = 8.0 / 3.0
rho = 28.0
# Integrate it...
# Using Euler:
#lorenz = odeint.Euler(f, t0, X0, sigma=sigma, beta=beta, rho=rho)
#lorenz.integrate(dt, 10000, True)
# Using RK4:
#lorenz = odeint.RK4(f, t0, X0, sigma=sigma, beta=beta, rho=rho)
#lorenz.integrate(dt, 10000, True)
# Using Bulirsch-Stoer:
lorenz = odeint.BulSto(f, t0, X0, sigma=sigma, beta=beta, rho=rho)
lorenz.integrate(dt, 10000, 1.0e-6, True)
# Unpack data
t = lorenz.t
x, y, z = lorenz.unpack()
# Plot time series
fig1 = pyplot.figure()
fig1_sp1 = fig1.add_subplot(111)
fig1_sp1.plot(t, x, label="x")
fig1_sp1.plot(t, y, label="y")
fig1_sp1.plot(t, z, label="z")
fig1_sp1.legend()
# Plot in 3D
fig2 = pyplot.figure()
fig2.sp1 = fig2.add_subplot(111, projection="3d")
fig2.sp1.plot(x, y, z)
pyplot.show()
if __name__ == "__main__":
main()
| mit |
cdcapano/pycbc | pycbc/results/scatter_histograms.py | 4 | 29832 | # Copyright (C) 2016 Miriam Cabero Mueller, Collin Capano
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
Module to generate figures with scatter plots and histograms.
"""
import itertools
import sys
import numpy
import scipy.stats
import matplotlib
# Only if a backend is not already set ... This should really *not* be done
# here, but in the executables you should set matplotlib.use()
# This matches the check that matplotlib does internally, but this *may* be
# version dependenant. If this is a problem then remove this and control from
# the executables directly.
if 'matplotlib.backends' not in sys.modules: # nopep8
matplotlib.use('agg')
from matplotlib import (offsetbox, pyplot, gridspec)
from pycbc.results import str_utils
from pycbc.io import FieldArray
def create_axes_grid(parameters, labels=None, height_ratios=None,
width_ratios=None, no_diagonals=False):
"""Given a list of parameters, creates a figure with an axis for
every possible combination of the parameters.
Parameters
----------
parameters : list
Names of the variables to be plotted.
labels : {None, dict}, optional
A dictionary of parameters -> parameter labels.
height_ratios : {None, list}, optional
Set the height ratios of the axes; see `matplotlib.gridspec.GridSpec`
for details.
width_ratios : {None, list}, optional
Set the width ratios of the axes; see `matplotlib.gridspec.GridSpec`
for details.
no_diagonals : {False, bool}, optional
Do not produce axes for the same parameter on both axes.
Returns
-------
fig : pyplot.figure
The figure that was created.
axis_dict : dict
A dictionary mapping the parameter combinations to the axis and their
location in the subplots grid; i.e., the key, values are:
`{('param1', 'param2'): (pyplot.axes, row index, column index)}`
"""
if labels is None:
labels = {p: p for p in parameters}
elif any(p not in labels for p in parameters):
raise ValueError("labels must be provided for all parameters")
# Create figure with adequate size for number of parameters.
ndim = len(parameters)
if no_diagonals:
ndim -= 1
if ndim < 3:
fsize = (8, 7)
else:
fsize = (ndim*3 - 1, ndim*3 - 2)
fig = pyplot.figure(figsize=fsize)
# create the axis grid
gs = gridspec.GridSpec(ndim, ndim, width_ratios=width_ratios,
height_ratios=height_ratios,
wspace=0.05, hspace=0.05)
# create grid of axis numbers to easily create axes in the right locations
axes = numpy.arange(ndim**2).reshape((ndim, ndim))
# Select possible combinations of plots and establish rows and columns.
combos = list(itertools.combinations(parameters, 2))
# add the diagonals
if not no_diagonals:
combos += [(p, p) for p in parameters]
# create the mapping between parameter combos and axes
axis_dict = {}
# cycle over all the axes, setting thing as needed
for nrow in range(ndim):
for ncolumn in range(ndim):
ax = pyplot.subplot(gs[axes[nrow, ncolumn]])
# map to a parameter index
px = parameters[ncolumn]
if no_diagonals:
py = parameters[nrow+1]
else:
py = parameters[nrow]
if (px, py) in combos:
axis_dict[px, py] = (ax, nrow, ncolumn)
# x labels only on bottom
if nrow + 1 == ndim:
ax.set_xlabel('{}'.format(labels[px]), fontsize=18)
else:
pyplot.setp(ax.get_xticklabels(), visible=False)
# y labels only on left
if ncolumn == 0:
ax.set_ylabel('{}'.format(labels[py]), fontsize=18)
else:
pyplot.setp(ax.get_yticklabels(), visible=False)
else:
# make non-used axes invisible
ax.axis('off')
return fig, axis_dict
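# Illustrative usage sketch (hypothetical parameter names, not part of the
# original module):
#
#     fig, axis_dict = create_axes_grid(['mass1', 'mass2'],
#                                       labels={'mass1': r'$m_1$',
#                                               'mass2': r'$m_2$'})
#     ax, row, col = axis_dict['mass1', 'mass2']
#     # ax is the off-diagonal panel for the (mass1, mass2) pair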
def get_scale_fac(fig, fiducial_width=8, fiducial_height=7):
"""Gets a factor to scale fonts by for the given figure. The scale
factor is relative to a figure with dimensions
(`fiducial_width`, `fiducial_height`).
"""
width, height = fig.get_size_inches()
return (width*height/(fiducial_width*fiducial_height))**0.5
def construct_kde(samples_array, use_kombine=False):
"""Constructs a KDE from the given samples.
"""
if use_kombine:
try:
import kombine
except ImportError:
raise ImportError("kombine is not installed.")
# construct the kde
if use_kombine:
kde = kombine.clustered_kde.KDE(samples_array)
else:
kde = scipy.stats.gaussian_kde(samples_array.T)
return kde
def create_density_plot(xparam, yparam, samples, plot_density=True,
plot_contours=True, percentiles=None, cmap='viridis',
contour_color=None, xmin=None, xmax=None,
ymin=None, ymax=None, exclude_region=None,
fig=None, ax=None, use_kombine=False):
"""Computes and plots posterior density and confidence intervals using the
given samples.
Parameters
----------
xparam : string
The parameter to plot on the x-axis.
yparam : string
The parameter to plot on the y-axis.
samples : dict, numpy structured array, or FieldArray
The samples to plot.
plot_density : {True, bool}
Plot a color map of the density.
plot_contours : {True, bool}
Plot contours showing the n-th percentiles of the density.
percentiles : {None, float or array}
What percentile contours to draw. If None, will plot the 50th
and 90th percentiles.
cmap : {'viridis', string}
The name of the colormap to use for the density plot.
contour_color : {None, string}
What color to make the contours. Default is white for density
plots and black for other plots.
xmin : {None, float}
Minimum value to plot on x-axis.
xmax : {None, float}
Maximum value to plot on x-axis.
ymin : {None, float}
Minimum value to plot on y-axis.
ymax : {None, float}
Maximum value to plot on y-axis.
    exclude_region : {None, str}
Exclude the specified region when plotting the density or contours.
Must be a string in terms of `xparam` and `yparam` that is
understandable by numpy's logical evaluation. For example, if
`xparam = m_1` and `yparam = m_2`, and you want to exclude the region
for which `m_2` is greater than `m_1`, then exclude region should be
`'m_2 > m_1'`.
fig : {None, pyplot.figure}
Add the plot to the given figure. If None and ax is None, will create
a new figure.
ax : {None, pyplot.axes}
Draw plot on the given axis. If None, will create a new axis from
`fig`.
use_kombine : {False, bool}
Use kombine's KDE to calculate density. Otherwise, will use
`scipy.stats.gaussian_kde.` Default is False.
Returns
-------
fig : pyplot.figure
The figure the plot was made on.
ax : pyplot.axes
The axes the plot was drawn on.
"""
if percentiles is None:
percentiles = numpy.array([50., 90.])
percentiles = 100. - numpy.array(percentiles)
percentiles.sort()
if ax is None and fig is None:
fig = pyplot.figure()
if ax is None:
ax = fig.add_subplot(111)
# convert samples to array and construct kde
xsamples = samples[xparam]
ysamples = samples[yparam]
arr = numpy.vstack((xsamples, ysamples)).T
kde = construct_kde(arr, use_kombine=use_kombine)
# construct grid to evaluate on
if xmin is None:
xmin = xsamples.min()
if xmax is None:
xmax = xsamples.max()
if ymin is None:
ymin = ysamples.min()
if ymax is None:
ymax = ysamples.max()
npts = 100
X, Y = numpy.mgrid[
xmin:xmax:complex(0, npts), # pylint:disable=invalid-slice-index
ymin:ymax:complex(0, npts)] # pylint:disable=invalid-slice-index
pos = numpy.vstack([X.ravel(), Y.ravel()])
if use_kombine:
Z = numpy.exp(kde(pos.T).reshape(X.shape))
draw = kde.draw
else:
Z = kde(pos).T.reshape(X.shape)
draw = kde.resample
if exclude_region is not None:
# convert X,Y to a single FieldArray so we can use it's ability to
# evaluate strings
farr = FieldArray.from_kwargs(**{xparam: X, yparam: Y})
Z[farr[exclude_region]] = 0.
if plot_density:
ax.imshow(numpy.rot90(Z), extent=[xmin, xmax, ymin, ymax],
aspect='auto', cmap=cmap, zorder=1)
if contour_color is None:
contour_color = 'w'
if plot_contours:
# compute the percentile values
resamps = kde(draw(int(npts**2)))
if use_kombine:
resamps = numpy.exp(resamps)
s = numpy.percentile(resamps, percentiles)
if contour_color is None:
contour_color = 'k'
# make linewidths thicker if not plotting density for clarity
if plot_density:
lw = 1
else:
lw = 2
ct = ax.contour(X, Y, Z, s, colors=contour_color, linewidths=lw,
zorder=3)
# label contours
lbls = ['{p}%'.format(p=int(p)) for p in (100. - percentiles)]
fmt = dict(zip(ct.levels, lbls))
fs = 12
ax.clabel(ct, ct.levels, inline=True, fmt=fmt, fontsize=fs)
return fig, ax
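# A minimal usage sketch for create_density_plot (illustrative only): the
# parameter names 'x' and 'y' and the synthetic samples are stand-ins for real
# posterior samples; pyplot is assumed to be this module's matplotlib import.
#
#     samples = {'x': numpy.random.normal(size=2000),
#                'y': numpy.random.normal(size=2000)}
#     fig, ax = create_density_plot('x', 'y', samples, plot_density=True,
#                                   plot_contours=True, percentiles=[50, 90])
#     fig.savefig('density.png')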
def create_marginalized_hist(ax, values, label, percentiles=None,
color='k', fillcolor='gray', linecolor='navy',
linestyle='-',
title=True, expected_value=None,
expected_color='red', rotated=False,
plot_min=None, plot_max=None):
"""Plots a 1D marginalized histogram of the given param from the given
samples.
Parameters
----------
ax : pyplot.Axes
The axes on which to draw the plot.
values : array
The parameter values to plot.
label : str
A label to use for the title.
percentiles : {None, float or array}
What percentiles to draw lines at. If None, will draw lines at
        `[5, 50, 95]` (i.e., the median and the bounds of the 90% credible
        interval).
color : {'k', string}
What color to make the histogram; default is black.
fillcolor : {'gray', string, or None}
What color to fill the histogram with. Set to None to not fill the
histogram. Default is 'gray'.
linestyle : str, optional
What line style to use for the histogram. Default is '-'.
linecolor : {'navy', string}
What color to use for the percentile lines. Default is 'navy'.
title : bool, optional
        Add a title with an estimated value +/- uncertainty. The estimated
        value is the percentile halfway between the max/min of
        ``percentiles``, while the uncertainty is given by the max/min of
        ``percentiles``. If no percentiles are specified, defaults to quoting
        the median +/- the 95th/5th percentiles.
    expected_value : {None, float}
        If provided, plot a line at this value. Default is None.
    expected_color : {'red', string}
        What color to use for the expected-value line. Default is 'red'.
rotated : {False, bool}
Plot the histogram on the y-axis instead of the x. Default is False.
plot_min : {None, float}
The minimum value to plot. If None, will default to whatever `pyplot`
creates.
plot_max : {None, float}
The maximum value to plot. If None, will default to whatever `pyplot`
creates.
"""
if fillcolor is None:
htype = 'step'
else:
htype = 'stepfilled'
if rotated:
orientation = 'horizontal'
else:
orientation = 'vertical'
ax.hist(values, bins=50, histtype=htype, orientation=orientation,
facecolor=fillcolor, edgecolor=color, ls=linestyle, lw=2,
density=True)
if percentiles is None:
percentiles = [5., 50., 95.]
if len(percentiles) > 0:
plotp = numpy.percentile(values, percentiles)
else:
plotp = []
for val in plotp:
if rotated:
ax.axhline(y=val, ls='dashed', color=linecolor, lw=2, zorder=3)
else:
ax.axvline(x=val, ls='dashed', color=linecolor, lw=2, zorder=3)
# plot expected
if expected_value is not None:
if rotated:
ax.axhline(expected_value, color=expected_color, lw=1.5, zorder=2)
else:
ax.axvline(expected_value, color=expected_color, lw=1.5, zorder=2)
if title:
if len(percentiles) > 0:
minp = min(percentiles)
maxp = max(percentiles)
medp = (maxp + minp) / 2.
else:
minp = 5
medp = 50
maxp = 95
values_min = numpy.percentile(values, minp)
values_med = numpy.percentile(values, medp)
values_max = numpy.percentile(values, maxp)
negerror = values_med - values_min
poserror = values_max - values_med
fmt = '${0}$'.format(str_utils.format_value(
values_med, negerror, plus_error=poserror))
if rotated:
ax.yaxis.set_label_position("right")
# sets colored title for marginal histogram
set_marginal_histogram_title(ax, fmt, color,
label=label, rotated=rotated)
else:
# sets colored title for marginal histogram
set_marginal_histogram_title(ax, fmt, color, label=label)
# remove ticks and set limits
if rotated:
# Remove x-ticks
ax.set_xticks([])
# turn off x-labels
ax.set_xlabel('')
# set limits
ymin, ymax = ax.get_ylim()
if plot_min is not None:
ymin = plot_min
if plot_max is not None:
ymax = plot_max
ax.set_ylim(ymin, ymax)
else:
# Remove y-ticks
ax.set_yticks([])
# turn off y-label
ax.set_ylabel('')
# set limits
xmin, xmax = ax.get_xlim()
if plot_min is not None:
xmin = plot_min
if plot_max is not None:
xmax = plot_max
ax.set_xlim(xmin, xmax)
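# A minimal usage sketch for create_marginalized_hist (illustrative only): the
# axes, values, and label are stand-ins; pyplot is assumed to be this module's
# matplotlib import.
#
#     fig = pyplot.figure()
#     ax = fig.add_subplot(111)
#     values = numpy.random.normal(loc=10., scale=2., size=5000)
#     create_marginalized_hist(ax, values, label='x',
#                              percentiles=[5, 50, 95], expected_value=10.)
#     fig.savefig('marginal.png')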
def set_marginal_histogram_title(ax, fmt, color, label=None, rotated=False):
""" Sets the title of the marginal histograms.
Parameters
----------
ax : Axes
The `Axes` instance for the plot.
fmt : str
The string to add to the title.
color : str
The color of the text to add to the title.
label : str
If title does not exist, then include label at beginning of the string.
rotated : bool
If `True` then rotate the text 270 degrees for sideways title.
"""
# get rotation angle of the title
rotation = 270 if rotated else 0
# get how much to displace title on axes
xscale = 1.05 if rotated else 0.0
if rotated:
yscale = 1.0
elif len(ax.get_figure().axes) > 1:
yscale = 1.15
else:
yscale = 1.05
    # get class that packs text boxes vertically or horizontally
packer_class = offsetbox.VPacker if rotated else offsetbox.HPacker
# if no title exists
if not hasattr(ax, "title_boxes"):
# create a text box
title = "{} = {}".format(label, fmt)
tbox1 = offsetbox.TextArea(
title,
textprops=dict(color=color, size=15, rotation=rotation,
ha='left', va='bottom'))
# save a list of text boxes as attribute for later
ax.title_boxes = [tbox1]
# pack text boxes
ybox = packer_class(children=ax.title_boxes,
align="bottom", pad=0, sep=5)
    # otherwise, append to the existing title
else:
# delete old title
ax.title_anchor.remove()
# add new text box to list
tbox1 = offsetbox.TextArea(
" {}".format(fmt),
textprops=dict(color=color, size=15, rotation=rotation,
ha='left', va='bottom'))
ax.title_boxes = ax.title_boxes + [tbox1]
# pack text boxes
ybox = packer_class(children=ax.title_boxes,
align="bottom", pad=0, sep=5)
# add new title and keep reference to instance as an attribute
anchored_ybox = offsetbox.AnchoredOffsetbox(
loc=2, child=ybox, pad=0.,
frameon=False, bbox_to_anchor=(xscale, yscale),
bbox_transform=ax.transAxes, borderpad=0.)
ax.title_anchor = ax.add_artist(anchored_ybox)
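# A minimal usage sketch for set_marginal_histogram_title (illustrative only):
# it is normally called from create_marginalized_hist, but can be used directly
# to stack colored title fragments above (or beside, when rotated) an axes.
#
#     fig = pyplot.figure()
#     ax = fig.add_subplot(111)
#     set_marginal_histogram_title(ax, r'$10.0^{+2.0}_{-2.0}$', 'blue',
#                                  label='x')
#     # a second call appends another fragment in a different color
#     set_marginal_histogram_title(ax, r'$9.8^{+1.5}_{-1.5}$', 'red')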
def create_multidim_plot(parameters, samples, labels=None,
mins=None, maxs=None, expected_parameters=None,
expected_parameters_color='r',
plot_marginal=True, plot_scatter=True,
marginal_percentiles=None, contour_percentiles=None,
marginal_title=True, marginal_linestyle='-',
zvals=None, show_colorbar=True, cbar_label=None,
vmin=None, vmax=None, scatter_cmap='plasma',
plot_density=False, plot_contours=True,
density_cmap='viridis',
contour_color=None, hist_color='black',
line_color=None, fill_color='gray',
use_kombine=False, fig=None, axis_dict=None):
"""Generate a figure with several plots and histograms.
Parameters
----------
parameters: list
Names of the variables to be plotted.
samples : FieldArray
A field array of the samples to plot.
labels: dict, optional
A dictionary mapping parameters to labels. If none provided, will just
use the parameter strings as the labels.
mins : {None, dict}, optional
Minimum value for the axis of each variable in `parameters`.
If None, it will use the minimum of the corresponding variable in
`samples`.
maxs : {None, dict}, optional
Maximum value for the axis of each variable in `parameters`.
If None, it will use the maximum of the corresponding variable in
`samples`.
expected_parameters : {None, dict}, optional
Expected values of `parameters`, as a dictionary mapping parameter
names -> values. A cross will be plotted at the location of the
expected parameters on axes that plot any of the expected parameters.
expected_parameters_color : {'r', string}, optional
What color to make the expected parameters cross.
plot_marginal : {True, bool}
Plot the marginalized distribution on the diagonals. If False, the
diagonal axes will be turned off.
plot_scatter : {True, bool}
Plot each sample point as a scatter plot.
marginal_percentiles : {None, array}
What percentiles to draw lines at on the 1D histograms.
        If None, will draw lines at `[5, 50, 95]` (i.e., the median and the
        bounds of the 90% credible interval).
marginal_title : bool, optional
Add a title over the 1D marginal plots that gives an estimated value
        +/- uncertainty. The estimated value is the percentile halfway between
        the max/min of ``marginal_percentiles``, while the uncertainty is
        given by the max/min of ``marginal_percentiles``. If no
        ``marginal_percentiles`` are specified, the median +/- the 95th/5th
        percentiles will be quoted.
marginal_linestyle : str, optional
What line style to use for the marginal histograms.
contour_percentiles : {None, array}
What percentile contours to draw on the scatter plots. If None,
will plot the 50th and 90th percentiles.
zvals : {None, array}
An array to use for coloring the scatter plots. If None, scatter points
will be the same color.
show_colorbar : {True, bool}
Show the colorbar of zvalues used for the scatter points. A ValueError
will be raised if zvals is None and this is True.
cbar_label : {None, str}
Specify a label to add to the colorbar.
vmin: {None, float}, optional
Minimum value for the colorbar. If None, will use the minimum of zvals.
vmax: {None, float}, optional
        Maximum value for the colorbar. If None, will use the maximum of
zvals.
scatter_cmap : {'plasma', string}
The color map to use for the scatter points. Default is 'plasma'.
plot_density : {False, bool}
Plot the density of points as a color map.
plot_contours : {True, bool}
Draw contours showing the 50th and 90th percentile confidence regions.
density_cmap : {'viridis', string}
The color map to use for the density plot.
contour_color : {None, string}
The color to use for the contour lines. Defaults to white for
density plots, navy for scatter plots without zvals, and black
otherwise.
use_kombine : {False, bool}
Use kombine's KDE to calculate density. Otherwise, will use
        `scipy.stats.gaussian_kde`. Default is False.
Returns
-------
fig : pyplot.figure
The figure that was created.
axis_dict : dict
A dictionary mapping the parameter combinations to the axis and their
location in the subplots grid; i.e., the key, values are:
`{('param1', 'param2'): (pyplot.axes, row index, column index)}`
"""
if labels is None:
labels = {p: p for p in parameters}
# set up the figure with a grid of axes
# if only plotting 2 parameters, make the marginal plots smaller
nparams = len(parameters)
if nparams == 2:
width_ratios = [3, 1]
height_ratios = [1, 3]
else:
width_ratios = height_ratios = None
# only plot scatter if more than one parameter
plot_scatter = plot_scatter and nparams > 1
# Sort zvals to get higher values on top in scatter plots
if plot_scatter:
if zvals is not None:
sort_indices = zvals.argsort()
zvals = zvals[sort_indices]
samples = samples[sort_indices]
if contour_color is None:
contour_color = 'k'
elif show_colorbar:
raise ValueError("must provide z values to create a colorbar")
else:
# just make all scatter points same color
zvals = 'gray'
if plot_contours and contour_color is None:
contour_color = 'navy'
# convert samples to a dictionary to avoid re-computing derived parameters
# every time they are needed
    samples = {p: samples[p] for p in parameters}
# values for axis bounds
if mins is None:
mins = {p: samples[p].min() for p in parameters}
else:
# copy the dict
mins = {p: val for p, val in mins.items()}
if maxs is None:
maxs = {p: samples[p].max() for p in parameters}
else:
# copy the dict
maxs = {p: val for p, val in maxs.items()}
# create the axis grid
if fig is None and axis_dict is None:
fig, axis_dict = create_axes_grid(
parameters, labels=labels,
width_ratios=width_ratios, height_ratios=height_ratios,
no_diagonals=not plot_marginal)
# Diagonals...
if plot_marginal:
for pi, param in enumerate(parameters):
ax, _, _ = axis_dict[param, param]
# if only plotting 2 parameters and on the second parameter,
# rotate the marginal plot
rotated = nparams == 2 and pi == nparams-1
# see if there are expected values
if expected_parameters is not None:
try:
expected_value = expected_parameters[param]
except KeyError:
expected_value = None
else:
expected_value = None
create_marginalized_hist(
ax, samples[param], label=labels[param],
color=hist_color, fillcolor=fill_color,
linestyle=marginal_linestyle, linecolor=line_color,
title=marginal_title, expected_value=expected_value,
expected_color=expected_parameters_color,
rotated=rotated, plot_min=mins[param], plot_max=maxs[param],
percentiles=marginal_percentiles)
# Off-diagonals...
for px, py in axis_dict:
if px == py:
continue
ax, _, _ = axis_dict[px, py]
if plot_scatter:
if plot_density:
alpha = 0.3
else:
alpha = 1.
plt = ax.scatter(x=samples[px], y=samples[py], c=zvals, s=5,
edgecolors='none', vmin=vmin, vmax=vmax,
cmap=scatter_cmap, alpha=alpha, zorder=2)
if plot_contours or plot_density:
# Exclude out-of-bound regions
# this is a bit kludgy; should probably figure out a better
# solution to eventually allow for more than just m_p m_s
if (px == 'm_p' and py == 'm_s') or (py == 'm_p' and px == 'm_s'):
exclude_region = 'm_s > m_p'
else:
exclude_region = None
create_density_plot(
px, py, samples, plot_density=plot_density,
plot_contours=plot_contours, cmap=density_cmap,
percentiles=contour_percentiles,
contour_color=contour_color, xmin=mins[px], xmax=maxs[px],
ymin=mins[py], ymax=maxs[py],
exclude_region=exclude_region, ax=ax,
use_kombine=use_kombine)
if expected_parameters is not None:
try:
ax.axvline(expected_parameters[px], lw=1.5,
color=expected_parameters_color, zorder=5)
except KeyError:
pass
try:
ax.axhline(expected_parameters[py], lw=1.5,
color=expected_parameters_color, zorder=5)
except KeyError:
pass
ax.set_xlim(mins[px], maxs[px])
ax.set_ylim(mins[py], maxs[py])
# adjust tick number for large number of plots
if len(parameters) > 3:
for px, py in axis_dict:
ax, _, _ = axis_dict[px, py]
ax.set_xticks(reduce_ticks(ax, 'x', maxticks=3))
ax.set_yticks(reduce_ticks(ax, 'y', maxticks=3))
if plot_scatter and show_colorbar:
# compute font size based on fig size
scale_fac = get_scale_fac(fig)
fig.subplots_adjust(right=0.85, wspace=0.03)
cbar_ax = fig.add_axes([0.9, 0.1, 0.03, 0.8])
cb = fig.colorbar(plt, cax=cbar_ax)
if cbar_label is not None:
cb.set_label(cbar_label, fontsize=12*scale_fac)
cb.ax.tick_params(labelsize=8*scale_fac)
return fig, axis_dict
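# A minimal usage sketch for create_multidim_plot (illustrative only): the
# parameter names, labels, synthetic samples, and z-values are stand-ins for
# real posterior samples loaded elsewhere.
#
#     samples = FieldArray.from_kwargs(
#         mass1=numpy.random.uniform(10., 50., size=5000),
#         mass2=numpy.random.uniform(10., 50., size=5000))
#     fig, axis_dict = create_multidim_plot(
#         ['mass1', 'mass2'], samples,
#         labels={'mass1': r'$m_1$', 'mass2': r'$m_2$'},
#         plot_scatter=True, plot_contours=True, plot_marginal=True,
#         zvals=numpy.random.uniform(size=5000), cbar_label='z value')
#     fig.savefig('posterior.png')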
def remove_common_offset(arr):
"""Given an array of data, removes a common offset > 1000, returning the
removed value.
"""
offset = 0
isneg = (arr <= 0).all()
# make sure all values have the same sign
if isneg or (arr >= 0).all():
# only remove offset if the minimum and maximum values are the same
        # order of magnitude and > O(1000)
minpwr = numpy.log10(abs(arr).min())
maxpwr = numpy.log10(abs(arr).max())
if numpy.floor(minpwr) == numpy.floor(maxpwr) and minpwr > 3:
offset = numpy.floor(10**minpwr)
if isneg:
offset *= -1
arr = arr - offset
return arr, int(offset)
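# A minimal usage sketch for remove_common_offset (illustrative only), using
# GPS-time-like values:
#
#     arr = numpy.array([1126259462.2, 1126259462.4, 1126259462.6])
#     shifted, offset = remove_common_offset(arr)
#     # offset is the floored common magnitude (1126259462 here) and
#     # shifted is arr - offset, leaving only the small differences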
def reduce_ticks(ax, which, maxticks=3):
"""Given a pyplot axis, resamples its `which`-axis ticks such that are at most
`maxticks` left.
Parameters
----------
ax : axis
The axis to adjust.
which : {'x' | 'y'}
Which axis to adjust.
maxticks : {3, int}
Maximum number of ticks to use.
Returns
-------
array
An array of the selected ticks.
"""
ticks = getattr(ax, 'get_{}ticks'.format(which))()
if len(ticks) > maxticks:
# make sure the left/right value is not at the edge
minax, maxax = getattr(ax, 'get_{}lim'.format(which))()
dw = abs(maxax-minax)/10.
start_idx, end_idx = 0, len(ticks)
if ticks[0] < minax + dw:
start_idx += 1
if ticks[-1] > maxax - dw:
end_idx -= 1
# get reduction factor
fac = int(len(ticks) / maxticks)
ticks = ticks[start_idx:end_idx:fac]
return ticks
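# A minimal usage sketch for reduce_ticks (illustrative only): thin out the
# x-axis ticks of a crowded subplot to at most three.
#
#     fig = pyplot.figure()
#     ax = fig.add_subplot(111)
#     ax.plot(numpy.linspace(0., 1., 100), numpy.random.normal(size=100))
#     ax.set_xticks(reduce_ticks(ax, 'x', maxticks=3))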
| gpl-3.0 |