repo_name stringlengths 6–112 | path stringlengths 4–204 | copies stringlengths 1–3 | size stringlengths 4–6 | content stringlengths 714–810k | license stringclasses 15 values
---|---|---|---|---|---|
hansioan/biotoolsinspector | btinspector/Provisional_detector_corrector.py | 1 | 16126 | # coding: utf-8
import re
import pandas as pd
def dot_checker(text):
''' This function checks if the text ends with a dot '''
# check malformed endings first, since e.g. ". " would otherwise fall into the branch below
if text.endswith(" .") or text.endswith("..") or text.endswith(". "):
return "Warning: the description does not end properly"
elif not text.endswith("."):
return "Warning: the description does not end with a dot"
def capitalize_checker(text):
''' This function checks if the initial letter of the text is capitalized '''
if text[0].islower():
return "Warning: the description initial letter is not capitalized"
def length_checker(text):
''' This function checks if the length of the text is less than 10 or more than 500 characters '''
if len(text) < 10:
return "Warning: the length of the description is shorter than 10 characters"
if len(text) > 500:
return "Warning: the length of the description is longer than 500 characters"
if len(text) >= 10 and len(text) <= 500 and " " not in text:
return "Warning: the description contains only one word"
def url_checker(text):
''' This function checks if the text contains a URL '''
url_regex = re.compile(r"http|www\.")
if url_regex.search(text):
return "Warning: the description contains a URL"
def names_checker(text, name):
''' This function checks if the text contains the tool name '''
if "+" in name:
name = name.replace("+", "\+")
if "*" in name:
name = name.replace("*", "\*")
if "(" in name:
name = name.replace("(", "\(")
if ")" in name:
name = name.replace(")", "\)")
if "[" in name:
name = name.replace("[", "\[")
if "]" in name:
name = name.replace("]", "\]")
name_regex = re.compile(name, re.IGNORECASE)
if name_regex.search(text):
return "Warning: the description may contain the tool name"
def character_checker(text):
''' This function checks if the text contains some unwanted characters '''
new_line_regex = re.compile("\n")
carriage_return_regex = re.compile("↵")
tab_regex = re.compile("\t")
bullet_point_regex = re.compile("•")
unknown_symbol_regex = re.compile("|�")
error_list = []
if new_line_regex.search(text):
error_list.append("Warning: the description contains new lines")
if carriage_return_regex.search(text):
error_list.append("Warning: the description contains carriage returns")
if tab_regex.search(text):
error_list.append("Warning: the description contains tabs")
if bullet_point_regex.search(text):
error_list.append("Warning: the description contains bullet points")
if unknown_symbol_regex.search(text):
error_list.append("Warning: the description contains unknown symbols")
return error_list
def dot_fixer(text):
''' This function calls dot_checker and returns a description with an ending dot if condition is TRUE '''
if dot_checker(text):
if text.endswith(" .") or text.endswith("..") or text.endswith(". "):
# fix only the ending so that ". " sequences inside the text are left untouched
return text.rstrip(" .") + "."
else:
return text + "."
def capitalize_fixer(text):
''' This function calls capitalize_checker and returns an initial-capitalized description if condition is TRUE '''
if capitalize_checker(text):
return text[0].upper() + text[1:]
def character_fixer(text):
''' This function checks if the text contains some unwanted characters and removes them if needed'''
new_line_regex = re.compile("\n")
carriage_return_regex = re.compile("↵")
tab_regex = re.compile("\t")
bullet_point_regex = re.compile("•")
unknown_symbol_regex = re.compile("|�")
error_list = []
if new_line_regex.search(text):
text = text.replace("\n", " ")
if carriage_return_regex.search(text):
text = text.replace("↵", " ").replace(" ", " ")
if tab_regex.search(text):
text = text.replace("\t", " ").replace(" "," ")
if bullet_point_regex.search(text):
text = text.replace("•", ", ").replace(" "," ")
if unknown_symbol_regex.search(text):
text = text.replace("", "").replace("�", "")
return text
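# Illustrative behaviour of character_fixer (example added for clarity, not part of the original module):
# >>> character_fixer("step one\t- step two\n- done")
# 'step one - step two - done'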
def checker(text, name = None, dots_caps = True, length_check = True, url_check = True,
names_check = True, character_check = True, character_fix=True):
''' This function checks if the given text (tool description and tool name) is written according to the requirements:
If dots_caps is False, the text is neither checked nor fixed for an ending dot and/or a capitalized initial
If length_check is False, the text is not checked for being out of the length limits or for
consisting of a single word
If url_check is False, the text is not checked for containing a URL
If names_check is False, the text is not checked for containing the tool name
If character_check is False, the text is not checked for unwanted characters
If character_fix is False, unwanted characters are not removed from the text '''
name = "No name provided" if name is None else name
# List of all errors found in a description
error = []
if dots_caps:
if dot_checker(text):
error.append(dot_checker(text))
text = dot_fixer(text)
if capitalize_checker(text):
error.append(capitalize_checker(text))
text = capitalize_fixer(text)
if length_check:
if length_checker(text):
error.append(length_checker(text))
if url_check:
if url_checker(text):
error.append(url_checker(text))
if names_check:
if name == "No name provided":
print "Warning: no names provided. Names cannot be checked"
else:
error.append(names_checker(text, name))
if character_check:
error += character_checker(text)
if character_fix:
text = character_fixer(text)
# Remove None entries (checks that passed) so that an empty list means no errors
if None in error:
error.remove(None)
return name, error, text
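# Illustrative usage (example added for clarity, not part of the original module):
# >>> checker("a tool for protein analysis", name="ProtTool", character_check=False, character_fix=False)
# ('ProtTool',
#  ['Warning: the description does not end with a dot',
#   'Warning: the description initial letter is not capitalized'],
#  'A tool for protein analysis.')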
def data_iterator(texts, names=None, dot_caps_checking=True, url_checking=True, names_checking=True,
length_checking=True, character_checking=True, character_fixing=True, to_file=True, file_name="Warnings"):
'''This function iterates over the data frame/list/etc. and
applies the checker function
If dot_caps_checking is False, the text is neither checked nor fixed for an ending dot and/or a capitalized initial
If length_checking is False, the text is not checked for being out of the length limits or for
consisting of a single word
If url_checking is False, the text is not checked for containing a URL
If names_checking is False, the text is not checked for containing the tool name
If character_checking is False, the text is not checked for unwanted characters
If character_fixing is False, unwanted characters are not removed from the text
If to_file is False, the output is not written to a file but returned as a pandas dataframe'''
error_list = []
# checking if the names are given to the function
if names is not None:
# checking if the length of text list and name list is the same
# if not, an error is raised
if len(texts)==len(names):
# iterating over the text list and applying checker function
for i in range(len(texts)):
checker_results = checker(texts[i], names[i], dots_caps=dot_caps_checking, url_check=url_checking,
names_check=names_checking, length_check=length_checking,
character_check=character_checking, character_fix=character_fixing)
# adding the checker function results (name, warnings, fixed text) to the list
error_list += [checker_results]
# creating a dataframe from a full list of checker function results from all the items
error_df = pd.DataFrame(error_list, columns=["Name", "Warning", "Fixed capitalized/dotted description"])
# if to_file is True, results are written to the file
if to_file:
error_df.to_csv(file_name+".csv")
# otherwise, results are returned as a dataframe with
# the tool name, the warnings which occurred and the fixed description
else:
return error_df
else:
raise Exception("The length of two lists is different")
# if names are not provided
else:
# iterating over the text list and applying checker function
for i in range(len(texts)):
checker_results = checker(texts[i], dots_caps=dot_caps_checking, url_check=url_checking,
names_check=names_checking, length_check=length_checking,
character_check=character_checking, character_fix=character_fixing)
# adding the checker function results (name, warnings, fixed text) to the list
error_list += [checker_results]
# creating a dataframe from a full list of checker function results from all the items
error_df = pd.DataFrame(error_list, columns=["Name", "Warning", "Fixed capitalized/dotted description"])
# if to_file is True, results are written to the file
if to_file:
error_df.to_csv(file_name+"_no_name.csv")
# otherwise, results are returned as a dataframe with the warnings which occurred
else:
return error_df
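# Illustrative usage (added for clarity, not part of the original module): with to_file=False
# the results are returned as a pandas DataFrame rather than written to "<file_name>.csv"
# >>> df = data_iterator(["some tool description"], names=["SomeTool"], to_file=False)
# >>> list(df.columns)
# ['Name', 'Warning', 'Fixed capitalized/dotted description']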
def __number2bool(number):
return number == 1
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description = "Command line tool for checking tool descriptions")
parser.add_argument("-m", "--mode", help="Mode of the checker: \"full\" - checks descriptions with their names;" \
"\"parial\" - checks only the descriptions.")
parser.add_argument("-one-file", "--one", help="Input descriptions and names from one file. "
"Usable only with mode \"full\".",
required=False, action = 'store_true')
parser.add_argument("-d", "--descriptions", help="File with descriptions.")
parser.add_argument("-n", "--names", help="File with description names.")
parser.add_argument("-sentence", "--sentence_checker", help="Check if descriptions start with a capital letter and end"
" with a dot. Options: \"1\" to use the argument,"
" \"0\" for skipping the argument. Default is 1.",
type=int, required=False, default=1)
parser.add_argument("-url", "--url_checker", help="Check if descriptions contain URLs. Options: \"1\" to use the argument, "
"\"0\" for skipping the argument. Default is 1.",
type=int, required=False, default=1)
parser.add_argument("-name", "--name_check", help="Check if the description contains names. Options: \"1\" to use the argument, "
"\"0\" for skipping the argument. Default is 1.",
type=int, required=False, default=1)
parser.add_argument("-length", "--length_check", help="Check if the description length is within the limits and "
"the descriptions is made of more than one word. "
"Options: \"1\" to use the argument, \"0\" for skipping the argument. "
"Default is 1.",
type=int, required=False, default=1)
parser.add_argument("-character", "--character_check", help="Check if the description contains unwanted characters "
"Options: \"1\" to use the argument, \"0\" for skipping "
"the argument. Default is 1.",
type=int, required=False, default=1)
parser.add_argument("-char-fix", "--character_fix", help="Check if the description contains unwanted characters and "
"remove them. Options: \"1\" to use the argument, \"0\" for "
"skipping the argument. Default is 1.",
type=int, required=False, default=1)
parser.add_argument("-to-file", "--file", help="Write all the output to a .csv file. Options: \"1\" to use the argument, "
"\"0\" for skipping the argument. Default is 1.",
type=int, required=False, default=1)
parser.add_argument("-filename", "--file_name", help="Choose the output file name if the argument -to-file is True. "
"Default is \"Warnings\".",
type=str, required=False, default="Warnings")
args = vars(parser.parse_args())
if args["mode"] == "full":
if args["one"]:
descriptions_names = pd.read_table(args["descriptions"], header=None, index_col=None)
names = descriptions_names[0]
descriptions = descriptions_names[1]
print data_iterator(descriptions, names,
dot_caps_checking=__number2bool(args["sentence_checker"]),
url_checking=__number2bool(args["url_checker"]),
names_checking=__number2bool(args["name_check"]),
length_checking=__number2bool(args["length_check"]),
character_checking=__number2bool(args["character_check"]),
character_fixing=__number2bool(args["character_fix"]),
to_file=__number2bool(args["file"]),
file_name=args["file_name"])
else:
descriptions = pd.read_table(args["descriptions"], header=None, index_col=None)
names = pd.read_table(args["names"], header=None, index_col=None)
names = names[0]
descriptions = descriptions[0]
print data_iterator(descriptions, names,
dot_caps_checking=__number2bool(args["sentence_checker"]),
url_checking=__number2bool(args["url_checker"]),
names_checking=__number2bool(args["name_check"]),
length_checking=__number2bool(args["length_check"]),
character_checking=__number2bool(args["character_check"]),
character_fixing=__number2bool(args["character_fix"]),
to_file=__number2bool(args["file"]),
file_name=args["file_name"])
elif args["mode"] == "partial":
descriptions = pd.read_table(args["descriptions"], header=None, index_col=None)
descriptions = descriptions[0]
print data_iterator(descriptions,
dot_caps_checking=__number2bool(args["sentence_checker"]),
url_checking=__number2bool(args["url_checker"]),
names_checking=__number2bool(args["name_check"]),
length_checking=__number2bool(args["length_check"]),
character_checking=__number2bool(args["character_check"]),
character_fixing=__number2bool(args["character_fix"]),
to_file=__number2bool(args["file"]),
file_name=args["file_name"])
else:
print "Mode Error: The mode is wrong." \
"\nUsage: python btcorrector.py <mode> <description file> <name file>" \
"\n<mode>: \"full\" or \"partial\""
| mit |
gviejo/ThalamusPhysio | python/figure_article_v3/main_article_fig_supp_2.py | 1 | 9795 | #!/usr/bin/env python
import numpy as np
import pandas as pd
# from matplotlib.pyplot import plot,show,draw
import scipy.io
import sys
sys.path.append("../")
from functions import *
from pylab import *
from sklearn.decomposition import PCA
import _pickle as cPickle
import neuroseries as nts
import os
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
# carte38_mouse17 = imread('../../figures/mapping_to_align/paxino/paxino_38_mouse17_2.png')
# bound_map_38 = (-2336/1044, 2480/1044, 0, 2663/1044)
# cut_bound_map = (-86/1044, 2480/1044, 0, 2663/1044)
carte_adrien = imread('/home/guillaume/Dropbox (Peyrache Lab)/Peyrache Lab Team Folder/Projects/HPC-Thal/Figures/ATAnatomy_ALL-01.png')
carte_adrien2 = imread('/home/guillaume/Dropbox (Peyrache Lab)/Peyrache Lab Team Folder/Projects/HPC-Thal/Figures/ATAnatomy_Contour-01.png')
bound_adrien = (-398/1254, 3319/1254, -(239/1254 - 20/1044), 3278/1254)
path_snippet = "../../figures/figures_articles/figure2/"
###############################################################################################################
###############################################################################################################
# PLOT
###############################################################################################################
def figsize(scale):
fig_width_pt = 483.69687 # Get this from LaTeX using \the\textwidth
inches_per_pt = 1.0/72.27 # Convert pt to inch
golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this)
fig_width = fig_width_pt*inches_per_pt*scale # width in inches
fig_height = fig_width*golden_mean*1.8 # height in inches
fig_size = [fig_width,fig_height]
return fig_size
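# Illustrative note (added for clarity, not part of the original script): with the 1.8 factor above,
# figsize(1.0) is roughly 483.69687/72.27 ~ 6.69 in wide and 6.69 * 0.618 * 1.8 ~ 7.44 in tall;
# figsize(0.5) halves both dimensions while keeping the same aspect ratio.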
def simpleaxis(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# ax.xaxis.set_tick_params(size=6)
# ax.yaxis.set_tick_params(size=6)
def noaxis(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.set_xticks([])
ax.set_yticks([])
# ax.xaxis.set_tick_params(size=6)
# ax.yaxis.set_tick_params(size=6)
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
# mpl.use("pdf")
pdf_with_latex = { # setup matplotlib to use latex for output
"pgf.texsystem": "pdflatex", # change this if using xetex or lautex
# "text.usetex": True, # use LaTeX to write all text
# "font.family": "serif",
"font.serif": [], # blank entries should cause plots to inherit fonts from the document
"font.sans-serif": [],
"font.monospace": [],
"axes.labelsize": 8, # LaTeX default is 10pt font.
"font.size": 7,
"legend.fontsize": 7, # Make the legend/label fonts a little smaller
"xtick.labelsize": 7,
"ytick.labelsize": 7,
"pgf.preamble": [
r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :)
r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble
],
"lines.markeredgewidth" : 0.2,
"axes.linewidth" : 0.8,
"ytick.major.size" : 1.5,
"xtick.major.size" : 1.5
}
mpl.rcParams.update(pdf_with_latex)
import matplotlib.gridspec as gridspec
from matplotlib.pyplot import *
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
colors = ['#444b6e', '#708b75', '#9ab87a']
fig = figure(figsize = figsize(1.0), tight_layout = True)
outer = gridspec.GridSpec(4,3, figure = fig)
lb = ['a', 'b', 'c']
mn = ['a Mouse 2', 'b Mouse 3', 'c Mouse 4']
idn = [0,2,3]
for i, m in enumerate(['Mouse12', 'Mouse20', 'Mouse32']):
modulations = pd.HDFStore(path_snippet+'modulation_theta2_swr_'+m+'.h5')
###############################################################################
# 1. MAPS THETA
###############################################################################
ax1 = fig.add_subplot(outer[i,0])
noaxis(ax1)
tmp = modulations['theta']
bound = (tmp.columns[0], tmp.columns[-1], tmp.index[-1], tmp.index[0])
im = imshow(tmp, extent = bound, alpha = 0.8, aspect = 'equal', cmap = 'GnBu', vmin = 0, vmax = 1)
imshow(carte_adrien2, extent = bound_adrien, interpolation = 'bilinear', aspect = 'equal')
pos_nb = (1.0, 1.0)
ax1.text(pos_nb[0],pos_nb[1], mn[i], transform = ax1.transAxes, fontsize = 9, fontweight='bold')
if i == 0:
#colorbar
cax = inset_axes(ax1, "40%", "4%",
bbox_to_anchor=(0.2, 1.08, 1, 1),
bbox_transform=ax1.transAxes,
loc = 'lower left')
cb = colorbar(im, cax = cax, orientation = 'horizontal', ticks = [0,1])
cb.ax.xaxis.set_tick_params(pad = 1)
cax.set_title("Density", fontsize = 7, pad = 2.5)
ax1.text(-0.05, 1.3, "Theta modulation", transform = ax1.transAxes, fontsize = 8)
###############################################################################
# 2. MAPS SWR POS
###############################################################################
ax2 = fig.add_subplot(outer[i,1])
noaxis(ax2)
tmp = modulations['pos_swr']
im = imshow(tmp, extent = bound, alpha = 0.8, aspect = 'equal', cmap = 'Reds', vmin = 0, vmax = 1)
imshow(carte_adrien2, extent = bound_adrien, interpolation = 'bilinear', aspect = 'equal')
if i == 0:
#colorbar
cax = inset_axes(ax2, "40%", "4%",
bbox_to_anchor=(0.2, 1.08, 1, 1),
bbox_transform=ax2.transAxes,
loc = 'lower left')
cb = colorbar(im, cax = cax, orientation = 'horizontal', ticks = [0,1])
cb.ax.xaxis.set_tick_params(pad = 1)
cax.set_title("Density $t_{0 ms} > P_{60}$", fontsize = 7, pad = 2.5)
ax2.text(0.4, 1.3, "SWRs modulation", transform = ax2.transAxes, fontsize = 8)
###############################################################################
# 3. MAPS NEG SWR
###############################################################################
ax3 = fig.add_subplot(outer[i,2])
noaxis(ax3)
tmp = modulations['neg_swr']
im = imshow(tmp, extent = bound, alpha = 0.8, aspect = 'equal', cmap = 'Greens', vmin = 0, vmax = 1)
imshow(carte_adrien2, extent = bound_adrien, interpolation = 'bilinear', aspect = 'equal')
if i == 0:
#colorbar
cax = inset_axes(ax3, "40%", "4%",
bbox_to_anchor=(0.2, 1.08, 1, 1),
bbox_transform=ax3.transAxes,
loc = 'lower left')
cb = colorbar(im, cax = cax, orientation = 'horizontal', ticks = [0,1])
cb.ax.xaxis.set_tick_params(pad = 1)
cax.set_title("Density $t_{0 ms} < P_{40}$", fontsize = 7, pad = 2.5)
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
swr_mod, swr_ses = loadSWRMod('/mnt/DataGuillaume/MergedData/SWR_THAL_corr.pickle', datasets, return_index=True)
nbins = 400
binsize = 5
times = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2
swr_mod = pd.DataFrame( columns = swr_ses,
index = times,
data = gaussFilt(swr_mod, (20,)).transpose())
swr_mod = swr_mod.drop(swr_mod.columns[swr_mod.isnull().any()].values, axis = 1)
swr_mod = swr_mod.loc[-500:500]
mappings = pd.read_hdf("/mnt/DataGuillaume/MergedData/MAPPING_NUCLEUS.h5")
index = mappings.groupby(['nucleus']).groups
swr_MD = swr_mod[index['MD']]
swr_AD = swr_mod[index['AD']]
colors = ["#CA3242","#849FAD", "#27647B", "#57575F"]
#########################################################################
# A. MD SWR
#########################################################################
subplot(outer[3,0])
simpleaxis(gca())
gca().text(-0.3, 1.0, "d", transform = gca().transAxes, fontsize = 9, fontweight='bold')
plot(swr_MD, color = colors[1], alpha = 0.7, linewidth = 0.8)
plot(swr_MD.mean(1), color = 'black')
xlabel("Time lag (ms)")
ylabel("z (a.u.)")
ylim(-0.61,0.76)
title("MD", pad = 0.0)
# #########################################################################
# # B. AD SWR
# #########################################################################
subplot(outer[3,1])
simpleaxis(gca())
gca().text(-0.3, 1.0, "e", transform = gca().transAxes, fontsize = 9, fontweight='bold')
plot(swr_AD, color = colors[0], alpha = 0.7, linewidth = 0.8)
plot(swr_AD.mean(1), color = 'black')
xlabel("Time lag (ms)")
ylabel("z (a.u.)")
ylim(-0.61,0.76)
title("AD", pad = 0.5)
#########################################################################
# C. hist z 50 ms
#########################################################################
subplot(outer[3,2])
simpleaxis(gca())
gca().text(-0.3, 1.0, "f", transform = gca().transAxes, fontsize = 9, fontweight='bold')
hist(swr_AD.loc[-50], label = 'AD', bins = 20, alpha = 0.7, color = colors[0], histtype='stepfilled', weights = np.ones(swr_AD.shape[1])/swr_AD.shape[1])
hist(swr_MD.loc[-50], label = 'MD', bins = 20, alpha = 0.7, color = colors[1], histtype='stepfilled', weights = np.ones(swr_MD.shape[1])/swr_MD.shape[1])
legend(edgecolor = None, facecolor = None, frameon = False, loc = 'upper right')
yticks([0, 0.05, 0.1, 0.15], ['0', '5', '10', '15'])
ylabel("%")
xlabel("z (t=-50 ms)")
# subplots_adjust(top = 0.93, bottom = 0.2, right = 0.96, left = 0.08)
# fig.subplots_adjust(hspace= 1)
savefig("../../figures/figures_articles_v3/figart_supp_2.pdf", dpi = 900, facecolor = 'white')
os.system("evince ../../figures/figures_articles_v3/figart_supp_2.pdf &")
| gpl-3.0 |
HrWangChengdu/CS231n | assignment3_tianjun/exp_generation.py | 1 | 5432 | # As usual, a bit of setup
import time, os, json
import numpy as np
from scipy.misc import imread, imresize
import matplotlib.pyplot as plt
from cs231n.classifiers.pretrained_cnn import PretrainedCNN
from cs231n.data_utils import load_tiny_imagenet
from cs231n.image_utils import blur_image, deprocess_image, preprocess_image
#%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
#%load_ext autoreload
#%autoreload 2
data = load_tiny_imagenet('cs231n/datasets/tiny-imagenet-100-A', subtract_mean=True)
model = PretrainedCNN(h5_file='cs231n/datasets/pretrained_model.h5')
def create_class_visualization(target_y, model, **kwargs):
"""
Perform optimization over the image to generate class visualizations.
Inputs:
- target_y: Integer in the range [0, 100) giving the target class
- model: A PretrainedCNN that will be used for generation
Keyword arguments:
- learning_rate: Floating point number giving the learning rate
- blur_every: An integer; how often to blur the image as a regularizer
- l2_reg: Floating point number giving L2 regularization strength on the image; this is lambda in the equation above.
- max_jitter: How much random jitter to add to the image as regularization
- num_iterations: How many iterations to run for
- show_every: How often to show the image
"""
learning_rate = kwargs.pop('learning_rate', 10000)
blur_every = kwargs.pop('blur_every', 1)
l2_reg = kwargs.pop('l2_reg', 1e-6)
max_jitter = kwargs.pop('max_jitter', 4)
num_iterations = kwargs.pop('num_iterations', 100)
show_every = kwargs.pop('show_every', 25)
X = np.random.randn(1, 3, 64, 64)
mode = 'test'
for t in xrange(num_iterations):
# As a regularizer, add random jitter to the image
ox, oy = np.random.randint(-max_jitter, max_jitter+1, 2)
X = np.roll(np.roll(X, ox, -1), oy, -2)
scores, cache = model.forward(X, mode=mode)
class_mask = np.zeros(scores.shape)
class_mask[0,target_y] = 1
scores = scores * class_mask
dX, grads = model.backward(scores, cache)
dX = dX - l2_reg * X
X = X + learning_rate * dX
# Undo the jitter
X = np.roll(np.roll(X, -ox, -1), -oy, -2)
# As a regularizer, clip the image
X = np.clip(X, -data['mean_image'], 255.0 - data['mean_image'])
# As a regularizer, periodically blur the image
if t % blur_every == 0:
X = blur_image(X)
# Periodically show the image
if t % show_every == 0:
plt.imshow(deprocess_image(X, data['mean_image']))
plt.gcf().set_size_inches(3, 3)
plt.axis('off')
img_path = 'images/class_%d_%d.jpg' % (target_y, t)
plt.savefig(img_path)
return X
'''
# visualize class
target_y = 50
print data['class_names'][target_y]
X = create_class_visualization(target_y, model, l2_reg=2e-5,num_iterations=300, show_every=100)
'''
# Deep Dream
def deepdream(X, layer, model, **kwargs):
"""
Generate a DeepDream image.
Inputs:
- X: Starting image, of shape (1, 3, H, W)
- layer: Index of layer at which to dream
- model: A PretrainedCNN object
Keyword arguments:
- learning_rate: How much to update the image at each iteration
- max_jitter: Maximum number of pixels for jitter regularization
- num_iterations: How many iterations to run for
- show_every: How often to show the generated image
"""
X = X.copy()
learning_rate = kwargs.pop('learning_rate', 5.0)
max_jitter = kwargs.pop('max_jitter', 16)
num_iterations = kwargs.pop('num_iterations', 100)
show_every = kwargs.pop('show_every', 25)
for t in xrange(num_iterations):
# As a regularizer, add random jitter to the image
ox, oy = np.random.randint(-max_jitter, max_jitter+1, 2)
X = np.roll(np.roll(X, ox, -1), oy, -2)
activation, cache = model.forward(X, mode='test', start=0, end=layer)
dX, grads = model.backward(activation, cache)
X = X + learning_rate * dX
# Undo the jitter
X = np.roll(np.roll(X, -ox, -1), -oy, -2)
# As a regularizer, clip the image
mean_pixel = data['mean_image'].mean(axis=(1, 2), keepdims=True)
X = np.clip(X, -mean_pixel, 255.0 - mean_pixel)
# Periodically show the image
if t == 0 or (t + 1) % show_every == 0:
img = deprocess_image(X, data['mean_image'], mean='pixel')
plt.imshow(img)
plt.title('t = %d' % (t + 1))
plt.gcf().set_size_inches(8, 8)
plt.axis('off')
filename = 'images/deepdream_%d.jpg' % (t+1)
plt.savefig(filename)
return X
def read_image(filename, max_size):
"""
Read an image from disk and resize it so its larger side is max_size
"""
img = imread(filename)
H, W, _ = img.shape
if H >= W:
img = imresize(img, (max_size, int(W * float(max_size) / H)))
elif H < W:
img = imresize(img, (int(H * float(max_size) / W), max_size))
return img
filename = 'kitten.jpg'
max_size = 256
img = read_image(filename, max_size)
plt.imshow(img)
plt.axis('off')
# Preprocess the image by converting to float, transposing,
# and performing mean subtraction.
img_pre = preprocess_image(img, data['mean_image'], mean='pixel')
out = deepdream(img_pre, 7, model, learning_rate=2000)
| mit |
cloud-fan/spark | python/pyspark/pandas/tests/test_csv.py | 15 | 15684 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import tempfile
from contextlib import contextmanager
import pandas as pd
import numpy as np
from pyspark import pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils
def normalize_text(s):
return "\n".join(map(str.strip, s.strip().split("\n")))
class CsvTest(PandasOnSparkTestCase, TestUtils):
def setUp(self):
self.tmp_dir = tempfile.mkdtemp(prefix=CsvTest.__name__)
def tearDown(self):
shutil.rmtree(self.tmp_dir, ignore_errors=True)
@property
def csv_text(self):
return normalize_text(
"""
name,amount
Alice,100
Bob,-200
Charlie,300
Dennis,400
Edith,-500
Frank,600
Alice,200
Frank,-200
Bob,600
Alice,400
Frank,200
Alice,300
Edith,600
"""
)
@property
def csv_text_2(self):
return normalize_text(
"""
A,B
item1,1
item2,1,2
item3,1,2,3,4
item4,1
"""
)
@property
def csv_text_with_comments(self):
return normalize_text(
"""
# header
%s
# comment
Alice,400
Edith,600
# footer
"""
% self.csv_text
)
@property
def tab_delimited_csv_text(self):
return normalize_text(
"""
name\tamount
Alice\t100
Bob\t-200
Charlie\t300
"""
)
@property
def q_quoted_csv_text(self):
return normalize_text(
"""
QnameQ,QamountQ
QA,liceQ,Q100Q
QB,obQ,Q-200Q
QC,harlieQ,Q300Q
"""
)
@property
def e_escapeted_csv_text(self):
return normalize_text(
"""
name,amount
"AE"lice",100
"BE"ob",-200
"CE"harlie",300
"""
)
@contextmanager
def csv_file(self, csv):
with self.temp_file() as tmp:
with open(tmp, "w") as f:
f.write(csv)
yield tmp
def test_read_csv(self):
with self.csv_file(self.csv_text) as fn:
def check(header="infer", names=None, usecols=None, index_col=None):
expected = pd.read_csv(
fn, header=header, names=names, usecols=usecols, index_col=index_col
)
actual = ps.read_csv(
fn, header=header, names=names, usecols=usecols, index_col=index_col
)
self.assert_eq(expected, actual, almost=True)
check()
check(header=0)
check(header=None)
check(names=["n", "a"])
check(names=[("x", "n"), ("y", "a")])
check(names=[10, 20])
check(header=0, names=["n", "a"])
check(usecols=[1])
check(usecols=[1, 0])
check(usecols=["amount"])
check(usecols=["amount", "name"])
check(usecols=[])
check(usecols=[1, 1])
check(usecols=["amount", "amount"])
check(header=None, usecols=[1])
check(names=["n", "a"], usecols=["a"])
check(header=None, names=["n", "a"], usecols=["a"])
check(index_col=["amount"])
check(header=None, index_col=[1])
check(names=["n", "a"], index_col=["a"])
# check with pyspark patch.
expected = pd.read_csv(fn)
actual = ps.read_csv(fn)
self.assert_eq(expected, actual, almost=True)
self.assertRaisesRegex(
ValueError, "non-unique", lambda: ps.read_csv(fn, names=["n", "n"])
)
self.assertRaisesRegex(
ValueError,
"does not match the number.*3",
lambda: ps.read_csv(fn, names=["n", "a", "b"]),
)
self.assertRaisesRegex(
ValueError,
"does not match the number.*3",
lambda: ps.read_csv(fn, header=0, names=["n", "a", "b"]),
)
self.assertRaisesRegex(
ValueError, "Usecols do not match.*3", lambda: ps.read_csv(fn, usecols=[1, 3])
)
self.assertRaisesRegex(
ValueError,
"Usecols do not match.*col",
lambda: ps.read_csv(fn, usecols=["amount", "col"]),
)
self.assertRaisesRegex(
ValueError, "Unknown header argument 1", lambda: ps.read_csv(fn, header="1")
)
expected_error_message = (
"'usecols' must either be list-like of all strings, "
"all unicode, all integers or a callable."
)
self.assertRaisesRegex(
ValueError, expected_error_message, lambda: ps.read_csv(fn, usecols=[1, "amount"])
)
# check with index_col
expected = pd.read_csv(fn).set_index("name")
actual = ps.read_csv(fn, index_col="name")
self.assert_eq(expected, actual, almost=True)
def test_read_with_spark_schema(self):
with self.csv_file(self.csv_text_2) as fn:
actual = ps.read_csv(fn, names="A string, B string, C long, D long, E long")
expected = pd.read_csv(fn, names=["A", "B", "C", "D", "E"])
self.assert_eq(expected, actual)
def test_read_csv_with_comment(self):
with self.csv_file(self.csv_text_with_comments) as fn:
expected = pd.read_csv(fn, comment="#")
actual = ps.read_csv(fn, comment="#")
self.assert_eq(expected, actual, almost=True)
self.assertRaisesRegex(
ValueError,
"Only length-1 comment characters supported",
lambda: ps.read_csv(fn, comment="").show(),
)
self.assertRaisesRegex(
ValueError,
"Only length-1 comment characters supported",
lambda: ps.read_csv(fn, comment="##").show(),
)
self.assertRaisesRegex(
ValueError,
"Only length-1 comment characters supported",
lambda: ps.read_csv(fn, comment=1),
)
self.assertRaisesRegex(
ValueError,
"Only length-1 comment characters supported",
lambda: ps.read_csv(fn, comment=[1]),
)
def test_read_csv_with_limit(self):
with self.csv_file(self.csv_text_with_comments) as fn:
expected = pd.read_csv(fn, comment="#", nrows=2)
actual = ps.read_csv(fn, comment="#", nrows=2)
self.assert_eq(expected, actual, almost=True)
def test_read_csv_with_sep(self):
with self.csv_file(self.tab_delimited_csv_text) as fn:
expected = pd.read_csv(fn, sep="\t")
actual = ps.read_csv(fn, sep="\t")
self.assert_eq(expected, actual, almost=True)
def test_read_csv_with_squeeze(self):
with self.csv_file(self.csv_text) as fn:
expected = pd.read_csv(fn, squeeze=True, usecols=["name"])
actual = ps.read_csv(fn, squeeze=True, usecols=["name"])
self.assert_eq(expected, actual, almost=True)
expected = pd.read_csv(fn, squeeze=True, usecols=["name", "amount"])
actual = ps.read_csv(fn, squeeze=True, usecols=["name", "amount"])
self.assert_eq(expected, actual, almost=True)
expected = pd.read_csv(fn, squeeze=True, usecols=["name", "amount"], index_col=["name"])
actual = ps.read_csv(fn, squeeze=True, usecols=["name", "amount"], index_col=["name"])
self.assert_eq(expected, actual, almost=True)
def test_read_csv_with_mangle_dupe_cols(self):
self.assertRaisesRegex(
ValueError, "mangle_dupe_cols", lambda: ps.read_csv("path", mangle_dupe_cols=False)
)
def test_read_csv_with_parse_dates(self):
self.assertRaisesRegex(
ValueError, "parse_dates", lambda: ps.read_csv("path", parse_dates=True)
)
def test_read_csv_with_dtype(self):
with self.csv_file(self.csv_text) as fn:
self.assert_eq(ps.read_csv(fn), pd.read_csv(fn), almost=True)
self.assert_eq(ps.read_csv(fn, dtype=str), pd.read_csv(fn, dtype=str))
self.assert_eq(
ps.read_csv(fn, dtype={"amount": "int64"}),
pd.read_csv(fn, dtype={"amount": "int64"}),
)
def test_read_csv_with_quotechar(self):
with self.csv_file(self.q_quoted_csv_text) as fn:
self.assert_eq(
ps.read_csv(fn, quotechar="Q"), pd.read_csv(fn, quotechar="Q"), almost=True
)
def test_read_csv_with_escapechar(self):
with self.csv_file(self.e_escapeted_csv_text) as fn:
self.assert_eq(
ps.read_csv(fn, escapechar="E"), pd.read_csv(fn, escapechar="E"), almost=True
)
self.assert_eq(
ps.read_csv(fn, escapechar="ABC", escape="E"),
pd.read_csv(fn, escapechar="E"),
almost=True,
)
def test_to_csv(self):
pdf = pd.DataFrame({"aa": [1, 2, 3], "bb": [4, 5, 6]}, index=[0, 1, 3])
psdf = ps.DataFrame(pdf)
self.assert_eq(psdf.to_csv(), pdf.to_csv(index=False))
self.assert_eq(psdf.to_csv(columns=["aa"]), pdf.to_csv(columns=["aa"], index=False))
self.assert_eq(psdf.aa.to_csv(), pdf.aa.to_csv(index=False, header=True))
pdf = pd.DataFrame({"a": [1, np.nan, 3], "b": ["one", "two", None]}, index=[0, 1, 3])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.to_csv(na_rep="null"), pdf.to_csv(na_rep="null", index=False))
self.assert_eq(
psdf.a.to_csv(na_rep="null"), pdf.a.to_csv(na_rep="null", index=False, header=True)
)
self.assertRaises(KeyError, lambda: psdf.to_csv(columns=["ab"]))
pdf = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [4.0, 5.0, 6.0]}, index=[0, 1, 3])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.to_csv(), pdf.to_csv(index=False))
self.assert_eq(psdf.to_csv(header=False), pdf.to_csv(header=False, index=False))
self.assert_eq(psdf.to_csv(), pdf.to_csv(index=False))
# non-string names
pdf = pd.DataFrame({10: [1, 2, 3], 20: [4, 5, 6]}, index=[0, 1, 3])
psdf = ps.DataFrame(pdf)
self.assert_eq(psdf.to_csv(), pdf.to_csv(index=False))
self.assert_eq(psdf.to_csv(columns=[10]), pdf.to_csv(columns=[10], index=False))
self.assertRaises(TypeError, lambda: psdf.to_csv(columns=10))
def _check_output(self, dir, expected):
output_paths = [path for path in os.listdir(dir) if path.startswith("part-")]
assert len(output_paths) > 0
output_path = "%s/%s" % (dir, output_paths[0])
with open(output_path) as f:
self.assertEqual(f.read(), expected)
def test_to_csv_with_path(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]})
psdf = ps.DataFrame(pdf)
tmp_dir = "{}/tmp1".format(self.tmp_dir)
psdf.to_csv(tmp_dir, num_files=1)
self._check_output(tmp_dir, pdf.to_csv(index=False))
tmp_dir = "{}/tmp2".format(self.tmp_dir)
self.assertRaises(KeyError, lambda: psdf.to_csv(tmp_dir, columns=["c"], num_files=1))
# non-string names
pdf = pd.DataFrame({10: [1, 2, 3], 20: ["a", "b", "c"]})
psdf = ps.DataFrame(pdf)
tmp_dir = "{}/tmp3".format(self.tmp_dir)
psdf.to_csv(tmp_dir, num_files=1)
self._check_output(tmp_dir, pdf.to_csv(index=False))
tmp_dir = "{}/tmp4".format(self.tmp_dir)
psdf.to_csv(tmp_dir, columns=[10], num_files=1)
self._check_output(tmp_dir, pdf.to_csv(columns=[10], index=False))
tmp_dir = "{}/tmp5".format(self.tmp_dir)
self.assertRaises(TypeError, lambda: psdf.to_csv(tmp_dir, columns=10, num_files=1))
def test_to_csv_with_path_and_basic_options(self):
pdf = pd.DataFrame({"aa": [1, 2, 3], "bb": ["a", "b", "c"]})
psdf = ps.DataFrame(pdf)
psdf.to_csv(self.tmp_dir, num_files=1, sep="|", header=False, columns=["aa"])
expected = pdf.to_csv(index=False, sep="|", header=False, columns=["aa"])
self._check_output(self.tmp_dir, expected)
def test_to_csv_with_path_and_basic_options_multiindex_columns(self):
pdf = pd.DataFrame({("x", "a"): [1, 2, 3], ("y", "b"): ["a", "b", "c"]})
psdf = ps.DataFrame(pdf)
with self.assertRaises(ValueError):
psdf.to_csv(self.tmp_dir, num_files=1, sep="|", columns=[("x", "a")])
psdf.to_csv(self.tmp_dir, num_files=1, sep="|", header=["a"], columns=[("x", "a")])
pdf.columns = ["a", "b"]
expected = pdf.to_csv(index=False, sep="|", columns=["a"])
self._check_output(self.tmp_dir, expected)
def test_to_csv_with_path_and_pyspark_options(self):
pdf = pd.DataFrame({"a": [1, 2, 3, None], "b": ["a", "b", "c", None]})
psdf = ps.DataFrame(pdf)
psdf.to_csv(self.tmp_dir, nullValue="null", num_files=1)
expected = pdf.to_csv(index=False, na_rep="null")
self._check_output(self.tmp_dir, expected)
def test_to_csv_with_partition_cols(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]})
psdf = ps.DataFrame(pdf)
psdf.to_csv(self.tmp_dir, partition_cols="b", num_files=1)
partition_paths = [path for path in os.listdir(self.tmp_dir) if path.startswith("b=")]
assert len(partition_paths) > 0
for partition_path in partition_paths:
column, value = partition_path.split("=")
expected = pdf[pdf[column] == value].drop("b", axis=1).to_csv(index=False)
output_paths = [
path
for path in os.listdir("%s/%s" % (self.tmp_dir, partition_path))
if path.startswith("part-")
]
assert len(output_paths) > 0
output_path = "%s/%s/%s" % (self.tmp_dir, partition_path, output_paths[0])
with open(output_path) as f:
self.assertEqual(f.read(), expected)
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_csv import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
walterreade/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 43 | 24671 | """
Todo: cross-check the F-value with stats model
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from numpy.testing import run_module_suite
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (
chi2, f_classif, f_oneway, f_regression, mutual_info_classif,
mutual_info_regression, SelectPercentile, SelectKBest, SelectFpr,
SelectFdr, SelectFwe, GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
# Smoke test f_oneway on integers: check that it does not raise casting errors
# with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
# test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([[0, 1, 2]])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
    # reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
def test_mutual_info_classif():
X, y = make_classification(n_samples=100, n_features=5,
n_informative=1, n_redundant=1,
n_repeated=0, n_classes=2,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
# Test in KBest mode.
univariate_filter = SelectKBest(mutual_info_classif, k=2)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
mutual_info_classif, mode='k_best', param=2).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(5)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
# Test in Percentile mode.
univariate_filter = SelectPercentile(mutual_info_classif, percentile=40)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
mutual_info_classif, mode='percentile', param=40).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(5)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
def test_mutual_info_regression():
X, y = make_regression(n_samples=100, n_features=10, n_informative=2,
shuffle=False, random_state=0, noise=10)
# Test in KBest mode.
univariate_filter = SelectKBest(mutual_info_regression, k=2)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
mutual_info_regression, mode='k_best', param=2).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(10)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
# Test in Percentile mode.
univariate_filter = SelectPercentile(mutual_info_regression, percentile=20)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(mutual_info_regression, mode='percentile',
param=20).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(10)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
if __name__ == '__main__':
run_module_suite()
| bsd-3-clause |
huzq/scikit-learn | sklearn/neighbors/_lof.py | 1 | 21184 | # Authors: Nicolas Goix <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import warnings
from ._base import NeighborsBase
from ._base import KNeighborsMixin
from ._base import UnsupervisedMixin
from ..base import OutlierMixin
from ..utils.validation import check_is_fitted
from ..utils.validation import _deprecate_positional_args
from ..utils import check_array
__all__ = ["LocalOutlierFactor"]
class LocalOutlierFactor(KNeighborsMixin, UnsupervisedMixin,
OutlierMixin, NeighborsBase):
"""Unsupervised Outlier Detection using Local Outlier Factor (LOF)
The anomaly score of each sample is called Local Outlier Factor.
It measures the local deviation of density of a given sample with
respect to its neighbors.
It is local in that the anomaly score depends on how isolated the object
is with respect to the surrounding neighborhood.
More precisely, locality is given by k-nearest neighbors, whose distance
is used to estimate the local density.
By comparing the local density of a sample to the local densities of
its neighbors, one can identify samples that have a substantially lower
density than their neighbors. These are considered outliers.
.. versionadded:: 0.19
Parameters
----------
n_neighbors : int, default=20
Number of neighbors to use by default for :meth:`kneighbors` queries.
If n_neighbors is larger than the number of samples provided,
all samples will be used.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can
affect the speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : str or callable, default='minkowski'
        The metric used for the distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics:
https://docs.scipy.org/doc/scipy/reference/spatial.distance.html
p : int, default=2
Parameter for the Minkowski metric from
:func:`sklearn.metrics.pairwise.pairwise_distances`. When p = 1, this
is equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
contamination : 'auto' or float, default='auto'
The amount of contamination of the data set, i.e. the proportion
of outliers in the data set. When fitting this is used to define the
threshold on the scores of the samples.
- if 'auto', the threshold is determined as in the
original paper,
- if a float, the contamination should be in the range [0, 0.5].
.. versionchanged:: 0.22
The default value of ``contamination`` changed from 0.1
to ``'auto'``.
novelty : bool, default=False
By default, LocalOutlierFactor is only meant to be used for outlier
detection (novelty=False). Set novelty to True if you want to use
LocalOutlierFactor for novelty detection. In this case be aware that
that you should only use predict, decision_function and score_samples
on new unseen data and not on the training set.
.. versionadded:: 0.20
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
negative_outlier_factor_ : ndarray of shape (n_samples,)
The opposite LOF of the training samples. The higher, the more normal.
Inliers tend to have a LOF score close to 1
(``negative_outlier_factor_`` close to -1), while outliers tend to have
a larger LOF score.
The local outlier factor (LOF) of a sample captures its
supposed 'degree of abnormality'.
It is the average of the ratio of the local reachability density of
a sample and those of its k-nearest neighbors.
n_neighbors_ : int
The actual number of neighbors used for :meth:`kneighbors` queries.
offset_ : float
Offset used to obtain binary labels from the raw scores.
Observations having a negative_outlier_factor smaller than `offset_`
are detected as abnormal.
The offset is set to -1.5 (inliers score around -1), except when a
contamination parameter different than "auto" is provided. In that
case, the offset is defined in such a way we obtain the expected
number of outliers in training.
.. versionadded:: 0.20
effective_metric_ : str
The effective metric used for the distance computation.
effective_metric_params_ : dict
The effective additional keyword arguments for the metric function.
n_samples_fit_ : int
It is the number of samples in the fitted data.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import LocalOutlierFactor
>>> X = [[-1.1], [0.2], [101.1], [0.3]]
>>> clf = LocalOutlierFactor(n_neighbors=2)
>>> clf.fit_predict(X)
array([ 1, 1, -1, 1])
>>> clf.negative_outlier_factor_
array([ -0.9821..., -1.0370..., -73.3697..., -0.9821...])
References
----------
.. [1] Breunig, M. M., Kriegel, H. P., Ng, R. T., & Sander, J. (2000, May).
LOF: identifying density-based local outliers. In ACM sigmod record.
"""
@_deprecate_positional_args
def __init__(self, n_neighbors=20, *, algorithm='auto', leaf_size=30,
metric='minkowski', p=2, metric_params=None,
contamination="auto", novelty=False, n_jobs=None):
super().__init__(
n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs)
self.contamination = contamination
self.novelty = novelty
@property
def fit_predict(self):
"""Fits the model to the training set X and returns the labels.
        **Only available for outlier detection (when novelty is set to False).**
Label is 1 for an inlier and -1 for an outlier according to the LOF
score and the contamination parameter.
Parameters
----------
X : array-like of shape (n_samples, n_features), default=None
The query sample or samples to compute the Local Outlier Factor
            w.r.t. the training samples.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
is_inlier : ndarray of shape (n_samples,)
Returns -1 for anomalies/outliers and 1 for inliers.
"""
# As fit_predict would be different from fit.predict, fit_predict is
# only available for outlier detection (novelty=False)
if self.novelty:
msg = ('fit_predict is not available when novelty=True. Use '
'novelty=False if you want to predict on the training set.')
raise AttributeError(msg)
return self._fit_predict
def _fit_predict(self, X, y=None):
"""Fits the model to the training set X and returns the labels.
Label is 1 for an inlier and -1 for an outlier according to the LOF
score and the contamination parameter.
Parameters
----------
X : array-like of shape (n_samples, n_features), default=None
The query sample or samples to compute the Local Outlier Factor
            w.r.t. the training samples.
Returns
-------
is_inlier : ndarray of shape (n_samples,)
Returns -1 for anomalies/outliers and 1 for inliers.
"""
# As fit_predict would be different from fit.predict, fit_predict is
# only available for outlier detection (novelty=False)
return self.fit(X)._predict()
def fit(self, X, y=None):
"""Fit the model using X as training data.
Parameters
----------
X : BallTree, KDTree or {array-like, sparse matrix} of shape \
(n_samples, n_features) or (n_samples, n_samples)
Training data. If array or matrix, the shape is (n_samples,
n_features), or (n_samples, n_samples) if metric='precomputed'.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
"""
if self.contamination != 'auto':
if not(0. < self.contamination <= .5):
raise ValueError("contamination must be in (0, 0.5], "
"got: %f" % self.contamination)
super().fit(X)
n_samples = self.n_samples_fit_
if self.n_neighbors > n_samples:
warnings.warn("n_neighbors (%s) is greater than the "
"total number of samples (%s). n_neighbors "
"will be set to (n_samples - 1) for estimation."
% (self.n_neighbors, n_samples))
self.n_neighbors_ = max(1, min(self.n_neighbors, n_samples - 1))
self._distances_fit_X_, _neighbors_indices_fit_X_ = self.kneighbors(
n_neighbors=self.n_neighbors_)
self._lrd = self._local_reachability_density(
self._distances_fit_X_, _neighbors_indices_fit_X_)
# Compute lof score over training samples to define offset_:
lrd_ratios_array = (self._lrd[_neighbors_indices_fit_X_] /
self._lrd[:, np.newaxis])
self.negative_outlier_factor_ = -np.mean(lrd_ratios_array, axis=1)
if self.contamination == "auto":
# inliers score around -1 (the higher, the less abnormal).
self.offset_ = -1.5
else:
self.offset_ = np.percentile(self.negative_outlier_factor_,
100. * self.contamination)
return self
@property
def predict(self):
"""Predict the labels (1 inlier, -1 outlier) of X according to LOF.
**Only available for novelty detection (when novelty is set to True).**
        This method allows prediction to be generalized to *new observations* (not
in the training set).
Parameters
----------
X : array-like of shape (n_samples, n_features)
The query sample or samples to compute the Local Outlier Factor
            w.r.t. the training samples.
Returns
-------
is_inlier : ndarray of shape (n_samples,)
Returns -1 for anomalies/outliers and +1 for inliers.
"""
if not self.novelty:
msg = ('predict is not available when novelty=False, use '
'fit_predict if you want to predict on training data. Use '
'novelty=True if you want to use LOF for novelty detection '
'and predict on new unseen data.')
raise AttributeError(msg)
return self._predict
def _predict(self, X=None):
"""Predict the labels (1 inlier, -1 outlier) of X according to LOF.
If X is None, returns the same as fit_predict(X_train).
Parameters
----------
X : array-like of shape (n_samples, n_features), default=None
The query sample or samples to compute the Local Outlier Factor
            w.r.t. the training samples. If None, makes prediction on the
training data without considering them as their own neighbors.
Returns
-------
is_inlier : ndarray of shape (n_samples,)
Returns -1 for anomalies/outliers and +1 for inliers.
"""
check_is_fitted(self)
if X is not None:
X = check_array(X, accept_sparse='csr')
is_inlier = np.ones(X.shape[0], dtype=int)
is_inlier[self.decision_function(X) < 0] = -1
else:
is_inlier = np.ones(self.n_samples_fit_, dtype=int)
is_inlier[self.negative_outlier_factor_ < self.offset_] = -1
return is_inlier
@property
def decision_function(self):
"""Shifted opposite of the Local Outlier Factor of X.
Bigger is better, i.e. large values correspond to inliers.
**Only available for novelty detection (when novelty is set to True).**
The shift offset allows a zero threshold for being an outlier.
The argument X is supposed to contain *new data*: if X contains a
        point from training, it considers the latter in its own neighborhood.
Also, the samples in X are not considered in the neighborhood of any
point.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The query sample or samples to compute the Local Outlier Factor
w.r.t. the training samples.
Returns
-------
shifted_opposite_lof_scores : ndarray of shape (n_samples,)
The shifted opposite of the Local Outlier Factor of each input
            sample. The lower, the more abnormal. Negative scores represent
outliers, positive scores represent inliers.
"""
if not self.novelty:
msg = ('decision_function is not available when novelty=False. '
'Use novelty=True if you want to use LOF for novelty '
'detection and compute decision_function for new unseen '
'data. Note that the opposite LOF of the training samples '
'is always available by considering the '
'negative_outlier_factor_ attribute.')
raise AttributeError(msg)
return self._decision_function
def _decision_function(self, X):
"""Shifted opposite of the Local Outlier Factor of X.
Bigger is better, i.e. large values correspond to inliers.
**Only available for novelty detection (when novelty is set to True).**
The shift offset allows a zero threshold for being an outlier.
The argument X is supposed to contain *new data*: if X contains a
        point from training, it considers the latter in its own neighborhood.
Also, the samples in X are not considered in the neighborhood of any
point.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The query sample or samples to compute the Local Outlier Factor
w.r.t. the training samples.
Returns
-------
shifted_opposite_lof_scores : ndarray of shape (n_samples,)
The shifted opposite of the Local Outlier Factor of each input
            sample. The lower, the more abnormal. Negative scores represent
outliers, positive scores represent inliers.
"""
return self._score_samples(X) - self.offset_
@property
def score_samples(self):
"""Opposite of the Local Outlier Factor of X.
It is the opposite as bigger is better, i.e. large values correspond
to inliers.
**Only available for novelty detection (when novelty is set to True).**
The argument X is supposed to contain *new data*: if X contains a
        point from training, it considers the latter in its own neighborhood.
Also, the samples in X are not considered in the neighborhood of any
point.
        The score_samples on training data is available by considering the
        ``negative_outlier_factor_`` attribute.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The query sample or samples to compute the Local Outlier Factor
w.r.t. the training samples.
Returns
-------
opposite_lof_scores : ndarray of shape (n_samples,)
            The opposite of the Local Outlier Factor of each input sample.
The lower, the more abnormal.
"""
if not self.novelty:
msg = ('score_samples is not available when novelty=False. The '
'scores of the training samples are always available '
'through the negative_outlier_factor_ attribute. Use '
'novelty=True if you want to use LOF for novelty detection '
'and compute score_samples for new unseen data.')
raise AttributeError(msg)
return self._score_samples
def _score_samples(self, X):
"""Opposite of the Local Outlier Factor of X.
It is the opposite as bigger is better, i.e. large values correspond
to inliers.
**Only available for novelty detection (when novelty is set to True).**
The argument X is supposed to contain *new data*: if X contains a
        point from training, it considers the latter in its own neighborhood.
Also, the samples in X are not considered in the neighborhood of any
point.
        The score_samples on training data is available by considering the
        ``negative_outlier_factor_`` attribute.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The query sample or samples to compute the Local Outlier Factor
w.r.t. the training samples.
Returns
-------
opposite_lof_scores : ndarray of shape (n_samples,)
            The opposite of the Local Outlier Factor of each input sample.
The lower, the more abnormal.
"""
check_is_fitted(self)
X = check_array(X, accept_sparse='csr')
distances_X, neighbors_indices_X = (
self.kneighbors(X, n_neighbors=self.n_neighbors_))
X_lrd = self._local_reachability_density(distances_X,
neighbors_indices_X)
lrd_ratios_array = (self._lrd[neighbors_indices_X] /
X_lrd[:, np.newaxis])
# as bigger is better:
return -np.mean(lrd_ratios_array, axis=1)
def _local_reachability_density(self, distances_X, neighbors_indices):
"""The local reachability density (LRD)
The LRD of a sample is the inverse of the average reachability
distance of its k-nearest neighbors.
Parameters
----------
distances_X : ndarray of shape (n_queries, self.n_neighbors)
Distances to the neighbors (in the training samples `self._fit_X`)
of each query point to compute the LRD.
neighbors_indices : ndarray of shape (n_queries, self.n_neighbors)
Neighbors indices (of each query point) among training samples
self._fit_X.
Returns
-------
local_reachability_density : ndarray of shape (n_queries,)
The local reachability density of each sample.
"""
dist_k = self._distances_fit_X_[neighbors_indices,
self.n_neighbors_ - 1]
reach_dist_array = np.maximum(distances_X, dist_k)
        # 1e-10 to avoid `nan` when the number of duplicates > n_neighbors_:
return 1. / (np.mean(reach_dist_array, axis=1) + 1e-10)
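# A minimal usage sketch (illustrative, not part of the estimator): the default
# outlier-detection mode labels the training data via fit_predict, whereas
# novelty=True fits on clean data and then predicts on unseen observations.
def _local_outlier_factor_sketch():
    X_train = np.array([[-1.1], [0.2], [0.3], [0.4], [101.1]])
    # Outlier detection on the training set itself:
    training_labels = LocalOutlierFactor(n_neighbors=2).fit_predict(X_train)
    # Novelty detection: fit on the inliers only, then label new points.
    clf = LocalOutlierFactor(n_neighbors=2, novelty=True).fit(X_train[:4])
    new_labels = clf.predict(np.array([[0.25], [250.0]]))
    return training_labels, new_labels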
| bsd-3-clause |
IssamLaradji/scikit-learn | sklearn/ensemble/tests/test_base.py | 28 | 1334 | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
"""Check BaseEnsemble methods."""
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert_true(isinstance(ensemble[0], Perceptron))
def test_base_zero_n_estimators():
"""Check that instantiating a BaseEnsemble with n_estimators<=0 raises
a ValueError."""
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be greater than zero, got 0.",
ensemble.fit, iris.data, iris.target)
| bsd-3-clause |
dsullivan7/scikit-learn | sklearn/covariance/tests/test_graph_lasso.py | 272 | 5245 | """ Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
    # Check that this indeed returns the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and sklearn do not match in a few places, these
# values are for the sklearn version
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
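# Minimal illustrative usage (not a test): fit GraphLassoCV directly on data and
# read off the selected regularization strength and the sparse precision
# estimate. Assumes the imports at the top of this module.
def _graph_lasso_cv_sketch():
    rng = check_random_state(0)
    X = rng.multivariate_normal(np.zeros(4), np.eye(4), size=30)
    model = GraphLassoCV(alphas=4, tol=1e-2).fit(X)
    return model.alpha_, model.precision_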
| bsd-3-clause |
vipmunot/Data-Analysis-using-Python | Machine learning Intermediate/Introduction to neural networks-121.py | 1 | 8043 | ## 1. Neural networks and iris flowers ##
import pandas
import matplotlib.pyplot as plt
import numpy as np
# Read in dataset
iris = pandas.read_csv("iris.csv")
# shuffle rows
shuffled_rows = np.random.permutation(iris.index)
iris = iris.loc[shuffled_rows,:]
print(iris.head())
# There are 2 species
print(iris.species.unique())
iris.hist()
plt.show()
## 2. Neurons ##
z = np.asarray([[9, 5, 4]])
y = np.asarray([[-1, 2, 4]])
# np.dot is used for matrix multiplication
# z is 1x3 and y is 1x3, z * y.T is then 1x1
print(np.dot(z,y.T))
# Variables to test sigmoid_activation
iris["ones"] = np.ones(iris.shape[0])
X = iris[['ones', 'sepal_length', 'sepal_width', 'petal_length', 'petal_width']].values
y = (iris.species == 'Iris-versicolor').values.astype(int)
# The first observation
x0 = X[0]
# Initialize thetas randomly
theta_init = np.random.normal(0,0.01,size=(5,1))
def sigmoid_activation(x,theta):
return 1/ (1+ np.exp(-np.dot(theta.T , x)))
a1 = sigmoid_activation(x0,theta_init)
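# Quick illustrative check: with all-zero parameters the dot product is zero,
# so the sigmoid activation of any observation should sit at 0.5.
print(sigmoid_activation(x0, np.zeros((5, 1))))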
## 3. Cost function ##
# First observation's features and target
x0 = X[0]
y0 = y[0]
# Initialize parameters, we have 5 units and just 1 layer
theta_init = np.random.normal(0,0.01,size=(5,1))
def singlecost(X,y,theta):
h = 1/ (1+ np.exp(-np.dot(theta.T,X)))
return -np.mean(y * np.log(h) + (1-y) * np.log(1-h))
first_cost = singlecost(x0,y0,theta_init)
## 4. Compute the Gradients ##
# Initialize parameters
theta_init = np.random.normal(0,0.01,size=(5,1))
# Store the updates into this array
grads = np.zeros(theta_init.shape)
# Number of observations
n = X.shape[0]
for j, obs in enumerate(X):
# Compute activation
h = sigmoid_activation(obs, theta_init)
# Get delta
delta = (y[j]-h) * h * (1-h) * obs
# accumulate
grads += delta[:,np.newaxis]/X.shape[0]
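# Illustrative single gradient-descent step (not required by the exercise):
# nudge the parameters in the direction of the accumulated gradient; the
# learn() function defined below repeats this until the cost converges.
theta_step = theta_init + 0.1 * grads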
## 5. Two layer network ##
theta_init = np.random.normal(0,0.01,size=(5,1))
# set a learning rate
learning_rate = 0.1
# maximum number of iterations for gradient descent
maxepochs = 10000
# costs convergence threshold, ie. (prevcost - cost) > convergence_thres
convergence_thres = 0.0001
def learn(X, y, theta, learning_rate, maxepochs, convergence_thres):
costs = []
cost = singlecost(X, y, theta) # compute initial cost
    costprev = cost + convergence_thres + 0.01  # set an initial costprev to pass the while-loop check
counter = 0 # add a counter
# Loop through until convergence
for counter in range(maxepochs):
grads = np.zeros(theta.shape)
for j, obs in enumerate(X):
h = sigmoid_activation(obs, theta) # Compute activation
delta = (y[j]-h) * h * (1-h) * obs # Get delta
grads += delta[:,np.newaxis]/X.shape[0] # accumulate
# update parameters
theta += grads * learning_rate
counter += 1 # count
costprev = cost # store prev cost
cost = singlecost(X, y, theta) # compute new cost
costs.append(cost)
if np.abs(costprev-cost) < convergence_thres:
break
plt.plot(costs)
plt.title("Convergence of the Cost Function")
plt.ylabel("J($\Theta$)")
plt.xlabel("Iteration")
plt.show()
return theta
theta = learn(X, y, theta_init, learning_rate, maxepochs, convergence_thres)
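# Illustrative use of the fitted parameters: predicted probabilities of being
# Iris-versicolor for the first five observations.
print(np.ravel([sigmoid_activation(obs, theta) for obs in X[:5]]))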
## 6. Neural Network ##
theta0_init = np.random.normal(0,0.01,size=(5,4))
theta1_init = np.random.normal(0,0.01,size=(5,1))
def feedforward(X, theta0, theta1):
# feedforward to the first layer
a1 = sigmoid_activation(X.T, theta0).T
# add a column of ones for bias term
a1 = np.column_stack([np.ones(a1.shape[0]), a1])
# activation units are then inputted to the output layer
out = sigmoid_activation(a1.T, theta1)
return out
h = feedforward(X, theta0_init, theta1_init)
## 7. Multiple neural network cost function ##
theta0_init = np.random.normal(0,0.01,size=(5,4))
theta1_init = np.random.normal(0,0.01,size=(5,1))
# X and y are in memory and should be used as inputs to multiplecost()
def multiplecost(X, y, theta0, theta1):
    # feed forward with the supplied parameters instead of reusing the global h
    h = feedforward(X, theta0, theta1)
    return -np.mean((y * np.log(h)) + (1 - y) * np.log(1 - h))
c = multiplecost(X,y,theta0_init, theta1_init)
## 8. Backpropagation ##
# Use a class for this model, it's good practice and condenses the code
class NNet3:
def __init__(self, learning_rate=0.5, maxepochs=1e4, convergence_thres=1e-5, hidden_layer=4):
self.learning_rate = learning_rate
self.maxepochs = int(maxepochs)
self.convergence_thres = 1e-5
self.hidden_layer = int(hidden_layer)
def _multiplecost(self, X, y):
# feed through network
l1, l2 = self._feedforward(X)
# compute error
inner = y * np.log(l2) + (1-y) * np.log(1-l2)
# negative of average error
return -np.mean(inner)
def _feedforward(self, X):
# feedforward to the first layer
l1 = sigmoid_activation(X.T, self.theta0).T
# add a column of ones for bias term
l1 = np.column_stack([np.ones(l1.shape[0]), l1])
# activation units are then inputted to the output layer
l2 = sigmoid_activation(l1.T, self.theta1)
return l1, l2
def predict(self, X):
_, y = self._feedforward(X)
return y
def learn(self, X, y):
nobs, ncols = X.shape
self.theta0 = np.random.normal(0,0.01,size=(ncols,self.hidden_layer))
self.theta1 = np.random.normal(0,0.01,size=(self.hidden_layer+1,1))
self.costs = []
cost = self._multiplecost(X, y)
self.costs.append(cost)
        costprev = cost + self.convergence_thres + 1  # set an initial costprev to pass the while-loop check
        counter = 0  # initialize a counter
# Loop through until convergence
for counter in range(self.maxepochs):
# feedforward through network
l1, l2 = self._feedforward(X)
# Start Backpropagation
# Compute gradients
l2_delta = (y-l2) * l2 * (1-l2)
l1_delta = l2_delta.T.dot(self.theta1.T) * l1 * (1-l1)
# Update parameters by averaging gradients and multiplying by the learning rate
self.theta1 += l1.T.dot(l2_delta.T) / nobs * self.learning_rate
self.theta0 += X.T.dot(l1_delta)[:,1:] / nobs * self.learning_rate
# Store costs and check for convergence
counter += 1 # Count
costprev = cost # Store prev cost
cost = self._multiplecost(X, y) # get next cost
self.costs.append(cost)
if np.abs(costprev-cost) < self.convergence_thres and counter > 500:
break
# Set a learning rate
learning_rate = 0.5
# Maximum number of iterations for gradient descent
maxepochs = 10000
# Costs convergence threshold, ie. (prevcost - cost) > convergence_thres
convergence_thres = 0.00001
# Number of hidden units
hidden_units = 4
# Initialize model
model = NNet3(learning_rate=learning_rate, maxepochs=maxepochs,
convergence_thres=convergence_thres, hidden_layer=hidden_units)
# Train model
model.learn(X, y)
# Plot costs
plt.plot(model.costs)
plt.title("Convergence of the Cost Function")
plt.ylabel("J($\Theta$)")
plt.xlabel("Iteration")
plt.show()
## 9. Splitting data ##
# First 70 rows to X_train and y_train
# Last 30 rows to X_train and y_train
X_train = X[0:70]
y_train = y[0:70]
X_test = X[70:len(X)]
y_test = y[70:len(y)]
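# An equivalent split with scikit-learn (illustrative alternative, left
# commented out because the helper's location depends on the installed
# scikit-learn version):
# from sklearn.model_selection import train_test_split
# X_train, X_test, y_train, y_test = train_test_split(
#     X, y, test_size=30, shuffle=False)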
## 10. Predicting iris flowers ##
from sklearn.metrics import roc_auc_score
# Set a learning rate
learning_rate = 0.5
# Maximum number of iterations for gradient descent
maxepochs = 10000
# Costs convergence threshold, ie. (prevcost - cost) > convergence_thres
convergence_thres = 0.00001
# Number of hidden units
hidden_units = 4
# Initialize model
model = NNet3(learning_rate=learning_rate, maxepochs=maxepochs,
convergence_thres=convergence_thres, hidden_layer=hidden_units)
model.learn(X_train,y_train)
x = model.predict(X_test)[0]
auc = roc_auc_score(y_test, x)
| mit |
philharmonic/philharmonic | philharmonic/scheduler/tests/test_bcf_scheduler.py | 2 | 4102 | from nose.tools import *
from mock import MagicMock
import pandas as pd
from philharmonic import Schedule
from philharmonic.scheduler.bcf_scheduler import *
from philharmonic.simulator.environment import FBFSimpleSimulatedEnvironment
from philharmonic.simulator.simulator import FBFSimulator
from philharmonic.simulator import inputgen
from philharmonic import Cloud, VMRequest, \
VM, Server, State, Migration
def test_bcf_returns_schedule():
scheduler = BCFScheduler()
scheduler.cloud = Cloud()
scheduler.environment = FBFSimpleSimulatedEnvironment()
scheduler.environment.get_requests = MagicMock(return_value = [])
scheduler.environment.get_time = MagicMock(return_value = 1)
schedule = scheduler.reevaluate()
assert_is_instance(schedule, Schedule)
def test_bcf_reevaluate_initial_placement():
scheduler = BCFScheduler()
scheduler.environment = FBFSimpleSimulatedEnvironment()
s1, s2 = Server(4000, 4), Server(4000, 4)
s1.loc = 'A'
s2.loc = 'B'
vm1 = VM(2000, 1)
vm2 = VM(1000, 2)
vm3 = VM(2000, 2)
cloud = Cloud([s1, s2], [vm1, vm2, vm3])
scheduler.cloud = cloud
r1 = VMRequest(vm1, 'boot')
r2 = VMRequest(vm2, 'boot')
r3 = VMRequest(vm3, 'boot')
scheduler.environment.get_requests = MagicMock(return_value = [r1, r2, r3])
scheduler.environment.t = 1
#scheduler.environment.get_time = MagicMock(return_value = 1)
# location B cheaper el. price, temperatures the same
el = pd.DataFrame({'A': [0.16], 'B': [0.08]}, [1])
temp = pd.DataFrame({'A': [15], 'B': [15]}, [1])
scheduler.environment.current_data = MagicMock(return_value = (el, temp))
schedule = scheduler.reevaluate()
for action in schedule.actions:
cloud.apply_real(action)
current = cloud.get_current()
#assert location of vms
assert_true(current.all_allocated())
assert_true(current.all_within_capacity())
assert_equals(current.allocation(vm3), s2, 'cheaper location picked first')
assert_equals(current.allocation(vm2), s2, 'cheaper location picked first')
assert_equals(current.allocation(vm1), s1, 'when no room, the next best')
def test_bcf_reevaluate_underutilised():
scheduler = BCFScheduler()
scheduler.environment = FBFSimpleSimulatedEnvironment()
s1, s2 = Server(40000, 12, location='A'), Server(20000, 10, location='A')
vm1 = VM(2000, 1)
vm2 = VM(2000, 1)
r2 = VMRequest(vm2, 'boot')
cloud = Cloud([s1, s2], [vm1, vm2])
scheduler.cloud = cloud
scheduler.environment.get_requests = MagicMock(return_value = [r2])
scheduler.environment.t = 1
el = pd.DataFrame({'A': [0.08], 'B': [0.08]}, [1])
temp = pd.DataFrame({'A': [15], 'B': [15]}, [1])
scheduler.environment.current_data = MagicMock(return_value = (el, temp))
# the initial state is a VM hosted on an underutilised PM
cloud.apply_real(Migration(vm1, s2))
schedule = scheduler.reevaluate()
for action in schedule.actions:
cloud.apply_real(action)
current = cloud.get_current()
assert_true(current.all_within_capacity())
assert_equals(current.allocation(vm1), s1)
assert_equals(current.allocation(vm2), s1)
assert_true(current.all_allocated())
def test_sort_pms_best_first():
pm1 = Server(2000, 2, location='B')
pm2 = Server(4000, 4, location='A')
pm3 = Server(4000, 4, location='B')
cost = pd.Series({'A': [0.04], 'B': [0.08], 'C': [0.16]})
servers = [pm2, pm3, pm1]
state = State(servers, [])
sorted_pms = sort_active_pms(servers, state, cost)
assert_equals(sorted_pms, [pm1, pm2, pm3])
def test_sort_pms_cost_first_same_cost():
"""if costs are equal, check that bigger server returned first"""
pm1 = Server(2000, 1, location='A')
pm2 = Server(3000, 1, location='A')
pm3 = Server(3000, 2, location='A')
pm4 = Server(3000, 4, location='A')
cost = pd.Series({'A': [0.08], 'B': [0.08]})
servers = [pm2, pm1, pm4, pm3]
state = State(servers, [])
sorted_pms = sort_inactive_pms(servers, state, cost)
assert_equals(sorted_pms, [pm4, pm3, pm2, pm1])
| gpl-3.0 |
nanophotonics/nplab | examples/keithley_simple_experiment.py | 1 | 1555 | """
This example demonstrates a real experiment using a Keithley SMU. The main focus here is on the
Instrument subclass, which allows a user to retrieve a previously created instance of the Keithley
class for use during the experiment.
"""
from future import standard_library
standard_library.install_aliases()
__author__ = 'alansanders'
from nplab.instrument.electronics.keithley_2635a_smu import Keithley2635A
import numpy as np
import matplotlib.pyplot as plt
from time import sleep
from threading import Thread
def run_experiment(voltages, currents):
smu = Keithley2635A.get_instance()
smu.output = 1
sleep(0.001)
for j,v in enumerate(voltages):
smu.src_voltage = v
sleep(0.001)
currents[j] = smu.get_meas_current()
smu.output = 0
def example_experiment(start, stop, step):
voltages = np.linspace(start, stop, step)
currents = np.zeros_like(voltages)
plt.ion()
fig = plt.figure()
l1, = plt.plot(voltages, currents, 'ko-')
plt.xlabel('voltage (V)')
plt.ylabel('current (A)')
plt.show()
thread = Thread(target=run_experiment, args=(voltages, currents))
thread.start()
running = True
while running:
running = thread.is_alive()
l1.set_data(voltages, currents)
for ax in fig.axes:
ax.relim()
ax.autoscale_view()
fig.canvas.draw()
sleep(0.1)
plt.show(block=True)
if __name__ == '__main__':
smu = Keithley2635A()
max_I = 1e-3
max_V = 1e3 * max_I
    example_experiment(0, max_V, 50)
| gpl-3.0 |
poeticcapybara/pythalesians | pythalesians-examples/plotfactory_examples.py | 1 | 7407 | __author__ = 'saeedamen' # Saeed Amen / [email protected]
#
# Copyright 2015 Thalesians Ltd. - http://www.thalesians.com / @thalesians
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
plotfactory_examples
Examples to show how to use PlotFactory to make charts (at present only line charts have a lot of support).
"""
import datetime
from pythalesians.market.loaders.lighttimeseriesfactory import LightTimeSeriesFactory
from pythalesians.market.requests.timeseriesrequest import TimeSeriesRequest
from pythalesians.graphics.graphs.plotfactory import PlotFactory
from pythalesians.graphics.graphs.graphproperties import GraphProperties
if True:
pf = PlotFactory()
# test simple PyThalesians/Bokeh time series line charts
if False:
ltsf = LightTimeSeriesFactory()
start = '01 Jan 2000'
end = datetime.datetime.utcnow()
tickers = ['AUDJPY', 'USDJPY']
vendor_tickers = ['AUDJPY BGN Curncy', 'USDJPY BGN Curncy']
time_series_request = TimeSeriesRequest(
start_date = start, # start date
finish_date = datetime.date.today(), # finish date
freq = 'daily', # daily data
data_source = 'bloomberg', # use Bloomberg as data source
tickers = tickers, # ticker (Thalesians)
fields = ['close'], # which fields to download
vendor_tickers = vendor_tickers, # ticker (Bloomberg)
vendor_fields = ['PX_LAST'], # which Bloomberg fields to download
cache_algo = 'internet_load_return') # how to return data
daily_vals = ltsf.harvest_time_series(time_series_request)
pf = PlotFactory()
gp = GraphProperties()
gp.title = 'Spot values'
gp.file_output = 'output_data/demo.png'
gp.html_file_output = 'output_data/demo.htm'
gp.source = 'Thalesians/BBG'
# plot using PyThalesians
pf.plot_line_graph(daily_vals, adapter = 'pythalesians', gp = gp)
# plot using Bokeh (still needs a lot of work!)
pf.plot_line_graph(daily_vals, adapter = 'bokeh', gp = gp)
# do more complicated charts using several different Matplotib stylesheets (which have been customised)
if False:
ltsf = LightTimeSeriesFactory()
# load market data
start = '01 Jan 1970'
end = datetime.datetime.utcnow()
tickers = ['AUDJPY', 'USDJPY']
vendor_tickers = ['AUDJPY BGN Curncy', 'USDJPY BGN Curncy']
time_series_request = TimeSeriesRequest(
start_date = start, # start date
finish_date = datetime.date.today(), # finish date
freq = 'daily', # daily data
data_source = 'bloomberg', # use Bloomberg as data source
tickers = tickers, # ticker (Thalesians)
fields = ['close'], # which fields to download
vendor_tickers = vendor_tickers, # ticker (Bloomberg)
vendor_fields = ['PX_LAST'], # which Bloomberg fields to download
cache_algo = 'internet_load_return') # how to return data
daily_vals = ltsf.harvest_time_series(time_series_request)
# plot data
gp = GraphProperties()
pf = PlotFactory()
gp.title = 'Spot values'
gp.file_output = 'output_data/demo.png'
gp.scale_factor = 2
gp.style_sheet = 'pythalesians'
# first use PyThalesians matplotlib wrapper
pf.plot_line_graph(daily_vals, adapter = 'pythalesians', gp = gp)
pf.plot_generic_graph(daily_vals, gp = gp, type = 'line')
# use modified 538 Matplotlib stylesheet
gp.style_sheet = '538-pythalesians'
pf.plot_line_graph(daily_vals, adapter = 'pythalesians', gp = gp)
# use miletus matplotlib stylesheet
gp.style_sheet = 'miletus-pythalesians'
pf.plot_line_graph(daily_vals, adapter = 'pythalesians', gp = gp)
# use ggplot matplotlib styleheet
gp.scale_factor = 1
gp.display_brand_label = False
gp.display_source = False
gp.style_sheet = 'ggplot-pythalesians'
gp.display_mpld3 = True
gp.html_file_output = 'output_data/demo.htm'
pf.plot_line_graph(daily_vals, adapter = 'pythalesians', gp = gp)
# now use PyThalesians bokeh wrapper (still needs a lot of work!)
gp.scale_factor = 2
gp.html_file_output = 'output_data/demo_bokeh.htm'
pf.plot_line_graph(daily_vals, adapter = 'bokeh', gp = gp)
# test simple PyThalesians bar charts - calculate yearly returns for various assets
if False:
ltsf = LightTimeSeriesFactory()
start = '01 Jan 2000'
end = datetime.datetime.utcnow()
tickers = ['AUDJPY', 'USDJPY', 'EURUSD', 'S&P500']
vendor_tickers = ['AUDJPY BGN Curncy', 'USDJPY BGN Curncy', 'EURUSD BGN Curncy', 'SPX Index']
time_series_request = TimeSeriesRequest(
start_date = start, # start date
finish_date = datetime.date.today(), # finish date
freq = 'daily', # daily data
data_source = 'bloomberg', # use Bloomberg as data source
tickers = tickers, # ticker (Thalesians)
fields = ['close'], # which fields to download
vendor_tickers = vendor_tickers, # ticker (Bloomberg)
vendor_fields = ['PX_LAST'], # which Bloomberg fields to download
cache_algo = 'internet_load_return') # how to return data
daily_vals = ltsf.harvest_time_series(time_series_request)
# resample for year end
daily_vals = daily_vals.resample('A')
daily_vals = daily_vals / daily_vals.shift(1) - 1
daily_vals.index = daily_vals.index.year
daily_vals = daily_vals.drop(daily_vals.head(1).index)
pf = PlotFactory()
gp = GraphProperties()
gp.source = 'Thalesians/BBG'
gp.title = 'Yearly changes in spot'
gp.scale_factor = 3
gp.y_title = "Percent Change"
daily_vals = daily_vals * 100
# plot using PyThalesians (stacked & then bar graph)
pf.plot_stacked_graph(daily_vals, adapter = 'pythalesians', gp = gp)
pf.plot_bar_graph(daily_vals, adapter = 'pythalesians', gp = gp)
| apache-2.0 |
ywcui1990/nupic.research | htmresearch/frameworks/poirazi_neuron_model/neuron_model.py | 5 | 9343 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import random
import numpy
import copy
from sklearn.cluster import KMeans
from collections import Counter
from nupic.bindings.math import *
numpy.set_printoptions(threshold=numpy.inf)
def power_nonlinearity(power):
def l(activations):
original_activations = copy.deepcopy(activations)
for i in range(power - 1):
activations.elementNZMultiply(original_activations)
return activations
return l
def threshold_nonlinearity(threshold):
def l(activations):
activations.threshold(threshold)
return activations
return l
def sigmoid(center, scale):
return lambda x: 1./(1. + numpy.exp(scale*(center - x)))
def sigmoid_nonlinearity(center, scale):
def l(activations):
dense = activations.toDense()
f = sigmoid(center, scale)
return SM32(f(dense))
return l
class Matrix_Neuron(object):
def __init__(self,
size = 10000,
num_dendrites = 1000,
dendrite_length = 10,
dim = 400,
nonlinearity = threshold_nonlinearity(6),
initial_permanence = 0.5,
permanence_threshold = 0.15,
permanence_decrement = 0.0125,
permanence_increment = 0.02):
self.size = size
self.num_dendrites = num_dendrites
self.dendrite_length = dendrite_length
self.dim = dim
self.nonlinearity = nonlinearity
self.initial_permanence = initial_permanence
self.permanence_threshold = permanence_threshold
self.permanence_decrement = permanence_decrement
self.permanence_increment = permanence_increment
self.initialize_dendrites()
self.initialize_permanences()
def initialize_dendrites(self):
"""
Initialize all the dendrites of the neuron to a set of random connections
"""
# Wipe any preexisting connections by creating a new connection matrix
self.dendrites = SM32()
self.dendrites.reshape(self.dim, self.num_dendrites)
for row in range(self.num_dendrites):
synapses = numpy.random.choice(self.dim, self.dendrite_length, replace = False)
for synapse in synapses:
self.dendrites[synapse, row] = 1
def initialize_permanences(self):
self.permanences = copy.deepcopy(self.dendrites)
self.permanences = self.permanences*self.initial_permanence
def calculate_activation(self, datapoint):
"""
Only for a single datapoint
"""
activations = datapoint * self.dendrites
activations = self.nonlinearity(activations)
return activations.sum()
def calculate_on_entire_dataset(self, data):
activations = data * self.dendrites
activations = self.nonlinearity(activations)
return activations.rowSums()
def HTM_style_initialize_on_data(self, data, labels):
"""
Uses a style of initialization inspired by the temporal memory. When a new positive example is found,
a dendrite is chosen and a number of synapses are created to the example.
This works intelligently with an amount of data larger than the number of available dendrites.
In this case, data is clustered, and then similar datapoints are allotted to shared dendrites,
with as many overlapping bits as possible chosen. In practice, it is still better to simply
allocate enough dendrites to have one per datapoint, but this method at least allows initialization
to work on larger amounts of data.
"""
current_dendrite = 0
self.dendrites = SM32()
self.dendrites.reshape(self.dim, self.num_dendrites)
# We want to avoid training on any negative examples
data = copy.deepcopy(data)
data.deleteRows([i for i, v in enumerate(labels) if v != 1])
if data.nRows() > self.num_dendrites:
print "Neuron using clustering to initialize dendrites"
data = (data.toDense())
model = KMeans(n_clusters = self.num_dendrites, n_jobs=1)
clusters = model.fit_predict(data)
multisets = [[Counter(), []] for i in range(self.num_dendrites)]
sparse_data = [[i for i, d in enumerate(datapoint) if d == 1] for datapoint in data]
for datapoint, cluster in zip(sparse_data, clusters):
multisets[cluster][0] = multisets[cluster][0] + Counter(datapoint)
multisets[cluster][1].append(set(datapoint))
for i, multiset in enumerate(multisets):
shared_elements = set(map(lambda x: x[0], filter(lambda x: x[1] > 1, multiset[0].most_common(self.dendrite_length))))
dendrite_connections = shared_elements
while len(shared_elements) < self.dendrite_length:
most_distant_point = multiset[1][numpy.argmin([len(dendrite_connections.intersection(point)) for point in multiset[1]])]
new_connection = random.sample(most_distant_point - dendrite_connections, 1)[0]
dendrite_connections.add(new_connection)
for synapse in dendrite_connections:
self.dendrites[synapse, current_dendrite] = 1.
current_dendrite += 1
else:
for i in range(data.nRows()):
ones = data.rowNonZeros(i)[0]
dendrite_connections = numpy.random.choice(ones, size = self.dendrite_length, replace = False)
for synapse in dendrite_connections:
self.dendrites[synapse, current_dendrite] = 1.
current_dendrite += 1
self.initialize_permanences()
def HTM_style_train_on_data(self, data, labels):
for i in range(data.nRows()):
self.HTM_style_train_on_datapoint(data.getSlice(i, i+1, 0, data.nCols()), labels[i])
def HTM_style_train_on_datapoint(self, datapoint, label):
"""
Run a version of permanence-based training on a datapoint. Due to the fixed dendrite count and dendrite length,
    we must use each synapse efficiently, deleting synapses and reassigning them if they are not found useful.
"""
activations = datapoint * self.dendrites
self.nonlinearity(activations)
#activations will quite likely still be sparse if using a threshold nonlinearity, so want to keep it sparse
activation = numpy.sign(activations.sum())
if label >= 1 and activation >= 0.5:
strongest_branch = activations.rowMax(0)[0]
datapoint.transpose()
inc_vector = self.dendrites.getSlice(0, self.dim, strongest_branch, strongest_branch + 1) * self.permanence_increment
inc_vector.elementNZMultiply(datapoint)
dec_vector = self.dendrites.getSlice(0, self.dim, strongest_branch, strongest_branch + 1) * self.permanence_decrement
dec_vector.elementNZMultiply(1 - datapoint)
self.permanences.setSlice(0, strongest_branch, self.permanences.getSlice(0, self.dim, strongest_branch, strongest_branch + 1) + inc_vector - dec_vector)
positions, scores = self.permanences.colNonZeros(strongest_branch)[0], self.permanences.colNonZeros(strongest_branch)[1]
for position, score in zip(positions, scores):
if score < self.permanence_threshold:
self.dendrites[position, strongest_branch] = 0
self.permanences[position, strongest_branch] = 0
new_connection = random.sample(set(datapoint.colNonZeros(0)[0]) - set(self.dendrites.colNonZeros(strongest_branch)[0]), 1)[0]
self.dendrites[new_connection, strongest_branch] = 1.
self.permanences[new_connection, strongest_branch] = self.initial_permanence
elif label < 1 and activation >= 0.5:
# Need to weaken some connections
strongest_branch = activations.rowMax(0)[0]
dec_vector = self.dendrites.getSlice(0, self.dim, strongest_branch, strongest_branch + 1) * self.permanence_decrement
datapoint.transpose()
dec_vector.elementNZMultiply(datapoint)
self.permanences.setSlice(0, strongest_branch, self.permanences.getSlice(0, self.dim, strongest_branch, strongest_branch + 1) - dec_vector)
elif label >= 1 and activation < 0.5:
# Need to create some new connections
weakest_branch = numpy.argmin(self.permanences.colSums())
if numpy.median(self.permanences.getCol(weakest_branch)) < self.permanence_threshold:
self.permanences.setColToZero(weakest_branch)
self.dendrites.setColToZero(weakest_branch)
ones = datapoint.rowNonZeros(0)[0]
dendrite_connections = numpy.random.choice(ones, size = self.dendrite_length, replace = False)
for synapse in dendrite_connections:
self.dendrites[synapse, weakest_branch] = 1.
self.permanences[synapse, weakest_branch] = self.initial_permanence
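# A minimal end-to-end sketch (illustrative; assumes SM32 can be built from a
# dense 0/1 numpy array, as in nupic.bindings.math): initialize dendrites on
# the positive examples, run the HTM-style permanence updates, then read back
# the per-datapoint activations.
def _matrix_neuron_sketch():
  dim, n_samples = 400, 20
  dense = numpy.random.binomial(1, 0.1, size=(n_samples, dim)).astype("float32")
  data = SM32(dense)
  labels = [1] * 10 + [0] * 10
  neuron = Matrix_Neuron(num_dendrites=20, dendrite_length=10, dim=dim,
                         nonlinearity=threshold_nonlinearity(6))
  neuron.HTM_style_initialize_on_data(data, labels)
  neuron.HTM_style_train_on_data(data, labels)
  return neuron.calculate_on_entire_dataset(data)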
| agpl-3.0 |
piyush0609/scipy | scipy/stats/kde.py | 27 | 17303 | #-------------------------------------------------------------------------------
#
# Define classes for (uni/multi)-variate kernel density estimation.
#
# Currently, only Gaussian kernels are implemented.
#
# Written by: Robert Kern
#
# Date: 2004-08-09
#
# Modified: 2005-02-10 by Robert Kern.
# Contributed to Scipy
# 2005-10-07 by Robert Kern.
# Some fixes to match the new scipy_core
#
# Copyright 2004-2005 by Enthought, Inc.
#
#-------------------------------------------------------------------------------
from __future__ import division, print_function, absolute_import
# Standard library imports.
import warnings
# Scipy imports.
from scipy._lib.six import callable, string_types
from scipy import linalg, special
from numpy import atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, \
ravel, power, atleast_1d, squeeze, sum, transpose
import numpy as np
from numpy.random import randint, multivariate_normal
# Local imports.
from . import mvn
__all__ = ['gaussian_kde']
class gaussian_kde(object):
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
evaluate
__call__
integrate_gaussian
integrate_box_1d
integrate_box
integrate_kde
pdf
logpdf
resample
set_bandwidth
covariance_factor
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
References
----------
.. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] B.W. Silverman, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
Examples
--------
Generate some random two-dimensional data:
>>> from scipy import stats
>>> def measure(n):
... "Measurement model, return two coupled measurements."
... m1 = np.random.normal(size=n)
... m2 = np.random.normal(scale=0.5, size=n)
... return m1+m2, m1-m2
>>> m1, m2 = measure(2000)
>>> xmin = m1.min()
>>> xmax = m1.max()
>>> ymin = m2.min()
>>> ymax = m2.max()
Perform a kernel density estimate on the data:
>>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
>>> positions = np.vstack([X.ravel(), Y.ravel()])
>>> values = np.vstack([m1, m2])
>>> kernel = stats.gaussian_kde(values)
>>> Z = np.reshape(kernel(positions).T, X.shape)
Plot the results:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
... extent=[xmin, xmax, ymin, ymax])
>>> ax.plot(m1, m2, 'k.', markersize=2)
>>> ax.set_xlim([xmin, xmax])
>>> ax.set_ylim([ymin, ymax])
>>> plt.show()
"""
def __init__(self, dataset, bw_method=None):
self.dataset = atleast_2d(dataset)
if not self.dataset.size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.d, self.n = self.dataset.shape
self.set_bandwidth(bw_method=bw_method)
def evaluate(self, points):
"""Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
        ValueError : if the dimensionality of the input points is different from
                     the dimensionality of the KDE.
"""
points = atleast_2d(points)
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
result = zeros((m,), dtype=float)
if m >= self.n:
# there are more points than data, so loop over data
for i in range(self.n):
diff = self.dataset[:, i, newaxis] - points
tdiff = dot(self.inv_cov, diff)
energy = sum(diff*tdiff,axis=0) / 2.0
result = result + exp(-energy)
else:
# loop over points
for i in range(m):
diff = self.dataset - points[:, i, newaxis]
tdiff = dot(self.inv_cov, diff)
energy = sum(diff * tdiff, axis=0) / 2.0
result[i] = sum(exp(-energy), axis=0)
result = result / self._norm_factor
return result
__call__ = evaluate
def integrate_gaussian(self, mean, cov):
"""
Multiply estimated density by a multivariate Gaussian and integrate
over the whole space.
Parameters
----------
        mean : array_like
A 1-D array, specifying the mean of the Gaussian.
cov : array_like
A 2-D array, specifying the covariance matrix of the Gaussian.
Returns
-------
result : scalar
The value of the integral.
Raises
------
ValueError :
If the mean or covariance of the input Gaussian differs from
the KDE's dimensionality.
"""
mean = atleast_1d(squeeze(mean))
cov = atleast_2d(cov)
if mean.shape != (self.d,):
raise ValueError("mean does not have dimension %s" % self.d)
if cov.shape != (self.d, self.d):
raise ValueError("covariance does not have dimension %s" % self.d)
# make mean a column vector
mean = mean[:, newaxis]
sum_cov = self.covariance + cov
diff = self.dataset - mean
tdiff = dot(linalg.inv(sum_cov), diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result = sum(exp(-energies), axis=0) / sqrt(linalg.det(2 * pi *
sum_cov)) / self.n
return result
def integrate_box_1d(self, low, high):
"""
Computes the integral of a 1D pdf between two bounds.
Parameters
----------
low : scalar
Lower bound of integration.
high : scalar
Upper bound of integration.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDE is over more than one dimension.
"""
if self.d != 1:
raise ValueError("integrate_box_1d() only handles 1D pdfs")
stdev = ravel(sqrt(self.covariance))[0]
normalized_low = ravel((low - self.dataset) / stdev)
normalized_high = ravel((high - self.dataset) / stdev)
value = np.mean(special.ndtr(normalized_high) -
special.ndtr(normalized_low))
return value
def integrate_box(self, low_bounds, high_bounds, maxpts=None):
"""Computes the integral of a pdf over a rectangular interval.
Parameters
----------
low_bounds : array_like
A 1-D array containing the lower bounds of integration.
high_bounds : array_like
A 1-D array containing the upper bounds of integration.
maxpts : int, optional
The maximum number of points to use for integration.
Returns
-------
value : scalar
The result of the integral.
"""
if maxpts is not None:
extra_kwds = {'maxpts': maxpts}
else:
extra_kwds = {}
value, inform = mvn.mvnun(low_bounds, high_bounds, self.dataset,
self.covariance, **extra_kwds)
if inform:
msg = ('An integral in mvn.mvnun requires more points than %s' %
(self.d * 1000))
warnings.warn(msg)
return value
def integrate_kde(self, other):
"""
Computes the integral of the product of this kernel density estimate
with another.
Parameters
----------
other : gaussian_kde instance
The other kde.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDEs have different dimensionality.
"""
if other.d != self.d:
raise ValueError("KDEs are not the same dimensionality")
# we want to iterate over the smallest number of points
if other.n < self.n:
small = other
large = self
else:
small = self
large = other
sum_cov = small.covariance + large.covariance
sum_cov_chol = linalg.cho_factor(sum_cov)
result = 0.0
for i in range(small.n):
mean = small.dataset[:, i, newaxis]
diff = large.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result += sum(exp(-energies), axis=0)
result /= sqrt(linalg.det(2 * pi * sum_cov)) * large.n * small.n
return result
def resample(self, size=None):
"""
Randomly sample a dataset from the estimated pdf.
Parameters
----------
size : int, optional
The number of samples to draw. If not provided, then the size is
the same as the underlying dataset.
Returns
-------
resample : (self.d, `size`) ndarray
The sampled dataset.
"""
if size is None:
size = self.n
norm = transpose(multivariate_normal(zeros((self.d,), float),
self.covariance, size=size))
indices = randint(0, self.n, size=size)
means = self.dataset[:, indices]
return means + norm
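# The two bandwidth rules below implement the factors described in the class
# docstring: Scott's rule is n**(-1./(d+4)) and Silverman's rule is
# (n*(d+2)/4.)**(-1./(d+4)); the kernel covariance used by evaluate() is the
# data covariance scaled by the square of whichever factor is selected.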
def scotts_factor(self):
return power(self.n, -1./(self.d+4))
def silverman_factor(self):
return power(self.n*(self.d+2.0)/4.0, -1./(self.d+4))
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that
multiplies the data covariance matrix to obtain the kernel covariance
matrix. The default is `scotts_factor`. A subclass can overwrite this
method to provide a different method, or set it through a call to
`kde.set_bandwidth`."""
def set_bandwidth(self, bw_method=None):
"""Compute the estimator bandwidth with given method.
The new bandwidth calculated after a call to `set_bandwidth` is used
for subsequent evaluations of the estimated density.
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a callable,
it should take a `gaussian_kde` instance as only parameter and
return a scalar. If None (default), nothing happens; the current
`kde.covariance_factor` method is kept.
Notes
-----
.. versionadded:: 0.11
Examples
--------
>>> import scipy.stats as stats
>>> x1 = np.array([-7, -5, 1, 4, 5.])
>>> kde = stats.gaussian_kde(x1)
>>> xs = np.linspace(-10, 10, num=50)
>>> y1 = kde(xs)
>>> kde.set_bandwidth(bw_method='silverman')
>>> y2 = kde(xs)
>>> kde.set_bandwidth(bw_method=kde.factor / 3.)
>>> y3 = kde(xs)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.plot(x1, np.ones(x1.shape) / (4. * x1.size), 'bo',
... label='Data points (rescaled)')
>>> ax.plot(xs, y1, label='Scott (default)')
>>> ax.plot(xs, y2, label='Silverman')
>>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
>>> ax.legend()
>>> plt.show()
"""
if bw_method is None:
pass
elif bw_method == 'scott':
self.covariance_factor = self.scotts_factor
elif bw_method == 'silverman':
self.covariance_factor = self.silverman_factor
elif np.isscalar(bw_method) and not isinstance(bw_method, string_types):
self._bw_method = 'use constant'
self.covariance_factor = lambda: bw_method
elif callable(bw_method):
self._bw_method = bw_method
self.covariance_factor = lambda: self._bw_method(self)
else:
msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
"or a callable."
raise ValueError(msg)
self._compute_covariance()
def _compute_covariance(self):
"""Computes the covariance matrix for each Gaussian kernel using
covariance_factor().
"""
self.factor = self.covariance_factor()
# Cache covariance and inverse covariance of the data
if not hasattr(self, '_data_inv_cov'):
self._data_covariance = atleast_2d(np.cov(self.dataset, rowvar=1,
bias=False))
self._data_inv_cov = linalg.inv(self._data_covariance)
self.covariance = self._data_covariance * self.factor**2
self.inv_cov = self._data_inv_cov / self.factor**2
self._norm_factor = sqrt(linalg.det(2*pi*self.covariance)) * self.n
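# Note that _compute_covariance caches the data covariance and its inverse once,
# then rescales them by factor**2 on every bandwidth change; _norm_factor is
# sqrt(det(2*pi*covariance)) * n, the normalization shared by the n Gaussian
# kernels summed in evaluate().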
def pdf(self, x):
"""
Evaluate the estimated pdf on a provided set of points.
Notes
-----
This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``
docstring for more details.
"""
return self.evaluate(x)
def logpdf(self, x):
"""
Evaluate the log of the estimated pdf on a provided set of points.
Notes
-----
See `gaussian_kde.evaluate` for more details; this method simply
returns ``np.log(gaussian_kde.evaluate(x))``.
"""
return np.log(self.evaluate(x))
| bsd-3-clause |
HIPS/autograd | examples/black_box_svi.py | 3 | 3136 | from __future__ import absolute_import
from __future__ import print_function
import matplotlib.pyplot as plt
import autograd.numpy as np
import autograd.numpy.random as npr
import autograd.scipy.stats.multivariate_normal as mvn
import autograd.scipy.stats.norm as norm
from autograd import grad
from autograd.misc.optimizers import adam
def black_box_variational_inference(logprob, D, num_samples):
"""Implements http://arxiv.org/abs/1401.0118, and uses the
local reparameterization trick from http://arxiv.org/abs/1506.02557"""
def unpack_params(params):
# Variational dist is a diagonal Gaussian.
mean, log_std = params[:D], params[D:]
return mean, log_std
def gaussian_entropy(log_std):
return 0.5 * D * (1.0 + np.log(2*np.pi)) + np.sum(log_std)
rs = npr.RandomState(0)
def variational_objective(params, t):
"""Provides a stochastic estimate of the variational lower bound."""
mean, log_std = unpack_params(params)
samples = rs.randn(num_samples, D) * np.exp(log_std) + mean
lower_bound = gaussian_entropy(log_std) + np.mean(logprob(samples, t))
return -lower_bound
gradient = grad(variational_objective)
return variational_objective, gradient, unpack_params
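# A minimal usage sketch (hypothetical standard-normal target; the full 2-D demo
# lives in the __main__ block below):
#   >>> logprob = lambda samples, t: np.sum(norm.logpdf(samples, 0, 1), axis=-1)
#   >>> objective, gradient, unpack_params = \
#   ...     black_box_variational_inference(logprob, D=2, num_samples=100)
#   >>> var_params = adam(gradient, np.zeros(4), step_size=0.1, num_iters=200)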
if __name__ == '__main__':
# Specify an inference problem by its unnormalized log-density.
D = 2
def log_density(x, t):
mu, log_sigma = x[:, 0], x[:, 1]
sigma_density = norm.logpdf(log_sigma, 0, 1.35)
mu_density = norm.logpdf(mu, 0, np.exp(log_sigma))
return sigma_density + mu_density
# Build variational objective.
objective, gradient, unpack_params = \
black_box_variational_inference(log_density, D, num_samples=2000)
# Set up plotting code
def plot_isocontours(ax, func, xlimits=[-2, 2], ylimits=[-4, 2], numticks=101):
x = np.linspace(*xlimits, num=numticks)
y = np.linspace(*ylimits, num=numticks)
X, Y = np.meshgrid(x, y)
zs = func(np.concatenate([np.atleast_2d(X.ravel()), np.atleast_2d(Y.ravel())]).T)
Z = zs.reshape(X.shape)
plt.contour(X, Y, Z)
ax.set_yticks([])
ax.set_xticks([])
# Set up figure.
fig = plt.figure(figsize=(8,8), facecolor='white')
ax = fig.add_subplot(111, frameon=False)
plt.ion()
plt.show(block=False)
def callback(params, t, g):
print("Iteration {} lower bound {}".format(t, -objective(params, t)))
plt.cla()
target_distribution = lambda x : np.exp(log_density(x, t))
plot_isocontours(ax, target_distribution)
mean, log_std = unpack_params(params)
variational_contour = lambda x: mvn.pdf(x, mean, np.diag(np.exp(2*log_std)))
plot_isocontours(ax, variational_contour)
plt.draw()
plt.pause(1.0/30.0)
print("Optimizing variational parameters...")
init_mean = -1 * np.ones(D)
init_log_std = -5 * np.ones(D)
init_var_params = np.concatenate([init_mean, init_log_std])
variational_params = adam(gradient, init_var_params, step_size=0.1, num_iters=2000, callback=callback)
| mit |
nolanliou/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/pandas_io.py | 92 | 4535 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn as core_pandas_input_fn
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
PANDAS_DTYPES = {
'int8': 'int',
'int16': 'int',
'int32': 'int',
'int64': 'int',
'uint8': 'int',
'uint16': 'int',
'uint32': 'int',
'uint64': 'int',
'float16': 'float',
'float32': 'float',
'float64': 'float',
'bool': 'i'
}
def pandas_input_fn(x,
y=None,
batch_size=128,
num_epochs=1,
shuffle=True,
queue_capacity=1000,
num_threads=1,
target_column='target'):
"""This input_fn diffs from the core version with default `shuffle`."""
return core_pandas_input_fn(x=x,
y=y,
batch_size=batch_size,
shuffle=shuffle,
num_epochs=num_epochs,
queue_capacity=queue_capacity,
num_threads=num_threads,
target_column=target_column)
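# A minimal usage sketch (hypothetical `features_df` DataFrame and `labels`
# Series, not defined in this module):
#   >>> input_fn = pandas_input_fn(x=features_df, y=labels, batch_size=32,
#   ...                            num_epochs=None, shuffle=True)
# The returned callable is then passed as `input_fn` to an estimator's fit/train
# method.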
def extract_pandas_data(data):
"""Extract data from pandas.DataFrame for predictors.
Given a DataFrame, will extract the values and cast them to float. The
DataFrame is expected to contain values of type int, float or bool.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values as floats.
Raises:
ValueError: if data contains types other than int, float or bool.
"""
if not isinstance(data, pd.DataFrame):
return data
bad_data = [column for column in data
if data[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return data.values.astype('float')
else:
error_report = [("'" + str(column) + "' type='" +
data[column].dtype.name + "'") for column in bad_data]
raise ValueError('Data types for extracting pandas data must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
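# For example, extract_pandas_data(pd.DataFrame({'a': [1, 2], 'b': [0.5, 1.5]}))
# returns a float ndarray [[1., 0.5], [2., 1.5]], whereas a frame containing an
# object/string column raises the ValueError above.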
def extract_pandas_matrix(data):
"""Extracts numpy matrix from pandas DataFrame.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values.
"""
if not isinstance(data, pd.DataFrame):
return data
return data.as_matrix()
def extract_pandas_labels(labels):
"""Extract data from pandas.DataFrame for labels.
Args:
labels: `pandas.DataFrame` or `pandas.Series` containing one column of
labels to be extracted.
Returns:
A numpy `ndarray` of labels from the DataFrame.
Raises:
ValueError: if more than one column is found or type is not int, float or
bool.
"""
if isinstance(labels,
pd.DataFrame): # pandas.Series also belongs to DataFrame
if len(labels.columns) > 1:
raise ValueError('Only one column for labels is allowed.')
bad_data = [column for column in labels
if labels[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return labels.values
else:
error_report = ["'" + str(column) + "' type="
+ str(labels[column].dtype.name) for column in bad_data]
raise ValueError('Data types for extracting labels must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
else:
return labels
| apache-2.0 |
kl0u/flink | flink-python/pyflink/fn_execution/operation_utils.py | 2 | 15214 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
from enum import Enum
from typing import Any, Tuple, Dict, List
from pyflink.common import Row
from pyflink.datastream.time_domain import TimeDomain
from pyflink.fn_execution import flink_fn_execution_pb2, pickle
from pyflink.serializers import PickleSerializer
from pyflink.table import functions
from pyflink.table.udf import DelegationTableFunction, DelegatingScalarFunction, \
ImperativeAggregateFunction, PandasAggregateFunctionWrapper
_func_num = 0
_constant_num = 0
def wrap_pandas_result(it):
import pandas as pd
arrays = []
for result in it:
if isinstance(result, (Row, Tuple)):
arrays.append(pd.concat([pd.Series([item]) for item in result], axis=1))
else:
arrays.append(pd.Series([result]))
return arrays
def wrap_inputs_as_row(*args):
from pyflink.common.types import Row
import pandas as pd
if type(args[0]) == pd.Series:
return pd.concat(args, axis=1)
elif len(args) == 1 and isinstance(args[0], (pd.DataFrame, Row, Tuple)):
return args[0]
else:
return Row(*args)
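# wrap_inputs_as_row packs the positional arguments handed to a UDF into one
# row-like value: pandas Series inputs are concatenated column-wise into a
# DataFrame, a single argument that is already a DataFrame/Row/Tuple is passed
# through unchanged, and plain scalars are wrapped in a Row.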
def extract_over_window_user_defined_function(user_defined_function_proto):
window_index = user_defined_function_proto.window_index
return (*extract_user_defined_function(user_defined_function_proto, True), window_index)
def extract_user_defined_function(user_defined_function_proto, pandas_udaf=False)\
-> Tuple[str, Dict, List]:
"""
Extracts user-defined-function from the proto representation of a
:class:`UserDefinedFunction`.
    :param user_defined_function_proto: the proto representation of the Python
        :class:`UserDefinedFunction`
    :param pandas_udaf: whether the user_defined_function_proto is a pandas UDAF
"""
def _next_func_num():
global _func_num
_func_num = _func_num + 1
return _func_num
def _extract_input(args) -> Tuple[str, Dict, List]:
local_variable_dict = {}
local_funcs = []
args_str = []
for arg in args:
if arg.HasField("udf"):
# for chaining Python UDF input: the input argument is a Python ScalarFunction
udf_arg, udf_variable_dict, udf_funcs = extract_user_defined_function(arg.udf)
args_str.append(udf_arg)
local_variable_dict.update(udf_variable_dict)
local_funcs.extend(udf_funcs)
elif arg.HasField("inputOffset"):
# the input argument is a column of the input row
args_str.append("value[%s]" % arg.inputOffset)
else:
# the input argument is a constant value
constant_value_name, parsed_constant_value = \
_parse_constant_value(arg.inputConstant)
args_str.append(constant_value_name)
local_variable_dict[constant_value_name] = parsed_constant_value
return ",".join(args_str), local_variable_dict, local_funcs
variable_dict = {}
user_defined_funcs = []
user_defined_func = pickle.loads(user_defined_function_proto.payload)
if pandas_udaf:
user_defined_func = PandasAggregateFunctionWrapper(user_defined_func)
func_name = 'f%s' % _next_func_num()
if isinstance(user_defined_func, DelegatingScalarFunction) \
or isinstance(user_defined_func, DelegationTableFunction):
variable_dict[func_name] = user_defined_func.func
else:
variable_dict[func_name] = user_defined_func.eval
user_defined_funcs.append(user_defined_func)
func_args, input_variable_dict, input_funcs = _extract_input(user_defined_function_proto.inputs)
variable_dict.update(input_variable_dict)
user_defined_funcs.extend(input_funcs)
if user_defined_function_proto.takes_row_as_input:
variable_dict['wrap_inputs_as_row'] = wrap_inputs_as_row
func_str = "%s(wrap_inputs_as_row(%s))" % (func_name, func_args)
else:
func_str = "%s(%s)" % (func_name, func_args)
return func_str, variable_dict, user_defined_funcs
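# As an illustration, a scalar UDF applied to input column 0 plus one literal
# argument is rendered as an expression string of the form "f1(value[0],c1)";
# the names f1 and c1 are bound in variable_dict, and the returned func_str is
# expected to be evaluated by the caller against an input row named `value`.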
def _parse_constant_value(constant_value) -> Tuple[str, Any]:
j_type = constant_value[0]
serializer = PickleSerializer()
pickled_data = serializer.loads(constant_value[1:])
# the type set contains
# TINYINT,SMALLINT,INTEGER,BIGINT,FLOAT,DOUBLE,DECIMAL,CHAR,VARCHAR,NULL,BOOLEAN
    # the pickled_data doesn't need to be converted to another Python object
if j_type == 0:
parsed_constant_value = pickled_data
# the type is DATE
elif j_type == 1:
parsed_constant_value = \
datetime.date(year=1970, month=1, day=1) + datetime.timedelta(days=pickled_data)
# the type is TIME
elif j_type == 2:
seconds, milliseconds = divmod(pickled_data, 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
parsed_constant_value = datetime.time(hours, minutes, seconds, milliseconds * 1000)
# the type is TIMESTAMP
elif j_type == 3:
parsed_constant_value = \
datetime.datetime(year=1970, month=1, day=1, hour=0, minute=0, second=0) \
+ datetime.timedelta(milliseconds=pickled_data)
else:
raise Exception("Unknown type %s, should never happen" % str(j_type))
def _next_constant_num():
global _constant_num
_constant_num = _constant_num + 1
return _constant_num
constant_value_name = 'c%s' % _next_constant_num()
return constant_value_name, parsed_constant_value
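# In _parse_constant_value, DATE constants are transported as days since
# 1970-01-01, TIME constants as milliseconds within the day and TIMESTAMP
# constants as milliseconds since the epoch; e.g. a TIME payload of 45296789
# decodes through the divmod chain above to datetime.time(12, 34, 56, 789000).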
def extract_user_defined_aggregate_function(
current_index,
user_defined_function_proto,
distinct_info_dict: Dict[Tuple[List[str]], Tuple[List[int], List[int]]]):
user_defined_agg = load_aggregate_function(user_defined_function_proto.payload)
assert isinstance(user_defined_agg, ImperativeAggregateFunction)
args_str = []
local_variable_dict = {}
for arg in user_defined_function_proto.inputs:
if arg.HasField("inputOffset"):
# the input argument is a column of the input row
args_str.append("value[%s]" % arg.inputOffset)
else:
# the input argument is a constant value
constant_value_name, parsed_constant_value = \
_parse_constant_value(arg.inputConstant)
for key, value in local_variable_dict.items():
if value == parsed_constant_value:
constant_value_name = key
break
if constant_value_name not in local_variable_dict:
local_variable_dict[constant_value_name] = parsed_constant_value
args_str.append(constant_value_name)
if user_defined_function_proto.distinct:
if tuple(args_str) in distinct_info_dict:
distinct_info_dict[tuple(args_str)][0].append(current_index)
distinct_info_dict[tuple(args_str)][1].append(user_defined_function_proto.filter_arg)
distinct_index = distinct_info_dict[tuple(args_str)][0][0]
else:
distinct_info_dict[tuple(args_str)] = \
([current_index], [user_defined_function_proto.filter_arg])
distinct_index = current_index
else:
distinct_index = -1
if user_defined_function_proto.takes_row_as_input:
local_variable_dict['wrap_inputs_as_row'] = wrap_inputs_as_row
func_str = "lambda value : [wrap_inputs_as_row(%s)]" % ",".join(args_str)
else:
func_str = "lambda value : (%s,)" % ",".join(args_str)
return user_defined_agg, \
eval(func_str, local_variable_dict) \
if args_str else lambda v: tuple(), \
user_defined_function_proto.filter_arg, \
distinct_index
def is_built_in_function(payload):
# The payload may be a pickled bytes or the class name of the built-in functions.
# If it represents a built-in function, it will start with 0x00.
# If it is a pickled bytes, it will start with 0x80.
return payload[0] == 0
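# Concretely, a built-in function payload is b'\x00' followed by the UTF-8 class
# name that load_aggregate_function below looks up in pyflink.table.functions,
# while the pickled payload of a user-defined function starts with the pickle
# protocol marker b'\x80' and is handled by pickle.loads instead.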
def load_aggregate_function(payload):
if is_built_in_function(payload):
built_in_function_class_name = payload[1:].decode("utf-8")
cls = getattr(functions, built_in_function_class_name)
return cls()
else:
return pickle.loads(payload)
def extract_data_stream_stateless_function(udf_proto):
"""
Extracts user-defined-function from the proto representation of a
:class:`Function`.
:param udf_proto: the proto representation of the Python :class:`Function`
"""
func_type = udf_proto.function_type
UserDefinedDataStreamFunction = flink_fn_execution_pb2.UserDefinedDataStreamFunction
func = None
user_defined_func = pickle.loads(udf_proto.payload)
if func_type == UserDefinedDataStreamFunction.MAP:
func = user_defined_func.map
elif func_type == UserDefinedDataStreamFunction.FLAT_MAP:
func = user_defined_func.flat_map
elif func_type == UserDefinedDataStreamFunction.REDUCE:
reduce_func = user_defined_func.reduce
def wrapped_func(value):
return reduce_func(value[0], value[1])
func = wrapped_func
elif func_type == UserDefinedDataStreamFunction.CO_MAP:
co_map_func = user_defined_func
def wrapped_func(value):
            # value in format of: [INPUT_FLAG, LEFT_VALUE, RIGHT_VALUE]
            # INPUT_FLAG is True for the left stream and False for the right stream
return Row(CoMapFunctionOutputFlag.LEFT.value, co_map_func.map1(value[1])) \
if value[0] else Row(CoMapFunctionOutputFlag.RIGHT.value,
co_map_func.map2(value[2]))
func = wrapped_func
elif func_type == UserDefinedDataStreamFunction.CO_FLAT_MAP:
co_flat_map_func = user_defined_func
def wrapped_func(value):
if value[0]:
result = co_flat_map_func.flat_map1(value[1])
if result:
for result_val in result:
yield Row(CoFlatMapFunctionOutputFlag.LEFT.value, result_val)
yield Row(CoFlatMapFunctionOutputFlag.LEFT_END.value, None)
else:
result = co_flat_map_func.flat_map2(value[2])
if result:
for result_val in result:
yield Row(CoFlatMapFunctionOutputFlag.RIGHT.value, result_val)
yield Row(CoFlatMapFunctionOutputFlag.RIGHT_END.value, None)
func = wrapped_func
elif func_type == UserDefinedDataStreamFunction.TIMESTAMP_ASSIGNER:
extract_timestamp = user_defined_func.extract_timestamp
def wrapped_func(value):
pre_timestamp = value[0]
real_data = value[1]
return extract_timestamp(real_data, pre_timestamp)
func = wrapped_func
return func, user_defined_func
def extract_process_function(user_defined_function_proto, ctx):
process_function = pickle.loads(user_defined_function_proto.payload)
process_element = process_function.process_element
def wrapped_process_function(value):
        # VALUE: [CURRENT_TIMESTAMP, CURRENT_WATERMARK, NORMAL_DATA]
ctx.set_timestamp(value[0])
ctx.timer_service().set_current_watermark(value[1])
output_result = process_element(value[2], ctx)
return output_result
return wrapped_process_function, process_function
def extract_keyed_process_function(user_defined_function_proto, ctx, on_timer_ctx,
collector, keyed_state_backend):
process_function = pickle.loads(user_defined_function_proto.payload)
process_element = process_function.process_element
on_timer = process_function.on_timer
def wrapped_keyed_process_function(value):
if value[0] is not None:
# it is timer data
# VALUE: TIMER_FLAG, TIMESTAMP_OF_TIMER, CURRENT_WATERMARK, CURRENT_KEY_OF_TIMER, None
on_timer_ctx.set_timestamp(value[1])
on_timer_ctx.timer_service().set_current_watermark(value[2])
current_key = value[3]
on_timer_ctx.set_current_key(current_key)
keyed_state_backend.set_current_key(current_key)
if value[0] == KeyedProcessFunctionInputFlag.EVENT_TIME_TIMER.value:
on_timer_ctx.set_time_domain(TimeDomain.EVENT_TIME)
elif value[0] == KeyedProcessFunctionInputFlag.PROC_TIME_TIMER.value:
on_timer_ctx.set_time_domain(TimeDomain.PROCESSING_TIME)
else:
raise TypeError("TimeCharacteristic[%s] is not supported." % str(value[0]))
output_result = on_timer(value[1], on_timer_ctx)
else:
# it is normal data
# VALUE: TIMER_FLAG, CURRENT_TIMESTAMP, CURRENT_WATERMARK, None, NORMAL_DATA
# NORMAL_DATA: CURRENT_KEY, DATA
ctx.set_timestamp(value[1])
ctx.timer_service().set_current_watermark(value[2])
current_key = value[4][0]
ctx.set_current_key(current_key)
keyed_state_backend.set_current_key(Row(current_key))
output_result = process_element(value[4][1], ctx)
if output_result:
for result in output_result:
yield Row(None, None, None, result)
for result in collector.buf:
# 0: proc time timer data
# 1: event time timer data
# 2: normal data
# result_row: [TIMER_FLAG, TIMER TYPE, TIMER_KEY, RESULT_DATA]
yield Row(result[0], result[1], result[2], None)
collector.clear()
return wrapped_keyed_process_function, process_function
"""
All these Enum Classes MUST be in sync with
org.apache.flink.streaming.api.utils.PythonOperatorUtils if there are any changes.
"""
class KeyedProcessFunctionInputFlag(Enum):
EVENT_TIME_TIMER = 0
PROC_TIME_TIMER = 1
NORMAL_DATA = 2
class KeyedProcessFunctionOutputFlag(Enum):
REGISTER_EVENT_TIMER = 0
REGISTER_PROC_TIMER = 1
NORMAL_DATA = 2
DEL_EVENT_TIMER = 3
DEL_PROC_TIMER = 4
class CoFlatMapFunctionOutputFlag(Enum):
LEFT = 0
RIGHT = 1
LEFT_END = 2
RIGHT_END = 3
class CoMapFunctionOutputFlag(Enum):
LEFT = 0
RIGHT = 1
| apache-2.0 |
richardotis/scipy | scipy/stats/_distn_infrastructure.py | 11 | 112903 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy._lib.six import string_types, exec_
import sys
import keyword
import re
import inspect
import types
import warnings
from scipy.misc import doccer
from ._distr_params import distcont, distdiscrete
from scipy._lib._util import check_random_state
from scipy.special import (comb, chndtr, gammaln, hyp0f1,
entr, kl_div)
# for root finding for discrete distribution ppf, and max likelihood estimation
from scipy import optimize
# for functions of continuous distributions (e.g. moments, entropy, cdf)
from scipy import integrate
# to approximate the pdf of a continuous distribution given its cdf
from scipy.misc import derivative
from numpy import (arange, putmask, ravel, take, ones, sum, shape,
product, reshape, zeros, floor, logical_and, log, sqrt, exp,
ndarray)
from numpy import (place, any, argsort, argmax, vectorize,
asarray, nan, inf, isinf, NINF, empty)
import numpy as np
from ._constants import _EPS, _XMAX
try:
from new import instancemethod
except ImportError:
# Python 3
def instancemethod(func, obj, cls):
return types.MethodType(func, obj)
# These are the docstring parts used for substitution in specific
# distribution docstrings
docheaders = {'methods': """\nMethods\n-------\n""",
'notes': """\nNotes\n-----\n""",
'examples': """\nExamples\n--------\n"""}
_doc_rvs = """\
``rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)``
Random variates.
"""
_doc_pdf = """\
``pdf(x, %(shapes)s, loc=0, scale=1)``
Probability density function.
"""
_doc_logpdf = """\
``logpdf(x, %(shapes)s, loc=0, scale=1)``
Log of the probability density function.
"""
_doc_pmf = """\
``pmf(x, %(shapes)s, loc=0, scale=1)``
Probability mass function.
"""
_doc_logpmf = """\
``logpmf(x, %(shapes)s, loc=0, scale=1)``
Log of the probability mass function.
"""
_doc_cdf = """\
``cdf(x, %(shapes)s, loc=0, scale=1)``
    Cumulative distribution function.
"""
_doc_logcdf = """\
``logcdf(x, %(shapes)s, loc=0, scale=1)``
    Log of the cumulative distribution function.
"""
_doc_sf = """\
``sf(x, %(shapes)s, loc=0, scale=1)``
Survival function (``1 - cdf`` --- sometimes more accurate).
"""
_doc_logsf = """\
``logsf(x, %(shapes)s, loc=0, scale=1)``
Log of the survival function.
"""
_doc_ppf = """\
``ppf(q, %(shapes)s, loc=0, scale=1)``
Percent point function (inverse of ``cdf`` --- percentiles).
"""
_doc_isf = """\
``isf(q, %(shapes)s, loc=0, scale=1)``
Inverse survival function (inverse of ``sf``).
"""
_doc_moment = """\
``moment(n, %(shapes)s, loc=0, scale=1)``
Non-central moment of order n
"""
_doc_stats = """\
``stats(%(shapes)s, loc=0, scale=1, moments='mv')``
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = """\
``entropy(%(shapes)s, loc=0, scale=1)``
(Differential) entropy of the RV.
"""
_doc_fit = """\
``fit(data, %(shapes)s, loc=0, scale=1)``
Parameter estimates for generic data.
"""
_doc_expect = """\
``expect(func, %(shapes)s, loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)``
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = """\
``expect(func, %(shapes)s, loc=0, lb=None, ub=None, conditional=False)``
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = """\
``median(%(shapes)s, loc=0, scale=1)``
Median of the distribution.
"""
_doc_mean = """\
``mean(%(shapes)s, loc=0, scale=1)``
Mean of the distribution.
"""
_doc_var = """\
``var(%(shapes)s, loc=0, scale=1)``
Variance of the distribution.
"""
_doc_std = """\
``std(%(shapes)s, loc=0, scale=1)``
Standard deviation of the distribution.
"""
_doc_interval = """\
``interval(alpha, %(shapes)s, loc=0, scale=1)``
Endpoints of the range that contains alpha percent of the distribution
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
_doc_default_longsummary = """\
As an instance of the `rv_continuous` class, `%(name)s` object inherits from it
a collection of generic methods (see below for the full list),
and completes them with details specific for this particular distribution.
"""
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability density function (``pdf``):
>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s), 100)
>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),
... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')
Alternatively, the distribution object can be called (as a function)
to fix the shape, location and scale parameters. This returns a "frozen"
RV object holding the given parameters fixed.
Freeze the distribution and display the frozen ``pdf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)
>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
And compare the histogram:
>>> ax.hist(r, normed=True, histtype='stepfilled', alpha=0.2)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
_doc_default_locscale = """\
The probability density above is defined in the "standardized" form. To shift
and/or scale the distribution use the ``loc`` and ``scale`` parameters.
Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically
equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with
``y = (x - loc) / scale``.
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods])
docdict = {
'rvs': _doc_rvs,
'pdf': _doc_pdf,
'logpdf': _doc_logpdf,
'cdf': _doc_cdf,
'logcdf': _doc_logcdf,
'sf': _doc_sf,
'logsf': _doc_logsf,
'ppf': _doc_ppf,
'isf': _doc_isf,
'stats': _doc_stats,
'entropy': _doc_entropy,
'fit': _doc_fit,
'moment': _doc_moment,
'expect': _doc_expect,
'interval': _doc_interval,
'mean': _doc_mean,
'std': _doc_std,
'var': _doc_var,
'median': _doc_median,
'allmethods': _doc_allmethods,
'longsummary': _doc_default_longsummary,
'frozennote': _doc_default_frozen_note,
'example': _doc_default_example,
'default': _doc_default,
'before_notes': _doc_default_before_notes,
'after_notes': _doc_default_locscale
}
# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
'mean', 'var', 'std', 'interval']
for obj in _doc_disc_methods:
docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')
_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
docdict_discrete['longsummary'] = _doc_default_longsummary.replace(
'rv_continuous', 'rv_discrete')
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
- Frozen RV object with the same methods but holding the given shape and
location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note
_doc_default_discrete_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability mass function (``pmf``):
>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s))
>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')
>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)
Alternatively, the distribution object can be called (as a function)
to fix the shape and location. This returns a "frozen" RV object holding
the given parameters fixed.
Freeze the distribution and display the frozen ``pmf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
... label='frozen pmf')
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
Check accuracy of ``cdf`` and ``ppf``:
>>> prob = %(name)s.cdf(x, %(shapes)s)
>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
"""
_doc_default_discrete_locscale = """\
The probability mass function above is defined in the "standardized" form.
To shift the distribution use the ``loc`` parameter.
Specifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically
equivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``.
"""
docdict_discrete['example'] = _doc_default_discrete_example
docdict_discrete['after_notes'] = _doc_default_discrete_locscale
_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods']])
docdict_discrete['before_notes'] = _doc_default_before_notes
_doc_default_disc = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['frozennote'],
docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc
# clean up all the separate docstring elements, we do not need them anymore
for obj in [s for s in dir() if s.startswith('_doc_')]:
exec('del ' + obj)
del obj
try:
del s
except NameError:
# in Python 3, loop variables are not visible after the loop
pass
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n == 0):
return 1.0
elif (n == 1):
if mu is None:
val = moment_func(1, *args)
else:
val = mu
elif (n == 2):
if mu2 is None or mu is None:
val = moment_func(2, *args)
else:
val = mu2 + mu*mu
elif (n == 3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3, *args)
else:
mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment
elif (n == 4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4, *args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*np.power(mu2, 1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu
else:
val = moment_func(n, *args)
return val
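# _moment_from_stats converts the stats tuple (mu, mu2, g1, g2) into non-central
# moments using mu3 = g1*mu2**1.5, mu4 = (g2+3)*mu2**2 and the expansions
# E[X**2] = mu2 + mu**2, E[X**3] = mu3 + 3*mu*mu2 + mu**3 and
# E[X**4] = mu4 + 4*mu*mu3 + 6*mu**2*mu2 + mu**4, falling back to moment_func
# whenever a required ingredient is missing.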
def _skew(data):
"""
skew is third central moment / variance**(1.5)
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / np.power(m2, 1.5)
def _kurtosis(data):
"""
kurtosis is fourth central moment / variance**2 - 3
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
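# Both helpers use biased (population) sample moments, and _kurtosis returns the
# Fisher (excess) kurtosis, so large normal samples give values close to 0 for
# both statistics.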
# Frozen RV class
class rv_frozen(object):
def __init__(self, dist, *args, **kwds):
self.args = args
self.kwds = kwds
# create a new instance
self.dist = dist.__class__(**dist._ctor_param)
# a, b may be set in _argcheck, depending on *args, **kwds. Ouch.
shapes, _, _ = self.dist._parse_args(*args, **kwds)
self.dist._argcheck(*shapes)
self.a, self.b = self.dist.a, self.dist.b
@property
def random_state(self):
return self.dist._random_state
@random_state.setter
def random_state(self, seed):
self.dist._random_state = check_random_state(seed)
def pdf(self, x): # raises AttributeError in frozen discrete distribution
return self.dist.pdf(x, *self.args, **self.kwds)
def logpdf(self, x):
return self.dist.logpdf(x, *self.args, **self.kwds)
def cdf(self, x):
return self.dist.cdf(x, *self.args, **self.kwds)
def logcdf(self, x):
return self.dist.logcdf(x, *self.args, **self.kwds)
def ppf(self, q):
return self.dist.ppf(q, *self.args, **self.kwds)
def isf(self, q):
return self.dist.isf(q, *self.args, **self.kwds)
def rvs(self, size=None, random_state=None):
kwds = self.kwds.copy()
kwds.update({'size': size, 'random_state': random_state})
return self.dist.rvs(*self.args, **kwds)
def sf(self, x):
return self.dist.sf(x, *self.args, **self.kwds)
def logsf(self, x):
return self.dist.logsf(x, *self.args, **self.kwds)
def stats(self, moments='mv'):
kwds = self.kwds.copy()
kwds.update({'moments': moments})
return self.dist.stats(*self.args, **kwds)
def median(self):
return self.dist.median(*self.args, **self.kwds)
def mean(self):
return self.dist.mean(*self.args, **self.kwds)
def var(self):
return self.dist.var(*self.args, **self.kwds)
def std(self):
return self.dist.std(*self.args, **self.kwds)
def moment(self, n):
return self.dist.moment(n, *self.args, **self.kwds)
def entropy(self):
return self.dist.entropy(*self.args, **self.kwds)
def pmf(self, k):
return self.dist.pmf(k, *self.args, **self.kwds)
def logpmf(self, k):
return self.dist.logpmf(k, *self.args, **self.kwds)
def interval(self, alpha):
return self.dist.interval(alpha, *self.args, **self.kwds)
def expect(self, func=None, lb=None, ub=None,
conditional=False, **kwds):
# expect method only accepts shape parameters as positional args
# hence convert self.args, self.kwds, also loc/scale
# See the .expect method docstrings for the meaning of
# other parameters.
a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)
if isinstance(self.dist, rv_discrete):
if kwds:
raise ValueError("Discrete expect does not accept **kwds.")
return self.dist.expect(func, a, loc, lb, ub, conditional)
else:
return self.dist.expect(func, a, loc, scale, lb, ub,
conditional, **kwds)
def valarray(shape, value=nan, typecode=None):
"""Return an array of all value.
"""
out = ones(shape, dtype=bool) * value
if typecode is not None:
out = out.astype(typecode)
if not isinstance(out, ndarray):
out = asarray(out)
return out
def _lazywhere(cond, arrays, f, fillvalue=None, f2=None):
"""
np.where(cond, x, fillvalue) always evaluates x even where cond is False.
This one only evaluates f(arr1[cond], arr2[cond], ...).
For example,
>>> a, b = np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])
>>> def f(a, b):
    ...     return a*b
>>> _lazywhere(a > 2, (a, b), f, np.nan)
array([ nan, nan, 21., 32.])
Notice it assumes that all `arrays` are of the same shape, or can be
broadcasted together.
"""
if fillvalue is None:
if f2 is None:
raise ValueError("One of (fillvalue, f2) must be given.")
else:
fillvalue = np.nan
else:
if f2 is not None:
raise ValueError("Only one of (fillvalue, f2) can be given.")
arrays = np.broadcast_arrays(*arrays)
temp = tuple(np.extract(cond, arr) for arr in arrays)
out = valarray(shape(arrays[0]), value=fillvalue)
np.place(out, cond, f(*temp))
if f2 is not None:
temp = tuple(np.extract(~cond, arr) for arr in arrays)
np.place(out, ~cond, f2(*temp))
return out
# This should be rewritten
def argsreduce(cond, *args):
"""Return the sequence of ravel(args[i]) where ravel(condition) is
True in 1D.
Examples
--------
>>> import numpy as np
>>> rand = np.random.random_sample
>>> A = rand((4, 5))
>>> B = 2
>>> C = rand((1, 5))
>>> cond = np.ones(A.shape)
>>> [A1, B1, C1] = argsreduce(cond, A, B, C)
>>> B1.shape
(20,)
>>> cond[2,:] = 0
>>> [A2, B2, C2] = argsreduce(cond, A, B, C)
>>> B2.shape
(15,)
"""
newargs = np.atleast_1d(*args)
if not isinstance(newargs, list):
newargs = [newargs, ]
expand_arr = (cond == cond)
return [np.extract(cond, arr1 * expand_arr) for arr1 in newargs]
parse_arg_template = """
def _parse_args(self, %(shape_arg_str)s %(locscale_in)s):
return (%(shape_arg_str)s), %(locscale_out)s
def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):
return (%(shape_arg_str)s), %(locscale_out)s, size
def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):
return (%(shape_arg_str)s), %(locscale_out)s, moments
"""
# Both the continuous and discrete distributions depend on ncx2.
# I think the function name ncx2 is an abbreviation for noncentral chi squared.
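# The density evaluated by _ncx2_log_pdf below is
#     0.5 * exp(-(x + nc)/2) * (x/nc)**(df/4 - 1/2) * iv(df/2 - 1, sqrt(nc*x)),
# with the modified Bessel function iv rewritten in terms of hyp0f1 so that the
# whole computation can be carried out in log space via gammaln.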
def _ncx2_log_pdf(x, df, nc):
a = asarray(df/2.0)
fac = -nc/2.0 - x/2.0 + (a-1)*log(x) - a*log(2) - gammaln(a)
return fac + np.nan_to_num(log(hyp0f1(a, nc * x/4.0)))
def _ncx2_pdf(x, df, nc):
return np.exp(_ncx2_log_pdf(x, df, nc))
def _ncx2_cdf(x, df, nc):
return chndtr(x, df, nc)
class rv_generic(object):
"""Class which encapsulates common functionality between rv_discrete
and rv_continuous.
"""
def __init__(self, seed=None):
super(rv_generic, self).__init__()
# figure out if _stats signature has 'moments' keyword
sign = inspect.getargspec(self._stats)
self._stats_has_moments = ((sign[2] is not None) or
('moments' in sign[0]))
self._random_state = check_random_state(seed)
@property
def random_state(self):
""" Get or set the RandomState object for generating random variates.
This can be either None or an existing RandomState object.
If None (or np.random), use the RandomState singleton used by np.random.
If already a RandomState instance, use it.
If an int, use a new RandomState instance seeded with seed.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def _construct_argparser(
self, meths_to_inspect, locscale_in, locscale_out):
"""Construct the parser for the shape arguments.
Generates the argument-parsing functions dynamically and attaches
them to the instance.
Is supposed to be called in __init__ of a class for each distribution.
If self.shapes is a non-empty string, interprets it as a
comma-separated list of shape parameters.
Otherwise inspects the call signatures of `meths_to_inspect`
and constructs the argument-parsing functions from these.
In this case also sets `shapes` and `numargs`.
"""
if self.shapes:
# sanitize the user-supplied shapes
if not isinstance(self.shapes, string_types):
raise TypeError('shapes must be a string.')
shapes = self.shapes.replace(',', ' ').split()
for field in shapes:
if keyword.iskeyword(field):
raise SyntaxError('keywords cannot be used as shapes.')
if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):
raise SyntaxError(
'shapes must be valid python identifiers')
else:
# find out the call signatures (_pdf, _cdf etc), deduce shape
# arguments
shapes_list = []
for meth in meths_to_inspect:
shapes_args = inspect.getargspec(meth)
shapes_list.append(shapes_args.args)
# *args or **kwargs are not allowed w/automatic shapes
# (generic methods have 'self, x' only)
if len(shapes_args.args) > 2:
if shapes_args.varargs is not None:
raise TypeError(
'*args are not allowed w/out explicit shapes')
if shapes_args.keywords is not None:
raise TypeError(
'**kwds are not allowed w/out explicit shapes')
if shapes_args.defaults is not None:
raise TypeError('defaults are not allowed for shapes')
shapes = max(shapes_list, key=lambda x: len(x))
shapes = shapes[2:] # remove self, x,
# make sure the signatures are consistent
# (generic methods have 'self, x' only)
for item in shapes_list:
if len(item) > 2 and item[2:] != shapes:
raise TypeError('Shape arguments are inconsistent.')
# have the arguments, construct the method from template
shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None
dct = dict(shape_arg_str=shapes_str,
locscale_in=locscale_in,
locscale_out=locscale_out,
)
ns = {}
exec_(parse_arg_template % dct, ns)
# NB: attach to the instance, not class
for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:
setattr(self, name,
instancemethod(ns[name], self, self.__class__)
)
self.shapes = ', '.join(shapes) if shapes else None
if not hasattr(self, 'numargs'):
# allows more general subclassing with *args
self.numargs = len(shapes)
def _construct_doc(self, docdict, shapes_vals=None):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if shapes_vals is None:
shapes_vals = ()
vals = ', '.join('%.3g' % val for val in shapes_vals)
tempdict['vals'] = vals
if self.shapes:
tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)
else:
tempdict['set_vals_stmt'] = ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['default', 'before_notes']:
tempdict[item] = tempdict[item].replace(
"\n%(shapes)s : array_like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
# necessary because we use %(shapes)s in two forms (w w/o ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
# correct for empty shapes
self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')
def _construct_default_doc(self, longname=None, extradoc=None,
docdict=None, discrete='continuous'):
"""Construct instance docstring from the default template."""
if longname is None:
longname = 'A'
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s %s random variable.' % (longname, discrete),
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc(docdict)
def freeze(self, *args, **kwds):
"""Freeze the distribution for the given arguments.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution. Should include all
the non-optional arguments, may include ``loc`` and ``scale``.
Returns
-------
rv_frozen : rv_frozen instance
The frozen distribution.
"""
return rv_frozen(self, *args, **kwds)
def __call__(self, *args, **kwds):
return self.freeze(*args, **kwds)
__call__.__doc__ = freeze.__doc__
# The actual calculation functions (no basic checking need be done)
# If these are defined, the others won't be looked at.
# Otherwise, the other set can be defined.
def _stats(self, *args, **kwds):
return None, None, None, None
# Central moments
def _munp(self, n, *args):
# Silence floating point warnings from integration.
olderr = np.seterr(all='ignore')
vals = self.generic_moment(n, *args)
np.seterr(**olderr)
return vals
## These are the methods you must define (standard form functions)
## NB: generic _pdf, _logpdf, _cdf are different for
## rv_continuous and rv_discrete hence are defined in there
def _argcheck(self, *args):
"""Default check for correct values on args and keywords.
Returns condition array of 1's where arguments are correct and
0's where they are not.
"""
cond = 1
for arg in args:
cond = logical_and(cond, (asarray(arg) > 0))
return cond
##(return 1-d using self._size to get number)
def _rvs(self, *args):
## Use basic inverse cdf algorithm for RV generation as default.
U = self._random_state.random_sample(self._size)
Y = self._ppf(U, *args)
return Y
def _logcdf(self, x, *args):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x, *args)
def _logsf(self, x, *args):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self._ppfvec(q, *args)
def _isf(self, q, *args):
return self._ppf(1.0-q, *args) # use correct _ppf for subclasses
# These are actually called, and should not be overwritten if you
# want to keep error checking.
def rvs(self, *args, **kwds):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional
Scale parameter (default=1).
size : int or tuple of ints, optional
Defining number of random variates (default is 1).
random_state : None or int or ``np.random.RandomState`` instance, optional
If int or RandomState, use it for drawing the random variates.
If None, rely on ``self.random_state``.
Default is None.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
discrete = kwds.pop('discrete', None)
rndm = kwds.pop('random_state', None)
args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
cond = logical_and(self._argcheck(*args), (scale >= 0))
if not np.all(cond):
raise ValueError("Domain error in arguments.")
# self._size is total size of all output values
self._size = product(size, axis=0)
if self._size is not None and self._size > 1:
size = np.array(size, ndmin=1)
if np.all(scale == 0):
return loc*ones(size, 'd')
# extra gymnastics needed for a custom random_state
if rndm is not None:
random_state_saved = self._random_state
self._random_state = check_random_state(rndm)
vals = self._rvs(*args)
if self._size is not None:
vals = reshape(vals, size)
vals = vals * scale + loc
# do not forget to restore the _random_state
if rndm is not None:
self._random_state = random_state_saved
# Cast to int if discrete
if discrete:
if np.isscalar(vals):
vals = int(vals)
else:
vals = vals.astype(int)
return vals
def stats(self, *args, **kwds):
"""
Some statistics of the given RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional (continuous RVs only)
scale parameter (default=1)
moments : str, optional
composed of letters ['mvsk'] defining which moments to compute:
'm' = mean,
'v' = variance,
's' = (Fisher's) skew,
'k' = (Fisher's) kurtosis.
(default is 'mv')
Returns
-------
stats : sequence
of requested moments.
"""
args, loc, scale, moments = self._parse_args_stats(*args, **kwds)
# scale = 1 by construction for discrete RVs
loc, scale = map(asarray, (loc, scale))
args = tuple(map(asarray, args))
cond = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = []
default = valarray(shape(cond), self.badvalue)
# Use only entries that are valid in calculation
if any(cond):
goodargs = argsreduce(cond, *(args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
if self._stats_has_moments:
mu, mu2, g1, g2 = self._stats(*goodargs,
**{'moments': moments})
else:
mu, mu2, g1, g2 = self._stats(*goodargs)
if g1 is None:
mu3 = None
else:
if mu2 is None:
mu2 = self._munp(2, *goodargs)
# (mu2**1.5) breaks down for nan and inf
mu3 = g1 * np.power(mu2, 1.5)
if 'm' in moments:
if mu is None:
mu = self._munp(1, *goodargs)
out0 = default.copy()
place(out0, cond, mu * scale + loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
mu2 = mu2p - mu * mu
if np.isinf(mu):
#if mean is inf then var is also inf
mu2 = np.inf
out0 = default.copy()
place(out0, cond, mu2 * scale * scale)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
mu3 = mu3p - 3 * mu * mu2 - mu**3
g1 = mu3 / np.power(mu2, 1.5)
out0 = default.copy()
place(out0, cond, g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
if mu3 is None:
mu3p = self._munp(3, *goodargs)
mu3 = mu3p - 3 * mu * mu2 - mu**3
mu4 = mu4p - 4 * mu * mu3 - 6 * mu * mu * mu2 - mu**4
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
place(out0, cond, g2)
output.append(out0)
else: # no valid args
output = []
for _ in moments:
out0 = default.copy()
output.append(out0)
if len(output) == 1:
return output[0]
else:
return tuple(output)
def entropy(self, *args, **kwds):
"""
Differential entropy of the RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional (continuous distributions only).
Scale parameter (default=1).
Notes
-----
Entropy is defined base `e`:
>>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))
>>> np.allclose(drv.entropy(), np.log(2.0))
True
"""
args, loc, scale = self._parse_args(*args, **kwds)
# NB: for discrete distributions scale=1 by construction in _parse_args
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = zeros(shape(cond0), 'd')
place(output, (1-cond0), self.badvalue)
goodargs = argsreduce(cond0, *args)
# np.vectorize doesn't work when numargs == 0 in numpy 1.6.2. Once the
# lowest supported numpy version is >= 1.7.0, this special case can be
# removed (see gh-4314).
if self.numargs == 0:
place(output, cond0, self._entropy() + log(scale))
else:
place(output, cond0, self.vecentropy(*goodargs) + log(scale))
return output
def moment(self, n, *args, **kwds):
"""
n-th order non-central moment of distribution.
Parameters
----------
n : int, n >= 1
Order of moment.
arg1, arg2, arg3,... : float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
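Examples
--------
A minimal sketch, assuming the ``norm`` instance from ``scipy.stats``:
the second non-central moment of N(loc, scale**2) is loc**2 + scale**2.
>>> from scipy.stats import norm
>>> np.allclose(norm.moment(2, loc=3, scale=2), 3**2 + 2**2)
True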
"""
args, loc, scale = self._parse_args(*args, **kwds)
if not (self._argcheck(*args) and (scale > 0)):
return nan
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0):
raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
if self._stats_has_moments:
mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]}
else:
mdict = {}
mu, mu2, g1, g2 = self._stats(*args, **mdict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
# Convert to transformed X = L + S*Y
# E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)
if loc == 0:
return scale**n * val
else:
result = 0
fac = float(scale) / float(loc)
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
result += comb(n, k, exact=True)*(fac**k) * valk
result += fac**n * val
return result * loc**n
def median(self, *args, **kwds):
"""
Median of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter, Default is 0.
scale : array_like, optional
Scale parameter, Default is 1.
Returns
-------
median : float
The median of the distribution.
See Also
--------
stats.distributions.rv_discrete.ppf
Inverse of the CDF
"""
return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
"""
Mean of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
mean : float
the mean of the distribution
"""
kwds['moments'] = 'm'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def var(self, *args, **kwds):
"""
Variance of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
var : float
the variance of the distribution
"""
kwds['moments'] = 'v'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def std(self, *args, **kwds):
"""
Standard deviation of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
std : float
standard deviation of the distribution
"""
kwds['moments'] = 'v'
res = sqrt(self.stats(*args, **kwds))
return res
def interval(self, alpha, *args, **kwds):
"""
Confidence interval with equal areas around the median.
Parameters
----------
alpha : array_like of float
Probability that an rv will be drawn from the returned range.
Each value should be in the range [0, 1].
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter, Default is 0.
scale : array_like, optional
scale parameter, Default is 1.
Returns
-------
a, b : ndarray of float
end-points of range that contain ``100 * alpha %`` of the rv's
possible values.
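Examples
--------
A minimal sketch, assuming the ``norm`` instance from ``scipy.stats``:
the central 95% interval of the standard normal is roughly (-1.96, 1.96).
>>> from scipy.stats import norm
>>> lower, upper = norm.interval(0.95)
>>> np.allclose([lower, upper], [-1.959963984540054, 1.959963984540054])
True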
"""
alpha = asarray(alpha)
if any((alpha > 1) | (alpha < 0)):
raise ValueError("alpha must be between 0 and 1 inclusive")
q1 = (1.0-alpha)/2
q2 = (1.0+alpha)/2
a = self.ppf(q1, *args, **kwds)
b = self.ppf(q2, *args, **kwds)
return a, b
## continuous random variables: implement maybe later
##
## hf --- Hazard Function (PDF / SF)
## chf --- Cumulative hazard function (-log(SF))
## psf --- Probability sparsity function (reciprocal of the pdf) in
## units of percent-point-function (as a function of q).
## Also, the derivative of the percent-point function.
class rv_continuous(rv_generic):
"""
A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default)
for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : float, optional
The value in the result arrays that indicates a value for which
some argument restriction is violated; default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods. If not provided, shape parameters will be inferred from
the signature of the private methods, ``_pdf`` and ``_cdf`` of the
instance.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : None or int or ``numpy.random.RandomState`` instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance.
Default is None.
Methods
-------
rvs
pdf
logpdf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
fit
fit_loc_scale
nnlf
Notes
-----
Public methods of an instance of a distribution class (e.g., ``pdf``,
``cdf``) check their arguments and pass valid arguments to private,
computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid
if it is within the support of a distribution, ``self.a <= x <= self.b``.
Whether a shape parameter is valid is decided by an ``_argcheck`` method
(which defaults to checking that its arguments are strictly positive).
**Subclassing**
New random variables can be defined by subclassing the `rv_continuous` class
and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
to location 0 and scale 1).
If positive argument checking is not correct for your RV
then you will also need to re-define the ``_argcheck`` method.
Correct, but potentially slow defaults exist for the remaining
methods but for speed and/or accuracy you can over-ride::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
Rarely would you override ``_isf``, ``_sf`` or ``_logsf``, but you could.
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
There are additional (internal and private) generic methods that can
be useful for cross-checking and for debugging, but they might not work
in all cases when directly called.
A note on ``shapes``: subclasses need not specify them explicitly. In this
case, `shapes` will be automatically deduced from the signatures of the
overridden methods (`pdf`, `cdf` etc).
If, for some reason, you prefer to avoid relying on introspection, you can
specify ``shapes`` explicitly as an argument to the instance constructor.
**Frozen Distributions**
Normally, you must provide shape parameters (and, optionally, location
and scale parameters) to each call of a method of a distribution.
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
frozen RV object with the same methods but holding the given shape,
location, and scale fixed
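A minimal sketch of freezing, assuming the ``norm`` instance built on
this class is available:
>>> from scipy.stats import norm
>>> rv = norm(loc=2.0, scale=0.5)
>>> np.allclose((rv.mean(), rv.std()), (2.0, 0.5))
True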
**Statistics**
Statistics are computed using numerical integration by default.
For speed you can redefine this using ``_stats``:
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument ``moments``, which is a
string composed of "m", "v", "s", and/or "k".
Only the components appearing in the string should be computed and
returned in the order "m", "v", "s", or "k" with missing values
returned as None.
Alternatively, you can override ``_munp``, which takes ``n`` and shape
parameters and returns the n-th non-central moment of the distribution.
Examples
--------
To create a new Gaussian distribution, we would do the following:
>>> from scipy.stats import rv_continuous
>>> class gaussian_gen(rv_continuous):
... "Gaussian distribution"
... def _pdf(self, x):
... return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)
>>> gaussian = gaussian_gen(name='gaussian')
``scipy.stats`` distributions are *instances*, so here we subclass
`rv_continuous` and create an instance. With this, we now have
a fully functional distribution with all relevant methods automagically
generated by the framework.
Note that above we defined a standard normal distribution, with zero mean
and unit variance. Shifting and scaling of the distribution can be done
by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)``
essentially computes ``y = (x - loc) / scale`` and
``gaussian._pdf(y) / scale``.
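To speed up ``stats`` one can also supply ``_stats`` directly, as
described above. A minimal sketch with a hypothetical ``gaussian2_gen``
subclass (not a library distribution) that returns the four standard
moments explicitly:
>>> from scipy.stats import rv_continuous
>>> class gaussian2_gen(rv_continuous):
...     "Gaussian distribution with moments supplied via _stats"
...     def _pdf(self, x):
...         return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)
...     def _stats(self):
...         return 0., 1., 0., 0.
>>> gaussian2 = gaussian2_gen(name='gaussian2')
>>> m, v = gaussian2.stats(moments='mv')
>>> float(m), float(v)
(0.0, 1.0)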
"""
def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,
badvalue=None, name=None, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_continuous, self).__init__(seed)
# save the ctor parameters, cf generic freeze
self._ctor_param = dict(
momtype=momtype, a=a, b=b, xtol=xtol,
badvalue=badvalue, name=name, longname=longname,
shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
self.xtol = xtol
self._size = 1
self.moment_type = momtype
self.shapes = shapes
self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],
locscale_in='loc=0, scale=1',
locscale_out='loc, scale')
# nin correction
self._ppfvec = vectorize(self._ppf_single, otypes='d')
self._ppfvec.nin = self.numargs + 1
self.vecentropy = vectorize(self._entropy, otypes='d')
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self._cdfvec.nin = self.numargs + 1
# backwards compat. these were removed in 0.14.0, put back but
# deprecated in 0.14.1:
self.vecfunc = np.deprecate(self._ppfvec, "vecfunc")
self.veccdf = np.deprecate(self._cdfvec, "veccdf")
self.extradoc = extradoc
if momtype == 0:
self.generic_moment = vectorize(self._mom0_sc, otypes='d')
else:
self.generic_moment = vectorize(self._mom1_sc, otypes='d')
# Because of the *args argument of _mom0_sc, vectorize cannot count the
# number of arguments correctly.
self.generic_moment.nin = self.numargs + 1
if longname is None:
# use the correct article for names starting with a vowel
if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict,
discrete='continuous')
else:
dct = dict(distcont)
self._construct_doc(docdict, dct.get(self.name))
def _ppf_to_solve(self, x, q, *args):
return self.cdf(*(x, )+args)-q
def _ppf_single(self, q, *args):
left = right = None
if self.a > -np.inf:
left = self.a
if self.b < np.inf:
right = self.b
factor = 10.
if not left: # i.e. self.a = -inf
left = -1.*factor
while self._ppf_to_solve(left, q, *args) > 0.:
right = left
left *= factor
# left is now such that cdf(left) < q
if not right: # i.e. self.b = inf
right = factor
while self._ppf_to_solve(right, q, *args) < 0.:
left = right
right *= factor
# right is now such that cdf(right) > q
return optimize.brentq(self._ppf_to_solve,
left, right, args=(q,)+args, xtol=self.xtol)
# moment from definition
def _mom_integ0(self, x, m, *args):
return x**m * self.pdf(x, *args)
def _mom0_sc(self, m, *args):
return integrate.quad(self._mom_integ0, self.a, self.b,
args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q, m, *args):
return (self.ppf(q, *args))**m
def _mom1_sc(self, m, *args):
return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
def _pdf(self, x, *args):
return derivative(self._cdf, x, dx=1e-5, args=args, order=5)
## Could also define any of these
def _logpdf(self, x, *args):
return log(self._pdf(x, *args))
def _cdf_single(self, x, *args):
return integrate.quad(self._pdf, self.a, x, args=args)[0]
def _cdf(self, x, *args):
return self._cdfvec(x, *args)
## generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined
## in rv_generic
def pdf(self, x, *args, **kwds):
"""
Probability density function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
pdf : ndarray
Probability density function evaluated at x
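Examples
--------
A minimal sketch, assuming the ``norm`` instance from ``scipy.stats``:
the standard normal density at 0 is 1/sqrt(2*pi).
>>> from scipy.stats import norm
>>> np.allclose(norm.pdf(0.0), 1.0 / np.sqrt(2.0 * np.pi))
True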
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = asarray((x-loc)*1.0/scale)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._pdf(*goodargs) / scale)
if output.ndim == 0:
return output[()]
return output
def logpdf(self, x, *args, **kwds):
"""
Log of the probability density function at x of the given RV.
This uses a more numerically accurate calculation if available.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logpdf : array_like
Log of the probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = asarray((x-loc)*1.0/scale)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._logpdf(*goodargs) - log(scale))
if output.ndim == 0:
return output[()]
return output
def cdf(self, x, *args, **kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `x`
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, x, *args, **kwds):
"""
Log of the cumulative distribution function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, x, *args, **kwds):
"""
Survival function (1 - `cdf`) at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
sf : array_like
Survival function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self, x, *args, **kwds):
"""
Log of the survival function of the given RV.
Returns the log of the "survival function," defined as (1 - `cdf`),
evaluated at `x`.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `x`.
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""
Percent point function (inverse of `cdf`) at q of the given RV.
Parameters
----------
q : array_like
lower tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : array_like
quantile corresponding to the lower tail probability q.
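Examples
--------
A minimal sketch, assuming the ``norm`` instance from ``scipy.stats``:
``ppf`` inverts ``cdf``.
>>> from scipy.stats import norm
>>> np.allclose(norm.cdf(norm.ppf(0.75)), 0.75)
True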
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 0)
cond3 = cond0 & (q == 1)
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue)
lower_bound = self.a * scale + loc
upper_bound = self.b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._ppf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self, q, *args, **kwds):
"""
Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
upper tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : ndarray or scalar
Quantile corresponding to the upper tail probability q.
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 1)
cond3 = cond0 & (q == 0)
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue)
lower_bound = self.a * scale + loc
upper_bound = self.b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._isf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def _nnlf(self, x, *args):
return -sum(self._logpdf(x, *args), axis=0)
def nnlf(self, theta, x):
'''Return negative loglikelihood function.
Notes
-----
This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the
parameters (including loc and scale).
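For instance, assuming the ``norm`` instance from ``scipy.stats``, the
value at the true parameters of a standard normal sample is positive:
>>> from scipy.stats import norm
>>> x = norm.rvs(size=100, random_state=0)
>>> norm.nnlf((0.0, 1.0), x) > 0
True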
'''
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError:
raise ValueError("Not enough input arguments.")
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
cond0 = (x <= self.a) | (self.b <= x)
if (any(cond0)):
return inf
else:
N = len(x)
return self._nnlf(x, *args) + N * log(scale)
def _penalized_nnlf(self, theta, x):
''' Return negative loglikelihood function,
i.e., - sum (log pdf(x, theta), axis=0)
where theta are the parameters (including loc and scale)
'''
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError:
raise ValueError("Not enough input arguments.")
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
loginf = log(_XMAX)
if np.isneginf(self.a).all() and np.isinf(self.b).all():
Nbad = 0
else:
cond0 = (x <= self.a) | (self.b <= x)
Nbad = sum(cond0)
if Nbad > 0:
x = argsreduce(~cond0, x)[0]
N = len(x)
return self._nnlf(x, *args) + N*log(scale) + Nbad * 100.0 * loginf
# return starting point for fit (shape arguments + loc + scale)
def _fitstart(self, data, args=None):
if args is None:
args = (1.0,)*self.numargs
loc, scale = self._fit_loc_scale_support(data, *args)
return args + (loc, scale)
# Return the (possibly reduced) function to optimize in order to find MLE
# estimates for the .fit method
def _reduce_func(self, args, kwds):
# First of all, convert fshapes params to fnum: eg for stats.beta,
# shapes='a, b'. To fix `a`, can specify either `f1` or `fa`.
# Convert the latter into the former.
if self.shapes:
shapes = self.shapes.replace(',', ' ').split()
for j, s in enumerate(shapes):
val = kwds.pop('f' + s, None) or kwds.pop('fix_' + s, None)
if val is not None:
key = 'f%d' % j
if key in kwds:
raise ValueError("Duplicate entry for %s." % key)
else:
kwds[key] = val
args = list(args)
Nargs = len(args)
fixedn = []
names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
x0 = []
for n, key in enumerate(names):
if key in kwds:
fixedn.append(n)
args[n] = kwds.pop(key)
else:
x0.append(args[n])
if len(fixedn) == 0:
func = self._penalized_nnlf
restore = None
else:
if len(fixedn) == Nargs:
raise ValueError(
"All parameters fixed. There is nothing to optimize.")
def restore(args, theta):
# Replace with theta for all numbers not in fixedn
# This allows the non-fixed values to vary, but
# we still call self.nnlf with all parameters.
i = 0
for n in range(Nargs):
if n not in fixedn:
args[n] = theta[i]
i += 1
return args
def func(theta, x):
newtheta = restore(args[:], theta)
return self._penalized_nnlf(newtheta, x)
return x0, func, restore, args
def fit(self, data, *args, **kwds):
"""
Return MLEs for shape, location, and scale parameters from data.
MLE stands for Maximum Likelihood Estimate. Starting estimates for
the fit are given by input arguments; for any arguments not provided
with starting estimates, ``self._fitstart(data)`` is called to generate
such.
One can hold some parameters fixed to specific values by passing in
keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
and ``floc`` and ``fscale`` (for location and scale parameters,
respectively).
Parameters
----------
data : array_like
Data to use in calculating the MLEs.
args : floats, optional
Starting value(s) for any shape-characterizing arguments (those not
provided will be determined by a call to ``_fitstart(data)``).
No default value.
kwds : floats, optional
Starting values for the location and scale parameters; no default.
Special keyword arguments are recognized as holding certain
parameters fixed:
- f0...fn : hold respective shape parameters fixed.
Alternatively, shape parameters to fix can be specified by name.
For example, if ``self.shapes == "a, b"``, ``fa`` and ``fix_a``
are equivalent to ``f0``, and ``fb`` and ``fix_b`` are
equivalent to ``f1``.
- floc : hold location parameter fixed to specified value.
- fscale : hold scale parameter fixed to specified value.
- optimizer : The optimizer to use. The optimizer must take ``func``,
and starting position as the first two arguments,
plus ``args`` (for extra arguments to pass to the
function to be optimized) and ``disp=0`` to suppress
output as keyword arguments.
Returns
-------
shape, loc, scale : tuple of floats
MLEs for any shape statistics, followed by those for location and
scale.
Notes
-----
This fit is computed by maximizing a log-likelihood function, with
penalty applied for samples outside of range of the distribution. The
returned answer is not guaranteed to be the globally optimal MLE, it
may only be locally optimal, or the optimization may fail altogether.
Examples
--------
Generate some data to fit: draw random variates from the `beta`
distribution
>>> from scipy.stats import beta
>>> a, b = 1., 2.
>>> x = beta.rvs(a, b, size=1000)
Now we can fit all four parameters (``a``, ``b``, ``loc`` and ``scale``):
>>> a1, b1, loc1, scale1 = beta.fit(x)
We can also use some prior knowledge about the dataset: let's keep
``loc`` and ``scale`` fixed:
>>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1)
>>> loc1, scale1
(0, 1)
We can also keep shape parameters fixed by using ``f``-keywords. To
keep the zero-th shape parameter ``a`` equal 1, use ``f0=1`` or,
equivalently, ``fa=1``:
>>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1)
>>> a1
1
"""
Narg = len(args)
if Narg > self.numargs:
raise TypeError("Too many input arguments.")
start = [None]*2
if (Narg < self.numargs) or not ('loc' in kwds and
'scale' in kwds):
# get distribution specific starting locations
start = self._fitstart(data)
args += start[Narg:-2]
loc = kwds.pop('loc', start[-2])
scale = kwds.pop('scale', start[-1])
args += (loc, scale)
x0, func, restore, args = self._reduce_func(args, kwds)
optimizer = kwds.pop('optimizer', optimize.fmin)
# convert string to function in scipy.optimize
if not callable(optimizer) and isinstance(optimizer, string_types):
if not optimizer.startswith('fmin_'):
optimizer = "fmin_"+optimizer
if optimizer == 'fmin_':
optimizer = 'fmin'
try:
optimizer = getattr(optimize, optimizer)
except AttributeError:
raise ValueError("%s is not a valid optimizer" % optimizer)
# by now kwds must be empty, since everybody took what they needed
if kwds:
raise TypeError("Unknown arguments: %s." % kwds)
vals = optimizer(func, x0, args=(ravel(data),), disp=0)
if restore is not None:
vals = restore(args, vals)
vals = tuple(vals)
return vals
def _fit_loc_scale_support(self, data, *args):
"""
Estimate loc and scale parameters from data accounting for support.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
data = np.asarray(data)
# Estimate location and scale according to the method of moments.
loc_hat, scale_hat = self.fit_loc_scale(data, *args)
# Compute the support according to the shape parameters.
self._argcheck(*args)
a, b = self.a, self.b
support_width = b - a
# If the support is empty then return the moment-based estimates.
if support_width <= 0:
return loc_hat, scale_hat
# Compute the proposed support according to the loc and scale estimates.
a_hat = loc_hat + a * scale_hat
b_hat = loc_hat + b * scale_hat
# Use the moment-based estimates if they are compatible with the data.
data_a = np.min(data)
data_b = np.max(data)
if a_hat < data_a and data_b < b_hat:
return loc_hat, scale_hat
# Otherwise find other estimates that are compatible with the data.
data_width = data_b - data_a
rel_margin = 0.1
margin = data_width * rel_margin
# For a finite interval, both the location and scale
# should have interesting values.
if support_width < np.inf:
loc_hat = (data_a - a) - margin
scale_hat = (data_width + 2 * margin) / support_width
return loc_hat, scale_hat
# For a one-sided interval, use only an interesting location parameter.
if a > -np.inf:
return (data_a - a) - margin, 1
elif b < np.inf:
return (data_b - b) + margin, 1
else:
raise RuntimeError
def fit_loc_scale(self, data, *args):
"""
Estimate loc and scale parameters from data using 1st and 2nd moments.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
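Examples
--------
A minimal sketch, assuming the ``norm`` instance from ``scipy.stats``:
for normal data, the moment-based estimates are close to the sample
mean and standard deviation.
>>> from scipy.stats import norm
>>> data = norm.rvs(loc=5, scale=2, size=500, random_state=1)
>>> loc_hat, scale_hat = norm.fit_loc_scale(data)
>>> np.allclose([loc_hat, scale_hat], [5, 2], atol=0.5)
True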
"""
mu, mu2 = self.stats(*args, **{'moments': 'mv'})
tmp = asarray(data)
muhat = tmp.mean()
mu2hat = tmp.var()
Shat = sqrt(mu2hat / mu2)
Lhat = muhat - Shat*mu
if not np.isfinite(Lhat):
Lhat = 0
if not (np.isfinite(Shat) and (0 < Shat)):
Shat = 1
return Lhat, Shat
@np.deprecate
def est_loc_scale(self, data, *args):
"""This function is deprecated, use self.fit_loc_scale(data) instead.
"""
return self.fit_loc_scale(data, *args)
def _entropy(self, *args):
def integ(x):
val = self._pdf(x, *args)
return entr(val)
# upper limit is often inf, so suppress warnings when integrating
olderr = np.seterr(over='ignore')
h = integrate.quad(integ, self.a, self.b)[0]
np.seterr(**olderr)
if not np.isnan(h):
return h
else:
# try with different limits if integration problems
low, upp = self.ppf([1e-10, 1. - 1e-10], *args)
if np.isinf(self.b):
upper = upp
else:
upper = self.b
if np.isinf(self.a):
lower = low
else:
lower = self.a
return integrate.quad(integ, lower, upper)[0]
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds):
"""Calculate expected value of a function with respect to the
distribution.
The expected value of a function ``f(x)`` with respect to a
distribution ``dist`` is defined as::
E[f(x)] = Integral(f(x) * dist.pdf(x)) from x = lbound to ubound
Parameters
----------
func : callable, optional
Function for which integral is calculated. Takes only one argument.
The default is the identity mapping f(x) = x.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter (default=0).
scale : float, optional
Scale parameter (default=1).
lb, ub : scalar, optional
Lower and upper bound for integration. Default is set to the
support of the distribution.
conditional : bool, optional
If True, the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Default is False.
Additional keyword arguments are passed to the integration routine.
Returns
-------
expect : float
The calculated expected value.
Notes
-----
The integration behavior of this function is inherited from
`integrate.quad`.
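Examples
--------
A minimal sketch, assuming the ``norm`` instance from ``scipy.stats``:
the expectation of ``x**2`` under the standard normal is its variance, 1.
>>> from scipy.stats import norm
>>> np.allclose(norm.expect(lambda x: x**2), 1.0)
True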
"""
lockwds = {'loc': loc,
'scale': scale}
self._argcheck(*args)
if func is None:
def fun(x, *args):
return x * self.pdf(x, *args, **lockwds)
else:
def fun(x, *args):
return func(x) * self.pdf(x, *args, **lockwds)
if lb is None:
lb = loc + self.a * scale
if ub is None:
ub = loc + self.b * scale
if conditional:
invfac = (self.sf(lb, *args, **lockwds)
- self.sf(ub, *args, **lockwds))
else:
invfac = 1.0
kwds['args'] = args
# Silence floating point warnings from integration.
olderr = np.seterr(all='ignore')
vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac
np.seterr(**olderr)
return vals
## Handlers for generic case where xk and pk are given
## The _drv prefix probably means discrete random variable.
def _drv_pmf(self, xk, *args):
try:
return self.P[xk]
except KeyError:
return 0.0
def _drv_cdf(self, xk, *args):
indx = argmax((self.xk > xk), axis=-1)-1
return self.F[self.xk[indx]]
def _drv_ppf(self, q, *args):
indx = argmax((self.qvals >= q), axis=-1)
return self.Finv[self.qvals[indx]]
def _drv_nonzero(self, k, *args):
return 1
def _drv_moment(self, n, *args):
n = asarray(n)
return sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0)
def _drv_moment_gen(self, t, *args):
t = asarray(t)
return sum(exp(self.xk * t[np.newaxis, ...]) * self.pk, axis=0)
def _drv2_moment(self, n, *args):
"""Non-central moment of discrete distribution."""
# many changes, originally not even a return
tot = 0.0
diff = 1e100
# pos = self.a
pos = max(0.0, 1.0*self.a)
count = 0
# handle cases with infinite support
ulimit = max(1000, (min(self.b, 1000) + max(self.a, -1000))/2.0)
llimit = min(-1000, (min(self.b, 1000) + max(self.a, -1000))/2.0)
while (pos <= self.b) and ((pos <= ulimit) or
(diff > self.moment_tol)):
diff = np.power(pos, n) * self.pmf(pos, *args)
# use pmf because _pmf does not check support in randint and there
# might be problems ? with correct self.a, self.b at this stage
tot += diff
pos += self.inc
count += 1
if self.a < 0: # handle case when self.a = -inf
diff = 1e100
pos = -self.inc
while (pos >= self.a) and ((pos >= llimit) or
(diff > self.moment_tol)):
diff = np.power(pos, n) * self.pmf(pos, *args)
# using pmf instead of _pmf, see above
tot += diff
pos -= self.inc
count += 1
return tot
def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm
b = self.b
a = self.a
if isinf(b): # Be sure ending point is > q
b = int(max(100*q, 10))
while 1:
if b >= self.b:
qb = 1.0
break
qb = self._cdf(b, *args)
if (qb < q):
b += 10
else:
break
else:
qb = 1.0
if isinf(a): # be sure starting point < q
a = int(min(-100*q, -10))
while 1:
if a <= self.a:
qb = 0.0
break
qa = self._cdf(a, *args)
if (qa > q):
a -= 10
else:
break
else:
qa = self._cdf(a, *args)
while 1:
if (qa == q):
return a
if (qb == q):
return b
if b <= a+1:
# testcase: return wrong number at lower index
# python -c "from scipy.stats import zipf;print zipf.ppf(0.01, 2)" wrong
# python -c "from scipy.stats import zipf;print zipf.ppf([0.01, 0.61, 0.77, 0.83], 2)"
# python -c "from scipy.stats import logser;print logser.ppf([0.1, 0.66, 0.86, 0.93], 0.6)"
if qa > q:
return a
else:
return b
c = int((a+b)/2.0)
qc = self._cdf(c, *args)
if (qc < q):
if a != c:
a = c
else:
raise RuntimeError('updating stopped, endless loop')
qa = qc
elif (qc > q):
if b != c:
b = c
else:
raise RuntimeError('updating stopped, endless loop')
qb = qc
else:
return c
def entropy(pk, qk=None, base=None):
"""Calculate the entropy of a distribution for given probability values.
If only probabilities `pk` are given, the entropy is calculated as
``S = -sum(pk * log(pk), axis=0)``.
If `qk` is not None, then compute the Kullback-Leibler divergence
``S = sum(pk * log(pk / qk), axis=0)``.
This routine will normalize `pk` and `qk` if they don't sum to 1.
Parameters
----------
pk : sequence
Defines the (discrete) distribution. ``pk[i]`` is the (possibly
unnormalized) probability of event ``i``.
qk : sequence, optional
Sequence against which the relative entropy is computed. Should be in
the same format as `pk`.
base : float, optional
The logarithmic base to use, defaults to ``e`` (natural logarithm).
Returns
-------
S : float
The calculated entropy.
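Examples
--------
A minimal check of the behaviour described above: the entropy of a fair
coin is log(2), or exactly 1 bit with ``base=2``.
>>> from scipy.stats import entropy
>>> np.allclose(entropy([0.5, 0.5]), np.log(2))
True
>>> np.allclose(entropy([0.5, 0.5], base=2), 1.0)
True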
"""
pk = asarray(pk)
pk = 1.0*pk / sum(pk, axis=0)
if qk is None:
vec = entr(pk)
else:
qk = asarray(qk)
if len(qk) != len(pk):
raise ValueError("qk and pk must have same length.")
qk = 1.0*qk / sum(qk, axis=0)
vec = kl_div(pk, qk)
S = sum(vec, axis=0)
if base is not None:
S /= log(base)
return S
# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
"""
A generic discrete random variable class meant for subclassing.
`rv_discrete` is a base class to construct specific distribution classes
and instances for discrete random variables. It can also be used
to construct an arbitrary distribution defined by a list of support
points and corresponding probabilities.
Parameters
----------
a : float, optional
Lower bound of the support of the distribution, default: 0
b : float, optional
Upper bound of the support of the distribution, default: plus infinity
moment_tol : float, optional
The tolerance for the generic calculation of moments.
values : tuple of two array_like, optional
``(xk, pk)`` where ``xk`` are integers with non-zero
probabilities ``pk`` with ``sum(pk) = 1``.
inc : integer, optional
Increment for the support of the distribution.
Default is 1. (other values have not been tested)
badvalue : float, optional
The value in the result arrays that indicates a value for which
some argument restriction is violated; default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a distribution
that takes two integers as the two shape arguments for all its methods.
If not provided, shape parameters will be inferred from
the signatures of the private methods, ``_pmf`` and ``_cdf`` of
the instance.
extradoc : str, optional
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : None or int or ``numpy.random.RandomState`` instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None, the global np.random state is used.
If integer, it is used to seed the local RandomState instance.
Default is None.
Methods
-------
rvs
pmf
logpmf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
Notes
-----
This class is similar to `rv_continuous`, the main differences being:
- the support of the distribution is a set of integers
- instead of the probability density function, ``pdf`` (and the
corresponding private ``_pdf``), this class defines the
*probability mass function*, `pmf` (and the corresponding
private ``_pmf``.)
- scale parameter is not defined.
To create a new discrete distribution, we would do the following:
>>> from scipy.stats import rv_discrete
>>> class poisson_gen(rv_discrete):
... "Poisson distribution"
... def _pmf(self, k, mu):
... return exp(-mu) * mu**k / factorial(k)
and create an instance::
>>> poisson = poisson_gen(name="poisson")
Note that above we defined the Poisson distribution in the standard form.
Shifting the distribution can be done by providing the ``loc`` parameter
to the methods of the instance. For example, ``poisson.pmf(x, mu, loc)``
delegates the work to ``poisson._pmf(x-loc, mu)``.
**Discrete distributions from a list of probabilities**
Alternatively, you can construct an arbitrary discrete rv defined
on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the
``values`` keyword argument to the `rv_discrete` constructor.
Examples
--------
Custom made discrete distribution:
>>> from scipy import stats
>>> xk = np.arange(7)
>>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)
>>> custm = stats.rv_discrete(name='custm', values=(xk, pk))
>>>
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
>>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')
>>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)
>>> plt.show()
Random number generation:
>>> R = custm.rvs(size=100)
"""
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_discrete, self).__init__(seed)
# cf generic freeze
self._ctor_param = dict(
a=a, b=b, name=name, badvalue=badvalue,
moment_tol=moment_tol, values=values, inc=inc,
longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.a = a
self.b = b
self.name = name
self.moment_tol = moment_tol
self.inc = inc
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self.return_integers = 1
self.vecentropy = vectorize(self._entropy)
self.shapes = shapes
self.extradoc = extradoc
if values is not None:
self.xk, self.pk = values
self.return_integers = 0
indx = argsort(ravel(self.xk))
self.xk = take(ravel(self.xk), indx, 0)
self.pk = take(ravel(self.pk), indx, 0)
self.a = self.xk[0]
self.b = self.xk[-1]
self.P = dict(zip(self.xk, self.pk))
self.qvals = np.cumsum(self.pk, axis=0)
self.F = dict(zip(self.xk, self.qvals))
decreasing_keys = sorted(self.F.keys(), reverse=True)
self.Finv = dict((self.F[k], k) for k in decreasing_keys)
self._ppf = instancemethod(vectorize(_drv_ppf, otypes='d'),
self, rv_discrete)
self._pmf = instancemethod(vectorize(_drv_pmf, otypes='d'),
self, rv_discrete)
self._cdf = instancemethod(vectorize(_drv_cdf, otypes='d'),
self, rv_discrete)
self._nonzero = instancemethod(_drv_nonzero, self, rv_discrete)
self.generic_moment = instancemethod(_drv_moment,
self, rv_discrete)
self.moment_gen = instancemethod(_drv_moment_gen,
self, rv_discrete)
self._construct_argparser(meths_to_inspect=[_drv_pmf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
else:
self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
# nin correction needs to be after we know numargs
# correct nin for generic moment vectorization
_vec_generic_moment = vectorize(_drv2_moment, otypes='d')
_vec_generic_moment.nin = self.numargs + 2
self.generic_moment = instancemethod(_vec_generic_moment,
self, rv_discrete)
# backwards compat. was removed in 0.14.0, put back but
# deprecated in 0.14.1:
self.vec_generic_moment = np.deprecate(_vec_generic_moment,
"vec_generic_moment",
"generic_moment")
# correct nin for ppf vectorization
_vppf = vectorize(_drv2_ppfsingle, otypes='d')
_vppf.nin = self.numargs + 2 # +1 is for self
self._ppfvec = instancemethod(_vppf,
self, rv_discrete)
# now that self.numargs is defined, we can adjust nin
self._cdfvec.nin = self.numargs + 1
# generate docstring for subclass instances
if longname is None:
# use the correct article for names starting with a vowel
if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict_discrete,
discrete='discrete')
else:
dct = dict(distdiscrete)
self._construct_doc(docdict_discrete, dct.get(self.name))
#discrete RV do not have the scale parameter, remove it
self.__doc__ = self.__doc__.replace(
'\n scale : array_like, '
'optional\n scale parameter (default=1)', '')
def _nonzero(self, k, *args):
return floor(k) == k
def _pmf(self, k, *args):
return self._cdf(k, *args) - self._cdf(k-1, *args)
def _logpmf(self, k, *args):
return log(self._pmf(k, *args))
def _cdf_single(self, k, *args):
m = arange(int(self.a), k+1)
return sum(self._pmf(m, *args), axis=0)
def _cdf(self, x, *args):
k = floor(x)
return self._cdfvec(k, *args)
# generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic
def rvs(self, *args, **kwargs):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
size : int or tuple of ints, optional
Defining number of random variates (Default is 1). Note that `size`
has to be given as keyword, not as positional argument.
random_state : None or int or ``np.random.RandomState`` instance, optional
If int or RandomState, use it for drawing the random variates.
If None, rely on ``self.random_state``.
Default is None.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
kwargs['discrete'] = True
return super(rv_discrete, self).rvs(*args, **kwargs)
def pmf(self, k, *args, **kwds):
"""
Probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter (default=0).
Returns
-------
pmf : array_like
Probability mass function evaluated at k
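Examples
--------
A minimal sketch, assuming the ``bernoulli`` instance from ``scipy.stats``:
>>> from scipy.stats import bernoulli
>>> np.allclose(bernoulli.pmf([0, 1], 0.3), [0.7, 0.3])
True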
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logpmf(self, k, *args, **kwds):
"""
Log of the probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter. Default is 0.
Returns
-------
logpmf : array_like
Log of the probability mass function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logpmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def cdf(self, k, *args, **kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2*(cond0 == cond0), 1.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, k, *args, **kwds):
"""
Log of the cumulative distribution function at k of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2*(cond0 == cond0), 0.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, k, *args, **kwds):
"""
Survival function (1 - `cdf`) at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
sf : array_like
Survival function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 1.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._sf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logsf(self, k, *args, **kwds):
"""
Log of the survival function of the given RV.
Returns the log of the "survival function," defined as 1 - `cdf`,
evaluated at `k`.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 0.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""
Percent point function (inverse of `cdf`) at q of the given RV.
Parameters
----------
q : array_like
Lower tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : array_like
Quantile corresponding to the lower tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue, typecode='d')
# output type 'd' to handle nan and inf
place(output, (q == 0)*(cond == cond), self.a-1)
place(output, cond2, self.b)
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._ppf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self, q, *args, **kwds):
"""
Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
Upper tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : ndarray or scalar
Quantile corresponding to the upper tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
# same problem as with ppf; copied from ppf and changed
output = valarray(shape(cond), value=self.badvalue, typecode='d')
# output type 'd' to handle nan and inf
place(output, (q == 0)*(cond == cond), self.b)
place(output, cond2, self.a-1)
# call place only if at least 1 valid argument
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
# PB same as ticket 766
place(output, cond, self._isf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def _entropy(self, *args):
if hasattr(self, 'pk'):
return entropy(self.pk)
else:
mu = int(self.stats(*args, **{'moments': 'm'}))
val = self.pmf(mu, *args)
ent = entr(val)
k = 1
term = 1.0
while (abs(term) > _EPS):
val = self.pmf(mu+k, *args)
term = entr(val)
val = self.pmf(mu-k, *args)
term += entr(val)
k += 1
ent += term
return ent
def expect(self, func=None, args=(), loc=0, lb=None, ub=None,
conditional=False):
"""
Calculate the expected value of a function with respect to the distribution,
for a discrete distribution.
Parameters
----------
func : callable, optional
Function for which the expectation value is calculated.
Takes only one argument.
The default is the identity mapping f(k) = k.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter.
Default is 0.
lb, ub : int, optional
Lower and upper bound for integration, default is set to the
support of the distribution, inclusive (``lb <= k <= ub``).
conditional : bool, optional
If true then the expectation is corrected by the conditional
probability of the summation interval. The return value is the
expectation of the function, `func`, conditional on being in
the given interval (k such that ``lb <= k <= ub``).
Default is False.
Returns
-------
expect : float
Expected value.
Notes
-----
* function is not vectorized
* accuracy: uses self.moment_tol as the stopping criterion;
for a heavy-tailed distribution, e.g. zipf(4), the accuracy for the
mean and variance in the example is only 1e-5, and
increasing the precision (lowering moment_tol) makes zipf very slow
* suppnmin=100 is an internal parameter for the minimum number of points
to evaluate; it could be exposed as a keyword parameter. To evaluate
functions with non-monotonic shapes, the points include the integers in
(-suppnmin, suppnmin)
* maxcount=1000 limits the number of points that are evaluated,
breaking the loop for infinite sums
(a maximum of suppnmin+1000 positive plus suppnmin+1000 negative
integers are evaluated)
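Examples
--------
A minimal illustration (the summation only converges up to ``self.moment_tol``,
so the result is approximate rather than exact):
>>> from scipy import stats
>>> second_moment = stats.poisson.expect(lambda k: k ** 2, args=(2,))
>>> abs(second_moment - 6.0) < 1e-6   # E[X**2] = mu + mu**2 for Poisson(mu=2)
True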
"""
# moment_tol = 1e-12 # increase compared to self.moment_tol,
# too slow for only small gain in precision for zipf
# avoid endless loop with unbound integral, eg. var of zipf(2)
maxcount = 1000
suppnmin = 100 # minimum number of points to evaluate (+ and -)
if func is None:
def fun(x):
# loc and args from outer scope
return (x+loc)*self._pmf(x, *args)
else:
def fun(x):
# loc and args from outer scope
return func(x+loc)*self._pmf(x, *args)
# used pmf because _pmf does not check support in randint and there
# might be problems(?) with correct self.a, self.b at this stage maybe
# not anymore, seems to work now with _pmf
self._argcheck(*args) # (re)generate scalar self.a and self.b
if lb is None:
lb = (self.a)
else:
lb = lb - loc # convert bound for standardized distribution
if ub is None:
ub = (self.b)
else:
ub = ub - loc # convert bound for standardized distribution
if conditional:
if np.isposinf(ub)[()]:
# work around bug: stats.poisson.sf(stats.poisson.b, 2) is nan
invfac = 1 - self.cdf(lb-1, *args)
else:
invfac = 1 - self.cdf(lb-1, *args) - self.sf(ub, *args)
else:
invfac = 1.0
tot = 0.0
low, upp = self._ppf(0.001, *args), self._ppf(0.999, *args)
low = max(min(-suppnmin, low), lb)
upp = min(max(suppnmin, upp), ub)
supp = np.arange(low, upp+1, self.inc) # check limits
tot = np.sum(fun(supp))
diff = 1e100
pos = upp + self.inc
count = 0
# handle cases with infinite support
while (pos <= ub) and (diff > self.moment_tol) and count <= maxcount:
diff = fun(pos)
tot += diff
pos += self.inc
count += 1
if self.a < 0: # handle case when self.a = -inf
diff = 1e100
pos = low - self.inc
while ((pos >= lb) and (diff > self.moment_tol) and
count <= maxcount):
diff = fun(pos)
tot += diff
pos -= self.inc
count += 1
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
return tot/invfac
def get_distribution_names(namespace_pairs, rv_base_class):
"""
Collect names of statistical distributions and their generators.
Parameters
----------
namespace_pairs : sequence
A snapshot of (name, value) pairs in the namespace of a module.
rv_base_class : class
The base class of random variable generator classes in a module.
Returns
-------
distn_names : list of strings
Names of the statistical distributions.
distn_gen_names : list of strings
Names of the generators of the statistical distributions.
Note that these are not simply the names of the statistical
distributions, with a _gen suffix added.
"""
distn_names = []
distn_gen_names = []
for name, value in namespace_pairs:
if name.startswith('_'):
continue
if name.endswith('_gen') and issubclass(value, rv_base_class):
distn_gen_names.append(name)
if isinstance(value, rv_base_class):
distn_names.append(name)
return distn_names, distn_gen_names
| bsd-3-clause |
kl0u/flink | flink-python/pyflink/table/table_environment.py | 1 | 88322 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import os
import sys
import tempfile
import warnings
from abc import ABCMeta, abstractmethod
from typing import Union, List, Tuple, Iterable
from py4j.java_gateway import get_java_class, get_method
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.table.sources import TableSource
from pyflink.common.typeinfo import TypeInformation
from pyflink.datastream.data_stream import DataStream
from pyflink.common import JobExecutionResult
from pyflink.dataset import ExecutionEnvironment
from pyflink.java_gateway import get_gateway
from pyflink.serializers import BatchedSerializer, PickleSerializer
from pyflink.table import Table, EnvironmentSettings, Module, Expression, ExplainDetail, TableSink
from pyflink.table.catalog import Catalog
from pyflink.table.descriptors import StreamTableDescriptor, BatchTableDescriptor, \
ConnectorDescriptor, ConnectTableDescriptor
from pyflink.table.serializers import ArrowSerializer
from pyflink.table.statement_set import StatementSet
from pyflink.table.table_config import TableConfig
from pyflink.table.table_result import TableResult
from pyflink.table.types import _to_java_type, _create_type_verifier, RowType, DataType, \
_infer_schema_from_data, _create_converter, from_arrow_type, RowField, create_arrow_schema, \
_to_java_data_type
from pyflink.table.udf import UserDefinedFunctionWrapper, AggregateFunction, udaf, \
UserDefinedAggregateFunctionWrapper, udtaf, TableAggregateFunction
from pyflink.table.utils import to_expression_jarray
from pyflink.util import utils
from pyflink.util.utils import get_j_env_configuration, is_local_deployment, load_java_class, \
to_j_explain_detail_arr
__all__ = [
'BatchTableEnvironment',
'StreamTableEnvironment',
'TableEnvironment'
]
class TableEnvironment(object, metaclass=ABCMeta):
"""
A table environment is the base class, entry point, and central context for creating Table
and SQL API programs.
It is unified for bounded and unbounded data processing.
A table environment is responsible for:
- Connecting to external systems.
- Registering and retrieving :class:`~pyflink.table.Table` and other meta objects from a
catalog.
- Executing SQL statements.
- Offering further configuration options.
The path in methods such as :func:`create_temporary_view`
should be a proper SQL identifier. The syntax is following
[[catalog-name.]database-name.]object-name, where the catalog name and database are optional.
For path resolution see :func:`use_catalog` and :func:`use_database`. All keywords or other
special characters need to be escaped.
Example: `cat.1`.`db`.`Table` resolves to an object named 'Table' (table is a reserved
keyword, thus must be escaped) in a catalog named 'cat.1' and database named 'db'.
.. note::
This environment is meant for pure table programs. If you would like to convert from or to
other Flink APIs, it might be necessary to use one of the available language-specific table
environments in the corresponding bridging modules.
"""
def __init__(self, j_tenv, serializer=PickleSerializer()):
self._j_tenv = j_tenv
self._is_blink_planner = TableEnvironment._judge_blink_planner(j_tenv)
self._serializer = serializer
# When running in MiniCluster, launch the Python UDF worker using the Python executable
# specified by sys.executable if users have not specified it explicitly via configuration
# python.executable.
self._set_python_executable_for_local_executor()
@staticmethod
def _judge_blink_planner(j_tenv):
if "getPlanner" not in dir(j_tenv):
return False
else:
j_planner_class = j_tenv.getPlanner().getClass()
j_blink_planner_class = get_java_class(
get_gateway().jvm.org.apache.flink.table.planner.delegation.PlannerBase)
return j_blink_planner_class.isAssignableFrom(j_planner_class)
def from_table_source(self, table_source: 'TableSource') -> 'Table':
"""
Creates a table from a table source.
Example:
::
>>> csv_table_source = CsvTableSource(
... csv_file_path, ['a', 'b'], [DataTypes.STRING(), DataTypes.BIGINT()])
>>> table_env.from_table_source(csv_table_source)
:param table_source: The table source used as table.
:return: The result table.
"""
warnings.warn("Deprecated in 1.11.", DeprecationWarning)
return Table(self._j_tenv.fromTableSource(table_source._j_table_source), self)
def register_catalog(self, catalog_name: str, catalog: Catalog):
"""
Registers a :class:`~pyflink.table.catalog.Catalog` under a unique name.
All tables registered in the :class:`~pyflink.table.catalog.Catalog` can be accessed.
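Example (a minimal sketch; assumes ``my_catalog`` is an already constructed
:class:`~pyflink.table.catalog.Catalog` instance):
::
>>> table_env.register_catalog("my_catalog", my_catalog)
>>> table_env.use_catalog("my_catalog")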
:param catalog_name: The name under which the catalog will be registered.
:param catalog: The catalog to register.
"""
self._j_tenv.registerCatalog(catalog_name, catalog._j_catalog)
def get_catalog(self, catalog_name: str) -> Catalog:
"""
Gets a registered :class:`~pyflink.table.catalog.Catalog` by name.
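Example (the catalog name is illustrative):
::
>>> catalog = table_env.get_catalog("my_catalog")
>>> if catalog is None:
...     print("No catalog registered under this name")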
:param catalog_name: The name to look up the :class:`~pyflink.table.catalog.Catalog`.
:return: The requested catalog, or None if there is no
registered catalog with the given name.
"""
catalog = self._j_tenv.getCatalog(catalog_name)
if catalog.isPresent():
return Catalog(catalog.get())
else:
return None
def load_module(self, module_name: str, module: Module):
"""
Loads a :class:`~pyflink.table.Module` under a unique name. Modules will be kept
in the loaded order.
ValidationException is thrown when there is already a module with the same name.
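Example (a minimal sketch; assumes ``my_module`` is an already constructed
:class:`~pyflink.table.Module` instance):
::
>>> table_env.load_module("my_module", my_module)
>>> "my_module" in table_env.list_modules()
True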
:param module_name: Name of the :class:`~pyflink.table.Module`.
:param module: The module instance.
.. versionadded:: 1.12.0
"""
self._j_tenv.loadModule(module_name, module._j_module)
def unload_module(self, module_name: str):
"""
Unloads a :class:`~pyflink.table.Module` with given name.
ValidationException is thrown when there is no module with the given name.
:param module_name: Name of the :class:`~pyflink.table.Module`.
.. versionadded:: 1.12.0
"""
self._j_tenv.unloadModule(module_name)
def create_java_temporary_system_function(self, name: str, function_class_name: str):
"""
Registers a java user defined function class as a temporary system function.
Compared to :func:`create_java_temporary_function`, system functions are
identified by a global name that is independent of the current catalog and current
database. Thus, this method allows extending the set of built-in system functions like
TRIM, ABS, etc.
Temporary functions can shadow permanent ones. If a permanent function under a given name
exists, it will be inaccessible in the current session. To make the permanent function
available again one can drop the corresponding temporary system function.
Example:
::
>>> table_env.create_java_temporary_system_function("func",
... "java.user.defined.function.class.name")
:param name: The name under which the function will be registered globally.
:param function_class_name: The java full qualified class name of the function class
containing the implementation. The function must have a
public no-argument constructor and can be found in the current
Java classloader.
.. versionadded:: 1.12.0
"""
gateway = get_gateway()
java_function = gateway.jvm.Thread.currentThread().getContextClassLoader() \
.loadClass(function_class_name)
self._j_tenv.createTemporarySystemFunction(name, java_function)
def create_temporary_system_function(self, name: str,
function: Union[UserDefinedFunctionWrapper,
AggregateFunction]):
"""
Registers a python user defined function class as a temporary system function.
Compared to :func:`create_temporary_function`, system functions are identified
by a global name that is independent of the current catalog and current database. Thus,
this method allows extending the set of built-in system functions like TRIM, ABS, etc.
Temporary functions can shadow permanent ones. If a permanent function under a given name
exists, it will be inaccessible in the current session. To make the permanent function
available again one can drop the corresponding temporary system function.
Example:
::
>>> table_env.create_temporary_system_function(
... "add_one", udf(lambda i: i + 1, result_type=DataTypes.BIGINT()))
>>> @udf(result_type=DataTypes.BIGINT())
... def add(i, j):
... return i + j
>>> table_env.create_temporary_system_function("add", add)
>>> class SubtractOne(ScalarFunction):
... def eval(self, i):
... return i - 1
>>> table_env.create_temporary_system_function(
... "subtract_one", udf(SubtractOne(), result_type=DataTypes.BIGINT()))
:param name: The name under which the function will be registered globally.
:param function: The function class containing the implementation. The function must have a
public no-argument constructor and can be found in the current Java
classloader.
.. versionadded:: 1.12.0
"""
function = self._wrap_aggregate_function_if_needed(function)
java_function = function._java_user_defined_function()
self._j_tenv.createTemporarySystemFunction(name, java_function)
def drop_temporary_system_function(self, name: str) -> bool:
"""
Drops a temporary system function registered under the given name.
If a permanent function with the given name exists, it will be used from now on for any
queries that reference this name.
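Example (assumes a function was registered as in :func:`create_temporary_system_function`):
::
>>> dropped = table_env.drop_temporary_system_function("add_one")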
:param name: The name under which the function has been registered globally.
:return: true if a function existed under the given name and was removed.
.. versionadded:: 1.12.0
"""
return self._j_tenv.dropTemporarySystemFunction(name)
def create_java_function(self, path: str, function_class_name: str,
ignore_if_exists: bool = None):
"""
Registers a java user defined function class as a catalog function in the given path.
Compared to system functions with a globally defined name, catalog functions are always
(implicitly or explicitly) identified by a catalog and database.
There must not be another function (temporary or permanent) registered under the same path.
Example:
::
>>> table_env.create_java_function("func", "java.user.defined.function.class.name")
:param path: The path under which the function will be registered.
See also the :class:`~pyflink.table.TableEnvironment` class description for
the format of the path.
:param function_class_name: The java full qualified class name of the function class
containing the implementation. The function must have a
public no-argument constructor and can be found in the current
Java classloader.
:param ignore_if_exists: If a function exists under the given path and this flag is set,
no operation is executed. An exception is thrown otherwise.
.. versionadded:: 1.12.0
"""
gateway = get_gateway()
java_function = gateway.jvm.Thread.currentThread().getContextClassLoader() \
.loadClass(function_class_name)
if ignore_if_exists is None:
self._j_tenv.createFunction(path, java_function)
else:
self._j_tenv.createFunction(path, java_function, ignore_if_exists)
def drop_function(self, path: str) -> bool:
"""
Drops a catalog function registered in the given path.
:param path: The path under which the function has been registered.
See also the :class:`~pyflink.table.TableEnvironment` class description for
the format of the path.
:return: true if a function existed in the given path and was removed.
.. versionadded:: 1.12.0
"""
return self._j_tenv.dropFunction(path)
def create_java_temporary_function(self, path: str, function_class_name: str):
"""
Registers a java user defined function class as a temporary catalog function.
Compared to :func:`create_java_temporary_system_function` with a globally
defined name, catalog functions are always (implicitly or explicitly) identified by a
catalog and database.
Temporary functions can shadow permanent ones. If a permanent function under a given name
exists, it will be inaccessible in the current session. To make the permanent function
available again one can drop the corresponding temporary function.
Example:
::
>>> table_env.create_java_temporary_function("func",
... "java.user.defined.function.class.name")
:param path: The path under which the function will be registered.
See also the :class:`~pyflink.table.TableEnvironment` class description for
the format of the path.
:param function_class_name: The java full qualified class name of the function class
containing the implementation. The function must have a
public no-argument constructor and can be founded in current
Java classloader.
.. versionadded:: 1.12.0
"""
gateway = get_gateway()
java_function = gateway.jvm.Thread.currentThread().getContextClassLoader() \
.loadClass(function_class_name)
self._j_tenv.createTemporaryFunction(path, java_function)
def create_temporary_function(self, path: str, function: Union[UserDefinedFunctionWrapper,
AggregateFunction]):
"""
Registers a python user defined function class as a temporary catalog function.
Compared to :func:`create_temporary_system_function` with a globally defined
name, catalog functions are always (implicitly or explicitly) identified by a catalog and
database.
Temporary functions can shadow permanent ones. If a permanent function under a given name
exists, it will be inaccessible in the current session. To make the permanent function
available again one can drop the corresponding temporary function.
Example:
::
>>> table_env.create_temporary_function(
... "add_one", udf(lambda i: i + 1, result_type=DataTypes.BIGINT()))
>>> @udf(result_type=DataTypes.BIGINT())
... def add(i, j):
... return i + j
>>> table_env.create_temporary_function("add", add)
>>> class SubtractOne(ScalarFunction):
... def eval(self, i):
... return i - 1
>>> table_env.create_temporary_function(
... "subtract_one", udf(SubtractOne(), result_type=DataTypes.BIGINT()))
:param path: The path under which the function will be registered.
See also the :class:`~pyflink.table.TableEnvironment` class description for
the format of the path.
:param function: The function class containing the implementation. The function must have a
public no-argument constructor and can be found in the current Java
classloader.
.. versionadded:: 1.12.0
"""
function = self._wrap_aggregate_function_if_needed(function)
java_function = function._java_user_defined_function()
self._j_tenv.createTemporaryFunction(path, java_function)
def drop_temporary_function(self, path: str) -> bool:
"""
Drops a temporary catalog function registered in the given path.
If a permanent function with the same path exists, it will be used from now on for any
queries that reference this path.
:param path: The path under which the function has been registered.
See also the :class:`~pyflink.table.TableEnvironment` class description for
the format of the path.
:return: true if a function existed in the given path and was removed.
.. versionadded:: 1.12.0
"""
return self._j_tenv.dropTemporaryFunction(path)
def register_table(self, name: str, table: Table):
"""
Registers a :class:`~pyflink.table.Table` under a unique name in the TableEnvironment's
catalog. Registered tables can be referenced in SQL queries.
Example:
::
>>> tab = table_env.from_elements([(1, 'Hi'), (2, 'Hello')], ['a', 'b'])
>>> table_env.register_table("source", tab)
:param name: The name under which the table will be registered.
:param table: The table to register.
.. note:: Deprecated in 1.10. Use :func:`create_temporary_view` instead.
"""
warnings.warn("Deprecated in 1.10. Use create_temporary_view instead.", DeprecationWarning)
self._j_tenv.registerTable(name, table._j_table)
def register_table_source(self, name: str, table_source: TableSource):
"""
Registers an external :class:`~pyflink.table.TableSource` in this
:class:`~pyflink.table.TableEnvironment`'s catalog. Registered tables can be referenced in
SQL queries.
Example:
::
>>> table_env.register_table_source("source",
... CsvTableSource("./1.csv",
... ["a", "b"],
... [DataTypes.INT(),
... DataTypes.STRING()]))
:param name: The name under which the table source is registered.
:param table_source: The table source to register.
.. note:: Deprecated in 1.10. Use :func:`execute_sql` instead.
"""
warnings.warn("Deprecated in 1.10. Use connect instead.", DeprecationWarning)
self._j_tenv.registerTableSourceInternal(name, table_source._j_table_source)
def register_table_sink(self, name: str, table_sink: TableSink):
"""
Registers an external :class:`~pyflink.table.TableSink` with given field names and types in
this :class:`~pyflink.table.TableEnvironment`'s catalog. Registered sink tables can be
referenced in SQL DML statements.
Example:
::
>>> table_env.register_table_sink("sink",
... CsvTableSink(["a", "b"],
... [DataTypes.INT(),
... DataTypes.STRING()],
... "./2.csv"))
:param name: The name under which the table sink is registered.
:param table_sink: The table sink to register.
.. note:: Deprecated in 1.10. Use :func:`execute_sql` instead.
"""
warnings.warn("Deprecated in 1.10. Use connect instead.", DeprecationWarning)
self._j_tenv.registerTableSinkInternal(name, table_sink._j_table_sink)
def scan(self, *table_path: str) -> Table:
"""
Scans a registered table and returns the resulting :class:`~pyflink.table.Table`.
A table to scan must be registered in the TableEnvironment. It can be either directly
registered or be an external member of a :class:`~pyflink.table.catalog.Catalog`.
See the documentation of :func:`~pyflink.table.TableEnvironment.use_database` or
:func:`~pyflink.table.TableEnvironment.use_catalog` for the rules on the path resolution.
Examples:
Scanning a directly registered table
::
>>> tab = table_env.scan("tableName")
Scanning a table from a registered catalog
::
>>> tab = table_env.scan("catalogName", "dbName", "tableName")
:param table_path: The path of the table to scan.
:throws: Exception if no table is found using the given table path.
:return: The resulting table.
.. note:: Deprecated in 1.10. Use :func:`from_path` instead.
"""
warnings.warn("Deprecated in 1.10. Use from_path instead.", DeprecationWarning)
gateway = get_gateway()
j_table_paths = utils.to_jarray(gateway.jvm.String, table_path)
j_table = self._j_tenv.scan(j_table_paths)
return Table(j_table, self)
def from_path(self, path: str) -> Table:
"""
Reads a registered table and returns the resulting :class:`~pyflink.table.Table`.
A table to scan must be registered in the :class:`~pyflink.table.TableEnvironment`.
See the documentation of :func:`use_database` or :func:`use_catalog` for the rules on the
path resolution.
Examples:
Reading a table from default catalog and database.
::
>>> tab = table_env.from_path("tableName")
Reading a table from a registered catalog.
::
>>> tab = table_env.from_path("catalogName.dbName.tableName")
Reading a table from a registered catalog with escaping. (`Table` is a reserved keyword).
Dots in e.g. a database name also must be escaped.
::
>>> tab = table_env.from_path("catalogName.`db.Name`.`Table`")
:param path: The path of a table API object to scan.
:return: Either a table or virtual table (=view).
.. seealso:: :func:`use_catalog`
.. seealso:: :func:`use_database`
.. versionadded:: 1.10.0
"""
return Table(get_method(self._j_tenv, "from")(path), self)
def insert_into(self, target_path: str, table: Table):
"""
Instructs to write the content of a :class:`~pyflink.table.Table` API object into a table.
See the documentation of :func:`use_database` or :func:`use_catalog` for the rules on the
path resolution.
Example:
::
>>> tab = table_env.scan("tableName")
>>> table_env.insert_into("sink", tab)
:param target_path: The path of the registered :class:`~pyflink.table.TableSink` to which
the :class:`~pyflink.table.Table` is written.
:param table: The Table to write to the sink.
.. versionchanged:: 1.10.0
The signature is changed, e.g. the parameter *table_path_continued* was removed and
the parameter *target_path* is moved before the parameter *table*.
.. note:: Deprecated in 1.11. Use :func:`execute_insert` for single sink,
use :func:`create_statement_set` for multiple sinks.
"""
warnings.warn("Deprecated in 1.11. Use Table#execute_insert for single sink,"
"use create_statement_set for multiple sinks.", DeprecationWarning)
self._j_tenv.insertInto(target_path, table._j_table)
def list_catalogs(self) -> List[str]:
"""
Gets the names of all catalogs registered in this environment.
:return: List of catalog names.
"""
j_catalog_name_array = self._j_tenv.listCatalogs()
return [item for item in j_catalog_name_array]
def list_modules(self) -> List[str]:
"""
Gets the names of all modules registered in this environment.
:return: List of module names.
.. versionadded:: 1.10.0
"""
j_module_name_array = self._j_tenv.listModules()
return [item for item in j_module_name_array]
def list_databases(self) -> List[str]:
"""
Gets the names of all databases in the current catalog.
:return: List of database names in the current catalog.
"""
j_database_name_array = self._j_tenv.listDatabases()
return [item for item in j_database_name_array]
def list_tables(self) -> List[str]:
"""
Gets the names of all tables and views in the current database of the current catalog.
It returns both temporary and permanent tables and views.
:return: List of table and view names in the current database of the current catalog.
"""
j_table_name_array = self._j_tenv.listTables()
return [item for item in j_table_name_array]
def list_views(self) -> List[str]:
"""
Gets the names of all views in the current database of the current catalog.
It returns both temporary and permanent views.
:return: List of view names in the current database of the current catalog.
.. versionadded:: 1.11.0
"""
j_view_name_array = self._j_tenv.listViews()
return [item for item in j_view_name_array]
def list_user_defined_functions(self) -> List[str]:
"""
Gets the names of all user defined functions registered in this environment.
:return: List of the names of all user defined functions registered in this environment.
"""
j_udf_name_array = self._j_tenv.listUserDefinedFunctions()
return [item for item in j_udf_name_array]
def list_functions(self) -> List[str]:
"""
Gets the names of all functions in this environment.
:return: List of the names of all functions in this environment.
.. versionadded:: 1.10.0
"""
j_function_name_array = self._j_tenv.listFunctions()
return [item for item in j_function_name_array]
def list_temporary_tables(self) -> List[str]:
"""
Gets the names of all temporary tables and views available in the current namespace
(the current database of the current catalog).
:return: A list of the names of all registered temporary tables and views in the current
database of the current catalog.
.. seealso:: :func:`list_tables`
.. versionadded:: 1.10.0
"""
j_table_name_array = self._j_tenv.listTemporaryTables()
return [item for item in j_table_name_array]
def list_temporary_views(self) -> List[str]:
"""
Gets the names of all temporary views available in the current namespace (the current
database of the current catalog).
:return: A list of the names of all registered temporary views in the current database
of the current catalog.
.. seealso:: :func:`list_tables`
.. versionadded:: 1.10.0
"""
j_view_name_array = self._j_tenv.listTemporaryViews()
return [item for item in j_view_name_array]
def drop_temporary_table(self, table_path: str) -> bool:
"""
Drops a temporary table registered in the given path.
If a permanent table with a given path exists, it will be used
from now on for any queries that reference this path.
:param table_path: The path of the registered temporary table.
:return: True if a table existed in the given path and was removed.
.. versionadded:: 1.10.0
"""
return self._j_tenv.dropTemporaryTable(table_path)
def drop_temporary_view(self, view_path: str) -> bool:
"""
Drops a temporary view registered in the given path.
If a permanent table or view with a given path exists, it will be used
from now on for any queries that reference this path.
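Example (assumes a temporary view was registered via :func:`create_temporary_view`):
::
>>> dropped = table_env.drop_temporary_view("my_view")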
:return: True if a view existed in the given path and was removed.
.. versionadded:: 1.10.0
"""
return self._j_tenv.dropTemporaryView(view_path)
def explain(self, table: Table = None, extended: bool = False) -> str:
"""
Returns the AST of the specified Table API and SQL queries and the execution plan to compute
the result of the given :class:`~pyflink.table.Table` or multi-sinks plan.
:param table: The table to be explained. If table is None, explain for multi-sinks plan,
else for given table.
:param extended: If the plan should contain additional properties.
e.g. estimated cost, traits
:return: The AST of the specified Table API and SQL queries and the execution plan.
.. note:: Deprecated in 1.11. Use :class:`Table`#:func:`explain` instead.
"""
warnings.warn("Deprecated in 1.11. Use Table#explain instead.", DeprecationWarning)
if table is None:
return self._j_tenv.explain(extended)
else:
return self._j_tenv.explain(table._j_table, extended)
def explain_sql(self, stmt: str, *extra_details: ExplainDetail) -> str:
"""
Returns the AST of the specified statement and the execution plan.
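Example (a minimal sketch; assumes a table named "MyTable" is already registered):
::
>>> print(table_env.explain_sql(
...     "SELECT * FROM MyTable", ExplainDetail.ESTIMATED_COST))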
:param stmt: The statement for which the AST and execution plan will be returned.
:param extra_details: The extra explain details which the explain result should include,
e.g. estimated cost, changelog mode for streaming
:return: The AST of the specified statement and the execution plan.
.. versionadded:: 1.11.0
"""
j_extra_details = to_j_explain_detail_arr(extra_details)
return self._j_tenv.explainSql(stmt, j_extra_details)
def sql_query(self, query: str) -> Table:
"""
Evaluates a SQL query on registered tables and retrieves the result as a
:class:`~pyflink.table.Table`.
All tables referenced by the query must be registered in the TableEnvironment.
A :class:`~pyflink.table.Table` is automatically registered when its
:func:`~Table.__str__` method is called, for example when it is embedded into a String.
Hence, SQL queries can directly reference a :class:`~pyflink.table.Table` as follows:
::
>>> table = ...
# the table is not registered to the table environment
>>> table_env.sql_query("SELECT * FROM %s" % table)
:param query: The sql query string.
:return: The result table.
"""
j_table = self._j_tenv.sqlQuery(query)
return Table(j_table, self)
def execute_sql(self, stmt: str) -> TableResult:
"""
Execute the given single statement, and return the execution result.
The statement can be DDL/DML/DQL/SHOW/DESCRIBE/EXPLAIN/USE.
For DML and DQL, this method returns TableResult once the job has been submitted.
For DDL and DCL statements, TableResult is returned once the operation has finished.
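Example (a minimal sketch; the table name and connector options are illustrative):
::
>>> table_env.execute_sql(
...     "CREATE TABLE print_sink(a INT) WITH ('connector' = 'print')")
>>> table_env.execute_sql("INSERT INTO print_sink SELECT 1").wait()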
:return: The content for DQL/SHOW/DESCRIBE/EXPLAIN,
the affected row count for `DML` (-1 means unknown),
or a string message ("OK") for other statements.
.. versionadded:: 1.11.0
"""
self._before_execute()
return TableResult(self._j_tenv.executeSql(stmt))
def create_statement_set(self) -> StatementSet:
"""
Create a StatementSet instance which accepts DML statements or Tables.
The planner can optimize all added statements and Tables together
and then submit them as one job.
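Example (a minimal sketch; "source_table" and "sink_table" are assumed to be registered):
::
>>> statement_set = table_env.create_statement_set()
>>> statement_set.add_insert_sql("INSERT INTO sink_table SELECT * FROM source_table")
>>> statement_set.execute().wait()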
:return: A StatementSet instance.
.. versionadded:: 1.11.0
"""
_j_statement_set = self._j_tenv.createStatementSet()
return StatementSet(_j_statement_set, self)
def sql_update(self, stmt: str):
"""
Evaluates a SQL statement such as INSERT, UPDATE or DELETE or a DDL statement
.. note::
Currently only SQL INSERT statements and CREATE TABLE statements are supported.
All tables referenced by the query must be registered in the TableEnvironment.
A :class:`~pyflink.table.Table` is automatically registered when its
:func:`~Table.__str__` method is called, for example when it is embedded into a String.
Hence, SQL queries can directly reference a :class:`~pyflink.table.Table` as follows:
::
# register the table sink into which the result is inserted.
>>> table_env.register_table_sink("sink_table", table_sink)
>>> source_table = ...
# source_table is not registered to the table environment
>>> table_env.sql_update("INSERT INTO sink_table SELECT * FROM %s" % source_table)
A DDL statement can also be executed to create/drop a table:
For example, the below DDL statement would create a CSV table named `tbl1`
into the current catalog::
create table tbl1(
a int,
b bigint,
c varchar
) with (
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = 'xxx'
)
SQL queries can directly execute as follows:
::
>>> source_ddl = \\
... '''
... create table sourceTable(
... a int,
... b varchar
... ) with (
... 'connector.type' = 'kafka',
... 'update-mode' = 'append',
... 'connector.topic' = 'xxx',
... 'connector.properties.bootstrap.servers' = 'localhost:9092'
... )
... '''
>>> sink_ddl = \\
... '''
... create table sinkTable(
... a int,
... b varchar
... ) with (
... 'connector.type' = 'filesystem',
... 'format.type' = 'csv',
... 'connector.path' = 'xxx'
... )
... '''
>>> query = "INSERT INTO sinkTable SELECT * FROM sourceTable"
>>> table_env.sql_update(source_ddl)
>>> table_env.sql_update(sink_ddl)
>>> table_env.sql_update(query)
>>> table_env.execute("MyJob")
:param stmt: The SQL statement to evaluate.
.. note:: Deprecated in 1.11. Use :func:`execute_sql` for single statement,
use :func:`create_statement_set` for multiple DML statements.
"""
warnings.warn("Deprecated in 1.11. Use execute_sql for single statement, "
"use create_statement_set for multiple DML statements.", DeprecationWarning)
self._j_tenv.sqlUpdate(stmt)
def get_current_catalog(self) -> str:
"""
Gets the current default catalog name of the current session.
:return: The current default catalog name that is used for the path resolution.
.. seealso:: :func:`~pyflink.table.TableEnvironment.use_catalog`
"""
return self._j_tenv.getCurrentCatalog()
def use_catalog(self, catalog_name: str):
"""
Sets the current catalog to the given value. It also sets the default
database to the catalog's default one.
See also :func:`~TableEnvironment.use_database`.
This is used during the resolution of object paths. Both the catalog and database are
optional when referencing catalog objects such as tables, views etc. The algorithm looks for
requested objects in following paths in that order:
* ``[current-catalog].[current-database].[requested-path]``
* ``[current-catalog].[requested-path]``
* ``[requested-path]``
Example:
Given structure with default catalog set to ``default_catalog`` and default database set to
``default_database``. ::
root:
|- default_catalog
|- default_database
|- tab1
|- db1
|- tab1
|- cat1
|- db1
|- tab1
The following table describes resolved paths:
+----------------+-----------------------------------------+
| Requested path | Resolved path |
+================+=========================================+
| tab1 | default_catalog.default_database.tab1 |
+----------------+-----------------------------------------+
| db1.tab1 | default_catalog.db1.tab1 |
+----------------+-----------------------------------------+
| cat1.db1.tab1 | cat1.db1.tab1 |
+----------------+-----------------------------------------+
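Example (illustrative, following the structure above):
::
>>> table_env.use_catalog("cat1")
>>> table_env.get_current_catalog()
'cat1'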
:param catalog_name: The name of the catalog to set as the current default catalog.
:throws: :class:`~pyflink.util.exceptions.CatalogException` thrown if a catalog with given
name could not be set as the default one.
.. seealso:: :func:`~pyflink.table.TableEnvironment.use_database`
"""
self._j_tenv.useCatalog(catalog_name)
def get_current_database(self) -> str:
"""
Gets the current default database name of the running session.
:return: The name of the current database of the current catalog.
.. seealso:: :func:`~pyflink.table.TableEnvironment.use_database`
"""
return self._j_tenv.getCurrentDatabase()
def use_database(self, database_name: str):
"""
Sets the current default database. It has to exist in the current catalog. That path will
be used as the default one when looking for unqualified object names.
This is used during the resolution of object paths. Both the catalog and database are
optional when referencing catalog objects such as tables, views etc. The algorithm looks for
requested objects in following paths in that order:
* ``[current-catalog].[current-database].[requested-path]``
* ``[current-catalog].[requested-path]``
* ``[requested-path]``
Example:
Given structure with default catalog set to ``default_catalog`` and default database set to
``default_database``. ::
root:
|- default_catalog
|- default_database
|- tab1
|- db1
|- tab1
|- cat1
|- db1
|- tab1
The following table describes resolved paths:
+----------------+-----------------------------------------+
| Requested path | Resolved path |
+================+=========================================+
| tab1 | default_catalog.default_database.tab1 |
+----------------+-----------------------------------------+
| db1.tab1 | default_catalog.db1.tab1 |
+----------------+-----------------------------------------+
| cat1.db1.tab1 | cat1.db1.tab1 |
+----------------+-----------------------------------------+
:throws: :class:`~pyflink.util.exceptions.CatalogException` thrown if the given catalog and
database could not be set as the default ones.
.. seealso:: :func:`~pyflink.table.TableEnvironment.use_catalog`
:param database_name: The name of the database to set as the current database.
"""
self._j_tenv.useDatabase(database_name)
def get_config(self) -> TableConfig:
"""
Returns the table config to define the runtime behavior of the Table API.
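Example (a minimal sketch; the configuration key is just an illustration):
::
>>> table_env.get_config().get_configuration().set_string(
...     "parallelism.default", "1")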
:return: Current table config.
"""
if not hasattr(self, "table_config"):
table_config = TableConfig()
table_config._j_table_config = self._j_tenv.getConfig()
setattr(self, "table_config", table_config)
return getattr(self, "table_config")
@abstractmethod
def connect(self, connector_descriptor: ConnectorDescriptor) -> ConnectTableDescriptor:
"""
Creates a temporary table from a descriptor.
Descriptors allow for declaring the communication to external systems in an
implementation-agnostic way. The classpath is scanned for suitable table factories that
match the desired configuration.
The following example shows how to read from a connector using a JSON format and
registering a temporary table as "MyTable":
Example:
::
>>> table_env \\
... .connect(ExternalSystemXYZ()
... .version("0.11")) \\
... .with_format(Json()
... .json_schema("{...}")
... .fail_on_missing_field(False)) \\
... .with_schema(Schema()
... .field("user-name", "VARCHAR")
... .from_origin_field("u_name")
... .field("count", "DECIMAL")) \\
... .create_temporary_table("MyTable")
:param connector_descriptor: Connector descriptor describing the external system.
:return: A :class:`~pyflink.table.descriptors.ConnectTableDescriptor` used to build the
temporary table.
.. note:: Deprecated in 1.11. Use :func:`execute_sql` to register a table instead.
"""
pass
def register_java_function(self, name: str, function_class_name: str):
"""
Registers a java user defined function under a unique name. Replaces already existing
user-defined functions under this name. The acceptable function types include
**ScalarFunction**, **TableFunction** and **AggregateFunction**.
Example:
::
>>> table_env.register_java_function("func1", "java.user.defined.function.class.name")
:param name: The name under which the function is registered.
:param function_class_name: The java full qualified class name of the function to register.
The function must have a public no-argument constructor and can
be found in the current Java classloader.
.. note:: Deprecated in 1.12. Use :func:`create_java_temporary_system_function` instead.
"""
warnings.warn("Deprecated in 1.12. Use :func:`create_java_temporary_system_function` "
"instead.", DeprecationWarning)
gateway = get_gateway()
java_function = gateway.jvm.Thread.currentThread().getContextClassLoader()\
.loadClass(function_class_name).newInstance()
# this is a temporary solution and will be unified later when we use the new type
# system(DataType) to replace the old type system(TypeInformation).
if self._is_blink_planner and isinstance(self, BatchTableEnvironment):
if self._is_table_function(java_function):
self._register_table_function(name, java_function)
elif self._is_aggregate_function(java_function):
self._register_aggregate_function(name, java_function)
else:
self._j_tenv.registerFunction(name, java_function)
else:
self._j_tenv.registerFunction(name, java_function)
def register_function(self, name: str, function: UserDefinedFunctionWrapper):
"""
Registers a python user-defined function under a unique name. Replaces already existing
user-defined function under this name.
Example:
::
>>> table_env.register_function(
... "add_one", udf(lambda i: i + 1, result_type=DataTypes.BIGINT()))
>>> @udf(result_type=DataTypes.BIGINT())
... def add(i, j):
... return i + j
>>> table_env.register_function("add", add)
>>> class SubtractOne(ScalarFunction):
... def eval(self, i):
... return i - 1
>>> table_env.register_function(
... "subtract_one", udf(SubtractOne(), result_type=DataTypes.BIGINT()))
:param name: The name under which the function is registered.
:param function: The python user-defined function to register.
.. versionadded:: 1.10.0
.. note:: Deprecated in 1.12. Use :func:`create_temporary_system_function` instead.
"""
warnings.warn("Deprecated in 1.12. Use :func:`create_temporary_system_function` "
"instead.", DeprecationWarning)
function = self._wrap_aggregate_function_if_needed(function)
java_function = function._java_user_defined_function()
# this is a temporary solution and will be unified later when we use the new type
# system(DataType) to replace the old type system(TypeInformation).
if self._is_blink_planner and isinstance(self, BatchTableEnvironment):
if self._is_table_function(java_function):
self._register_table_function(name, java_function)
elif self._is_aggregate_function(java_function):
self._register_aggregate_function(name, java_function)
else:
self._j_tenv.registerFunction(name, java_function)
else:
self._j_tenv.registerFunction(name, java_function)
def create_temporary_view(self, view_path: str, table: Table):
"""
Registers a :class:`~pyflink.table.Table` API object as a temporary view similar to SQL
temporary views.
Temporary objects can shadow permanent ones. If a permanent object in a given path exists,
it will be inaccessible in the current session. To make the permanent object available
again you can drop the corresponding temporary object.
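Example (the view name is illustrative):
::
>>> tab = table_env.from_elements([(1, 'Hi'), (2, 'Hello')], ['a', 'b'])
>>> table_env.create_temporary_view("my_view", tab)
>>> table_env.sql_query("SELECT * FROM my_view")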
:param view_path: The path under which the view will be registered. See also the
:class:`~pyflink.table.TableEnvironment` class description for the format
of the path.
:param table: The view to register.
.. versionadded:: 1.10.0
"""
self._j_tenv.createTemporaryView(view_path, table._j_table)
def add_python_file(self, file_path: str):
"""
Adds a python dependency which could be python files, python packages or
local directories. They will be added to the PYTHONPATH of the python UDF worker.
Please make sure that these dependencies can be imported.
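Example (the file path is illustrative):
::
>>> table_env.add_python_file("/path/of/my_udf_utils.py")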
:param file_path: The path of the python dependency.
.. versionadded:: 1.10.0
"""
jvm = get_gateway().jvm
python_files = self.get_config().get_configuration().get_string(
jvm.PythonOptions.PYTHON_FILES.key(), None)
if python_files is not None:
python_files = jvm.PythonDependencyUtils.FILE_DELIMITER.join([file_path, python_files])
else:
python_files = file_path
self.get_config().get_configuration().set_string(
jvm.PythonOptions.PYTHON_FILES.key(), python_files)
def set_python_requirements(self,
requirements_file_path: str,
requirements_cache_dir: str = None):
"""
Specifies a requirements.txt file which defines the third-party dependencies.
These dependencies will be installed to a temporary directory and added to the
PYTHONPATH of the python UDF worker.
For the dependencies which could not be accessed in the cluster, a directory which contains
the installation packages of these dependencies could be specified using the parameter
"requirements_cached_dir". It will be uploaded to the cluster to support offline
installation.
Example:
::
# commands executed in shell
$ echo numpy==1.16.5 > requirements.txt
$ pip download -d cached_dir -r requirements.txt --no-binary :all:
# python code
>>> table_env.set_python_requirements("requirements.txt", "cached_dir")
.. note::
Please make sure the installation packages match the platform of the cluster
and the python version used. These packages will be installed using pip,
so please also make sure that pip (version >= 7.1.0) and
setuptools (version >= 37.0.0) are available.
:param requirements_file_path: The path of "requirements.txt" file.
:param requirements_cache_dir: The path of the local directory which contains the
installation packages.
.. versionadded:: 1.10.0
"""
jvm = get_gateway().jvm
python_requirements = requirements_file_path
if requirements_cache_dir is not None:
python_requirements = jvm.PythonDependencyUtils.PARAM_DELIMITER.join(
[python_requirements, requirements_cache_dir])
self.get_config().get_configuration().set_string(
jvm.PythonOptions.PYTHON_REQUIREMENTS.key(), python_requirements)
def add_python_archive(self, archive_path: str, target_dir: str = None):
"""
Adds a python archive file. The file will be extracted to the working directory of
python UDF worker.
If the parameter "target_dir" is specified, the archive file will be extracted to a
directory named ${target_dir}. Otherwise, the archive file will be extracted to a
directory with the same name of the archive file.
If python UDF depends on a specific python version which does not exist in the cluster,
this method can be used to upload the virtual environment.
Note that the path of the python interpreter contained in the uploaded environment
should be specified via the method :func:`pyflink.table.TableConfig.set_python_executable`.
The files uploaded via this method are also accessible in UDFs via relative path.
Example:
::
# command executed in shell
# assert the relative path of python interpreter is py_env/bin/python
$ zip -r py_env.zip py_env
# python code
>>> table_env.add_python_archive("py_env.zip")
>>> table_env.get_config().set_python_executable("py_env.zip/py_env/bin/python")
# or
>>> table_env.add_python_archive("py_env.zip", "myenv")
>>> table_env.get_config().set_python_executable("myenv/py_env/bin/python")
# the files contained in the archive file can be accessed in UDF
>>> def my_udf():
... with open("myenv/py_env/data/data.txt") as f:
... ...
.. note::
Please make sure the uploaded python environment matches the platform that the cluster
is running on and that the python version is 3.5 or higher.
.. note::
Currently only zip-format is supported, i.e. zip, jar, whl, egg, etc.
Other archive formats such as tar, tar.gz, 7z, rar, etc. are not supported.
:param archive_path: The archive file path.
:param target_dir: Optional, the target dir name that the archive file extracted to.
.. versionadded:: 1.10.0
"""
jvm = get_gateway().jvm
if target_dir is not None:
archive_path = jvm.PythonDependencyUtils.PARAM_DELIMITER.join(
[archive_path, target_dir])
python_archives = self.get_config().get_configuration().get_string(
jvm.PythonOptions.PYTHON_ARCHIVES.key(), None)
if python_archives is not None:
python_files = jvm.PythonDependencyUtils.FILE_DELIMITER.join(
[python_archives, archive_path])
else:
python_files = archive_path
self.get_config().get_configuration().set_string(
jvm.PythonOptions.PYTHON_ARCHIVES.key(), python_files)
def execute(self, job_name: str) -> JobExecutionResult:
"""
Triggers the program execution. The environment will execute all parts of
the program.
The program execution will be logged and displayed with the provided name.
.. note::
It is highly advised to set all parameters in the :class:`~pyflink.table.TableConfig`
on the very beginning of the program. It is undefined what configurations values will
be used for the execution if queries are mixed with config changes. It depends on
the characteristic of the particular parameter. For some of them the value from the
point in time of query construction (e.g. the current catalog) will be used. On the
other hand some values might be evaluated according to the state from the time when
this method is called (e.g. timezone).
:param job_name: Desired name of the job.
:return: The result of the job execution, containing elapsed time and accumulators.
.. note:: Deprecated in 1.11. Use :func:`execute_sql` for single sink,
use :func:`create_statement_set` for multiple sinks.
"""
warnings.warn("Deprecated in 1.11. Use execute_sql for single sink, "
"use create_statement_set for multiple sinks.", DeprecationWarning)
self._before_execute()
return JobExecutionResult(self._j_tenv.execute(job_name))
def from_elements(self, elements: Iterable, schema: Union[DataType, List[str]] = None,
verify_schema: bool = True) -> Table:
"""
Creates a table from a collection of elements.
The element types must be acceptable atomic types or acceptable composite types.
All elements must be of the same type.
If the element types are composite types, the composite types must be strictly equal,
and their subtypes must also be acceptable types.
e.g. if the elements are tuples, the length of the tuples must be equal, the element types
of the tuples must be equal in order.
The built-in acceptable atomic element types contain:
**int**, **long**, **str**, **unicode**, **bool**,
**float**, **bytearray**, **datetime.date**, **datetime.time**, **datetime.datetime**,
**datetime.timedelta**, **decimal.Decimal**
The built-in acceptable composite element types contain:
**list**, **tuple**, **dict**, **array**, :class:`~pyflink.table.Row`
If the element type is a composite type, it will be unboxed.
e.g. table_env.from_elements([(1, 'Hi'), (2, 'Hello')]) will return a table like:
+----+-------+
| _1 | _2 |
+====+=======+
| 1 | Hi |
+----+-------+
| 2 | Hello |
+----+-------+
"_1" and "_2" are generated field names.
Example:
::
# use the second parameter to specify custom field names
>>> table_env.from_elements([(1, 'Hi'), (2, 'Hello')], ['a', 'b'])
# use the second parameter to specify custom table schema
>>> table_env.from_elements([(1, 'Hi'), (2, 'Hello')],
... DataTypes.ROW([DataTypes.FIELD("a", DataTypes.INT()),
... DataTypes.FIELD("b", DataTypes.STRING())]))
# use the third parameter to switch whether to verify the elements against the schema
>>> table_env.from_elements([(1, 'Hi'), (2, 'Hello')],
... DataTypes.ROW([DataTypes.FIELD("a", DataTypes.INT()),
... DataTypes.FIELD("b", DataTypes.STRING())]),
... False)
# create Table from expressions
>>> table_env.from_elements([row(1, 'abc', 2.0), row(2, 'def', 3.0)],
... DataTypes.ROW([DataTypes.FIELD("a", DataTypes.INT()),
... DataTypes.FIELD("b", DataTypes.STRING()),
... DataTypes.FIELD("c", DataTypes.FLOAT())]))
:param elements: The elements to create a table from.
:param schema: The schema of the table.
:param verify_schema: Whether to verify the elements against the schema.
:return: The result table.
"""
# verifies the elements against the specified schema
if isinstance(schema, RowType):
verify_func = _create_type_verifier(schema) if verify_schema else lambda _: True
def verify_obj(obj):
verify_func(obj)
return obj
elif isinstance(schema, DataType):
data_type = schema
schema = RowType().add("value", schema)
verify_func = _create_type_verifier(
data_type, name="field value") if verify_schema else lambda _: True
def verify_obj(obj):
verify_func(obj)
return obj
else:
def verify_obj(obj):
return obj
# infers the schema if not specified
if schema is None or isinstance(schema, (list, tuple)):
schema = _infer_schema_from_data(elements, names=schema)
converter = _create_converter(schema)
elements = map(converter, elements)
elif not isinstance(schema, RowType):
raise TypeError(
"schema should be RowType, list, tuple or None, but got: %s" % schema)
elements = list(elements)
# in case all the elements are expressions
if len(elements) > 0 and all(isinstance(elem, Expression) for elem in elements):
if schema is None:
return Table(self._j_tenv.fromValues(to_expression_jarray(elements)), self)
else:
return Table(self._j_tenv.fromValues(_to_java_data_type(schema),
to_expression_jarray(elements)),
self)
elif any(isinstance(elem, Expression) for elem in elements):
raise ValueError("It doesn't support part of the elements are Expression, while the "
"others are not.")
# verifies the elements against the specified schema
elements = map(verify_obj, elements)
# converts python data to sql data
elements = [schema.to_sql_type(element) for element in elements]
return self._from_elements(elements, schema)
def _from_elements(self, elements: List, schema: Union[DataType, List[str]]) -> Table:
"""
Creates a table from a collection of elements.
:param elements: The elements to create a table from.
:return: The result :class:`~pyflink.table.Table`.
"""
# serializes to a file, and we read the file in java
temp_file = tempfile.NamedTemporaryFile(delete=False, dir=tempfile.mkdtemp())
serializer = BatchedSerializer(self._serializer)
try:
with temp_file:
serializer.dump_to_stream(elements, temp_file)
row_type_info = _to_java_type(schema)
execution_config = self._get_j_env().getConfig()
gateway = get_gateway()
j_objs = gateway.jvm.PythonBridgeUtils.readPythonObjects(temp_file.name, True)
if self._is_blink_planner:
PythonTableUtils = gateway.jvm \
.org.apache.flink.table.planner.utils.python.PythonTableUtils
PythonInputFormatTableSource = gateway.jvm \
.org.apache.flink.table.planner.utils.python.PythonInputFormatTableSource
else:
PythonTableUtils = gateway.jvm.PythonTableUtils
PythonInputFormatTableSource = gateway.jvm.PythonInputFormatTableSource
j_input_format = PythonTableUtils.getInputFormat(
j_objs, row_type_info, execution_config)
j_table_source = PythonInputFormatTableSource(
j_input_format, row_type_info)
return Table(self._j_tenv.fromTableSource(j_table_source), self)
finally:
os.unlink(temp_file.name)
def from_pandas(self, pdf,
schema: Union[RowType, List[str], Tuple[str], List[DataType],
Tuple[DataType]] = None,
splits_num: int = 1) -> Table:
"""
Creates a table from a pandas DataFrame.
Example:
::
>>> pdf = pd.DataFrame(np.random.rand(1000, 2))
# use the second parameter to specify custom field names
>>> table_env.from_pandas(pdf, ["a", "b"])
# use the second parameter to specify custom field types
>>> table_env.from_pandas(pdf, [DataTypes.DOUBLE(), DataTypes.DOUBLE()]))
# use the second parameter to specify custom table schema
>>> table_env.from_pandas(pdf,
... DataTypes.ROW([DataTypes.FIELD("a", DataTypes.DOUBLE()),
... DataTypes.FIELD("b", DataTypes.DOUBLE())]))
:param pdf: The pandas DataFrame.
:param schema: The schema of the converted table.
:param splits_num: The number of splits the given Pandas DataFrame will be split into. It
determines the number of parallel source tasks.
If not specified, the default parallelism will be used.
:return: The result table.
.. versionadded:: 1.11.0
"""
if not self._is_blink_planner and isinstance(self, BatchTableEnvironment):
raise TypeError("It doesn't support to convert from Pandas DataFrame in the batch "
"mode of old planner")
import pandas as pd
if not isinstance(pdf, pd.DataFrame):
raise TypeError("Unsupported type, expected pandas.DataFrame, got %s" % type(pdf))
import pyarrow as pa
arrow_schema = pa.Schema.from_pandas(pdf, preserve_index=False)
if schema is not None:
if isinstance(schema, RowType):
result_type = schema
elif isinstance(schema, (list, tuple)) and isinstance(schema[0], str):
result_type = RowType(
[RowField(field_name, from_arrow_type(field.type, field.nullable))
for field_name, field in zip(schema, arrow_schema)])
elif isinstance(schema, (list, tuple)) and isinstance(schema[0], DataType):
result_type = RowType(
[RowField(field_name, field_type) for field_name, field_type in zip(
arrow_schema.names, schema)])
else:
raise TypeError("Unsupported schema type, it could only be of RowType, a "
"list of str or a list of DataType, got %s" % schema)
else:
result_type = RowType([RowField(field.name, from_arrow_type(field.type, field.nullable))
for field in arrow_schema])
# serializes to a file, and we read the file in java
temp_file = tempfile.NamedTemporaryFile(delete=False, dir=tempfile.mkdtemp())
import pytz
serializer = ArrowSerializer(
create_arrow_schema(result_type.field_names(), result_type.field_types()),
result_type,
pytz.timezone(self.get_config().get_local_timezone()))
step = -(-len(pdf) // splits_num)
pdf_slices = [pdf.iloc[start:start + step] for start in range(0, len(pdf), step)]
data = [[c for (_, c) in pdf_slice.iteritems()] for pdf_slice in pdf_slices]
try:
with temp_file:
serializer.dump_to_stream(data, temp_file)
jvm = get_gateway().jvm
data_type = jvm.org.apache.flink.table.types.utils.TypeConversions\
.fromLegacyInfoToDataType(_to_java_type(result_type)).notNull()
if self._is_blink_planner:
data_type = data_type.bridgedTo(
load_java_class('org.apache.flink.table.data.RowData'))
j_arrow_table_source = \
jvm.org.apache.flink.table.runtime.arrow.ArrowUtils.createArrowTableSource(
data_type, temp_file.name)
return Table(self._j_tenv.fromTableSource(j_arrow_table_source), self)
finally:
os.unlink(temp_file.name)
def _set_python_executable_for_local_executor(self):
jvm = get_gateway().jvm
j_config = get_j_env_configuration(self)
if not j_config.containsKey(jvm.PythonOptions.PYTHON_EXECUTABLE.key()) \
and is_local_deployment(j_config):
j_config.setString(jvm.PythonOptions.PYTHON_EXECUTABLE.key(), sys.executable)
def _add_jars_to_j_env_config(self, config_key):
jvm = get_gateway().jvm
jar_urls = self.get_config().get_configuration().get_string(config_key, None)
if jar_urls is not None:
# normalize and remove duplicates
jar_urls_set = set([jvm.java.net.URL(url).toString() for url in jar_urls.split(";")])
j_configuration = get_j_env_configuration(self)
if j_configuration.containsKey(config_key):
for url in j_configuration.getString(config_key, "").split(";"):
jar_urls_set.add(url)
j_configuration.setString(config_key, ";".join(jar_urls_set))
@abstractmethod
def _get_j_env(self):
pass
@staticmethod
def _is_table_function(java_function):
java_function_class = java_function.getClass()
j_table_function_class = get_java_class(
get_gateway().jvm.org.apache.flink.table.functions.TableFunction)
return j_table_function_class.isAssignableFrom(java_function_class)
@staticmethod
def _is_aggregate_function(java_function):
java_function_class = java_function.getClass()
j_aggregate_function_class = get_java_class(
get_gateway().jvm.org.apache.flink.table.functions.ImperativeAggregateFunction)
return j_aggregate_function_class.isAssignableFrom(java_function_class)
def _register_table_function(self, name, table_function):
function_catalog = self._get_function_catalog()
gateway = get_gateway()
helper = gateway.jvm.org.apache.flink.table.functions.UserDefinedFunctionHelper
result_type = helper.getReturnTypeOfTableFunction(table_function)
function_catalog.registerTempSystemTableFunction(name, table_function, result_type)
def _register_aggregate_function(self, name, aggregate_function):
function_catalog = self._get_function_catalog()
gateway = get_gateway()
helper = gateway.jvm.org.apache.flink.table.functions.UserDefinedFunctionHelper
result_type = helper.getReturnTypeOfAggregateFunction(aggregate_function)
acc_type = helper.getAccumulatorTypeOfAggregateFunction(aggregate_function)
function_catalog.registerTempSystemAggregateFunction(
name, aggregate_function, result_type, acc_type)
def _get_function_catalog(self):
function_catalog_field = self._j_tenv.getClass().getDeclaredField("functionCatalog")
function_catalog_field.setAccessible(True)
function_catalog = function_catalog_field.get(self._j_tenv)
return function_catalog
def _before_execute(self):
jvm = get_gateway().jvm
jars_key = jvm.org.apache.flink.configuration.PipelineOptions.JARS.key()
classpaths_key = jvm.org.apache.flink.configuration.PipelineOptions.CLASSPATHS.key()
self._add_jars_to_j_env_config(jars_key)
self._add_jars_to_j_env_config(classpaths_key)
def _wrap_aggregate_function_if_needed(self, function) -> UserDefinedFunctionWrapper:
if isinstance(function, (AggregateFunction, TableAggregateFunction,
UserDefinedAggregateFunctionWrapper)):
if not self._is_blink_planner:
raise Exception("Python UDAF and UDTAF are only supported in blink planner")
if isinstance(function, AggregateFunction):
function = udaf(function,
result_type=function.get_result_type(),
accumulator_type=function.get_accumulator_type(),
name=str(function.__class__.__name__))
elif isinstance(function, TableAggregateFunction):
function = udtaf(function,
result_type=function.get_result_type(),
accumulator_type=function.get_accumulator_type(),
name=str(function.__class__.__name__))
return function
class StreamTableEnvironment(TableEnvironment):
def __init__(self, j_tenv):
self._j_tenv = j_tenv
super(StreamTableEnvironment, self).__init__(j_tenv)
def _get_j_env(self):
if self._is_blink_planner:
return self._j_tenv.getPlanner().getExecEnv()
else:
return self._j_tenv.getPlanner().getExecutionEnvironment()
def connect(self, connector_descriptor: ConnectorDescriptor) -> StreamTableDescriptor:
"""
Creates a temporary table from a descriptor.
Descriptors allow for declaring the communication to external systems in an
implementation-agnostic way. The classpath is scanned for suitable table factories that
match the desired configuration.
The following example shows how to read from a connector using a JSON format and
registering a temporary table as "MyTable":
::
>>> table_env \\
... .connect(ExternalSystemXYZ()
... .version("0.11")) \\
... .with_format(Json()
... .json_schema("{...}")
... .fail_on_missing_field(False)) \\
... .with_schema(Schema()
... .field("user-name", "VARCHAR")
... .from_origin_field("u_name")
... .field("count", "DECIMAL")) \\
... .create_temporary_table("MyTable")
:param connector_descriptor: Connector descriptor describing the external system.
:return: A :class:`~pyflink.table.descriptors.StreamTableDescriptor` used to build the
temporary table.
.. note:: Deprecated in 1.11. Use :func:`execute_sql` to register a table instead.
"""
warnings.warn("Deprecated in 1.11. Use execute_sql instead.", DeprecationWarning)
return StreamTableDescriptor(
self._j_tenv.connect(connector_descriptor._j_connector_descriptor))
@staticmethod
def create(stream_execution_environment: StreamExecutionEnvironment = None,
table_config: TableConfig = None,
environment_settings: EnvironmentSettings = None) -> 'StreamTableEnvironment':
"""
Creates a :class:`~pyflink.table.StreamTableEnvironment`.
Example:
::
# create with StreamExecutionEnvironment.
>>> env = StreamExecutionEnvironment.get_execution_environment()
>>> table_env = StreamTableEnvironment.create(env)
# create with StreamExecutionEnvironment and TableConfig.
>>> table_config = TableConfig()
>>> table_config.set_null_check(False)
>>> table_env = StreamTableEnvironment.create(env, table_config)
# create with StreamExecutionEnvironment and EnvironmentSettings.
>>> environment_settings = EnvironmentSettings.new_instance().use_blink_planner() \\
... .build()
>>> table_env = StreamTableEnvironment.create(
... env, environment_settings=environment_settings)
# create with EnvironmentSettings.
>>> table_env = StreamTableEnvironment.create(environment_settings=environment_settings)
:param stream_execution_environment: The
:class:`~pyflink.datastream.StreamExecutionEnvironment`
of the TableEnvironment.
:param table_config: The configuration of the TableEnvironment, optional.
:param environment_settings: The environment settings used to instantiate the
TableEnvironment. It provides the interfaces about planner
selection(flink or blink), optional.
:return: The StreamTableEnvironment created from given StreamExecutionEnvironment and
configuration.
"""
if stream_execution_environment is None and \
table_config is None and \
environment_settings is None:
raise ValueError("No argument found, the param 'stream_execution_environment' "
"or 'environment_settings' is required.")
elif stream_execution_environment is None and \
table_config is not None and \
environment_settings is None:
raise ValueError("Only the param 'table_config' is found, "
"the param 'stream_execution_environment' is also required.")
if table_config is not None and \
environment_settings is not None:
raise ValueError("The param 'table_config' and "
"'environment_settings' cannot be used at the same time")
gateway = get_gateway()
if environment_settings is not None:
if not environment_settings.is_streaming_mode():
raise ValueError("The environment settings for StreamTableEnvironment must be "
"set to streaming mode.")
if stream_execution_environment is None:
j_tenv = gateway.jvm.TableEnvironment.create(
environment_settings._j_environment_settings)
else:
j_tenv = gateway.jvm.StreamTableEnvironment.create(
stream_execution_environment._j_stream_execution_environment,
environment_settings._j_environment_settings)
else:
if table_config is not None:
j_tenv = gateway.jvm.StreamTableEnvironment.create(
stream_execution_environment._j_stream_execution_environment,
table_config._j_table_config)
else:
j_tenv = gateway.jvm.StreamTableEnvironment.create(
stream_execution_environment._j_stream_execution_environment)
return StreamTableEnvironment(j_tenv)
def from_data_stream(self, data_stream: DataStream, *fields: Union[str, Expression]) -> Table:
"""
Converts the given DataStream into a Table with specified field names.
There are two modes for mapping original fields to the fields of the Table:
1. Reference input fields by name:
All fields in the schema definition are referenced by name (and possibly renamed using
        an alias (as)). Moreover, we can define proctime and rowtime attributes at arbitrary
positions using arbitrary names (except those that exist in the result schema). In this
mode, fields can be reordered and projected out. This mode can be used for any input
type.
2. Reference input fields by position:
In this mode, fields are simply renamed. Event-time attributes can replace the field on
their position in the input data (if it is of correct type) or be appended at the end.
Proctime attributes must be appended at the end. This mode can only be used if the input
type has a defined field order (tuple, case class, Row) and none of the fields
references a field of the input type.
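        Example (an illustrative sketch; ``ds`` is assumed to be a DataStream whose
        records carry two fields):
        ::
            >>> from pyflink.table.expressions import col
            >>> table = table_env.from_data_stream(ds, col("a"), col("b"))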
:param data_stream: The datastream to be converted.
        :param fields: The field expressions that map the original fields of the DataStream
                       to the fields of the Table.
:return: The converted Table.
.. versionadded:: 1.12.0
"""
j_data_stream = data_stream._j_data_stream
JPythonConfigUtil = get_gateway().jvm.org.apache.flink.python.util.PythonConfigUtil
JPythonConfigUtil.declareManagedMemory(
j_data_stream.getTransformation(),
self._get_j_env(),
self._j_tenv.getConfig())
if len(fields) == 0:
return Table(j_table=self._j_tenv.fromDataStream(j_data_stream), t_env=self)
elif all(isinstance(f, Expression) for f in fields):
return Table(j_table=self._j_tenv.fromDataStream(
j_data_stream, to_expression_jarray(fields)), t_env=self)
elif len(fields) == 1 and isinstance(fields[0], str):
warnings.warn(
"Deprecated in 1.12. Use from_data_stream(DataStream, *Expression) instead.",
DeprecationWarning)
return Table(j_table=self._j_tenv.fromDataStream(j_data_stream, fields[0]), t_env=self)
raise ValueError("Invalid arguments for 'fields': %r" % fields)
def to_append_stream(self, table: Table, type_info: TypeInformation) -> DataStream:
"""
Converts the given Table into a DataStream of a specified type. The Table must only have
insert (append) changes. If the Table is also modified by update or delete changes, the
conversion will fail.
The fields of the Table are mapped to DataStream as follows: Row and Tuple types: Fields are
mapped by position, field types must match.
:param table: The Table to convert.
:param type_info: The TypeInformation that specifies the type of the DataStream.
:return: The converted DataStream.
.. versionadded:: 1.12.0
"""
j_data_stream = self._j_tenv.toAppendStream(table._j_table, type_info.get_java_type_info())
return DataStream(j_data_stream=j_data_stream)
def to_retract_stream(self, table: Table, type_info: TypeInformation) -> DataStream:
"""
Converts the given Table into a DataStream of add and retract messages. The message will be
encoded as Tuple. The first field is a boolean flag, the second field holds the record of
the specified type.
A true flag indicates an add message, a false flag indicates a retract message.
The fields of the Table are mapped to DataStream as follows: Row and Tuple types: Fields are
mapped by position, field types must match.
:param table: The Table to convert.
:param type_info: The TypeInformation of the requested record type.
:return: The converted DataStream.
.. versionadded:: 1.12.0
"""
j_data_stream = self._j_tenv.toRetractStream(table._j_table, type_info.get_java_type_info())
return DataStream(j_data_stream=j_data_stream)
class BatchTableEnvironment(TableEnvironment):
def __init__(self, j_tenv):
self._j_tenv = j_tenv
super(BatchTableEnvironment, self).__init__(j_tenv)
def _get_j_env(self):
if self._is_blink_planner:
return self._j_tenv.getPlanner().getExecEnv()
else:
return self._j_tenv.execEnv()
def connect(self, connector_descriptor: ConnectorDescriptor) -> \
Union[BatchTableDescriptor, StreamTableDescriptor]:
"""
Creates a temporary table from a descriptor.
Descriptors allow for declaring the communication to external systems in an
implementation-agnostic way. The classpath is scanned for suitable table factories that
match the desired configuration.
The following example shows how to read from a connector using a JSON format and
registering a temporary table as "MyTable":
::
>>> table_env \\
... .connect(ExternalSystemXYZ()
... .version("0.11")) \\
... .with_format(Json()
... .json_schema("{...}")
... .fail_on_missing_field(False)) \\
... .with_schema(Schema()
... .field("user-name", "VARCHAR")
... .from_origin_field("u_name")
... .field("count", "DECIMAL")) \\
... .create_temporary_table("MyTable")
:param connector_descriptor: Connector descriptor describing the external system.
:return: A :class:`~pyflink.table.descriptors.BatchTableDescriptor` or a
:class:`~pyflink.table.descriptors.StreamTableDescriptor` (for blink planner) used
to build the temporary table.
.. note:: Deprecated in 1.11. Use :func:`execute_sql` to register a table instead.
"""
warnings.warn("Deprecated in 1.11. Use execute_sql instead.", DeprecationWarning)
gateway = get_gateway()
blink_t_env_class = get_java_class(
gateway.jvm.org.apache.flink.table.api.internal.TableEnvironmentImpl)
if blink_t_env_class == self._j_tenv.getClass():
return StreamTableDescriptor(
self._j_tenv.connect(connector_descriptor._j_connector_descriptor))
else:
return BatchTableDescriptor(
self._j_tenv.connect(connector_descriptor._j_connector_descriptor))
@staticmethod
def create(execution_environment: ExecutionEnvironment = None,
table_config: TableConfig = None,
environment_settings: EnvironmentSettings = None) -> 'BatchTableEnvironment':
"""
Creates a :class:`~pyflink.table.BatchTableEnvironment`.
Example:
::
# create with ExecutionEnvironment.
>>> env = ExecutionEnvironment.get_execution_environment()
>>> table_env = BatchTableEnvironment.create(env)
# create with ExecutionEnvironment and TableConfig.
>>> table_config = TableConfig()
>>> table_config.set_null_check(False)
>>> table_env = BatchTableEnvironment.create(env, table_config)
# create with EnvironmentSettings.
>>> environment_settings = EnvironmentSettings.new_instance().in_batch_mode() \\
... .use_blink_planner().build()
>>> table_env = BatchTableEnvironment.create(environment_settings=environment_settings)
:param execution_environment: The batch :class:`~pyflink.dataset.ExecutionEnvironment` of
the TableEnvironment.
:param table_config: The configuration of the TableEnvironment, optional.
:param environment_settings: The environment settings used to instantiate the
TableEnvironment. It provides the interfaces about planner
selection(flink or blink), optional.
:return: The BatchTableEnvironment created from given ExecutionEnvironment and
configuration.
"""
if execution_environment is None and \
table_config is None and \
environment_settings is None:
raise ValueError("No argument found, the param 'execution_environment' "
"or 'environment_settings' is required.")
elif execution_environment is None and \
table_config is not None and \
environment_settings is None:
raise ValueError("Only the param 'table_config' is found, "
"the param 'execution_environment' is also required.")
elif execution_environment is not None and \
environment_settings is not None:
raise ValueError("The param 'execution_environment' and "
"'environment_settings' cannot be used at the same time")
elif table_config is not None and \
environment_settings is not None:
raise ValueError("The param 'table_config' and "
"'environment_settings' cannot be used at the same time")
gateway = get_gateway()
if environment_settings is not None:
if environment_settings.is_streaming_mode():
raise ValueError("The environment settings for BatchTableEnvironment must be "
"set to batch mode.")
JEnvironmentSettings = get_gateway().jvm.org.apache.flink.table.api.EnvironmentSettings
old_planner_class_name = EnvironmentSettings.new_instance().in_batch_mode() \
.use_old_planner().build()._j_environment_settings \
.toPlannerProperties()[JEnvironmentSettings.CLASS_NAME]
planner_properties = environment_settings._j_environment_settings.toPlannerProperties()
if JEnvironmentSettings.CLASS_NAME in planner_properties and \
planner_properties[JEnvironmentSettings.CLASS_NAME] == old_planner_class_name:
# The Java EnvironmentSettings API does not support creating table environment with
# old planner. Create it from other API.
j_tenv = gateway.jvm.BatchTableEnvironment.create(
ExecutionEnvironment.get_execution_environment()._j_execution_environment)
else:
j_tenv = gateway.jvm.TableEnvironment.create(
environment_settings._j_environment_settings)
else:
if table_config is None:
j_tenv = gateway.jvm.BatchTableEnvironment.create(
execution_environment._j_execution_environment)
else:
j_tenv = gateway.jvm.BatchTableEnvironment.create(
execution_environment._j_execution_environment,
table_config._j_table_config)
return BatchTableEnvironment(j_tenv)
| apache-2.0 |
jpautom/scikit-learn | examples/cluster/plot_lena_ward_segmentation.py | 271 | 1998 | """
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
###############################################################################
# Generate data
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
X = np.reshape(lena, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)
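# (grid_to_graph builds a sparse adjacency matrix over the pixel grid, so the
# Ward merges computed below are only allowed between neighbouring pixels.)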
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(label == l, contours=1,
colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
radioxoma/immunopy | immunopy/stain/cdeconvcl.py | 1 | 6396 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import division
"""
Created on Wed Aug 06 19:37:30 2014
@author: radioxoma
"""
import os
import numpy as np
from skimage import color
import pyopencl as cl
import pyopencl.array as cla
from scipy import misc
import matplotlib.pyplot as plt
VERBOSE = False
class ColorDeconvolution(object):
"""Provide color deconvolution facilities with OpenCL.
"""
def __init__(self):
super(ColorDeconvolution, self).__init__()
self.__basetype = np.float32
curdir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(curdir, 'kernels.cl')) as f:
kernels = f.read()
# ctx = cl.create_some_context()
self.ctx = cl.Context(
cl.get_platforms()[0].get_devices(device_type=cl.device_type.GPU))
if VERBOSE:
print(self.ctx.get_info(cl.context_info.DEVICES))
queue = cl.CommandQueue(self.ctx)
self.prg = cl.Program(self.ctx, kernels).build()
# print(self.prg.get_info(cl.program_info.KERNEL_NAMES)) # Not in 1:2013.2
# self.stain = color.hed_from_rgb.astype(self.__basetype)
# self.stain_g = cla.to_device(queue, self.f_order(self.stain), self.mem_pool)
# stain = np.arange(9, dtype=self.__basetype).reshape((3, 3))
self.mem_pool = cl.tools.MemoryPool(cl.tools.ImmediateAllocator(queue))
def check_contiguous(self, arr):
"""Change memory layout to C (row-major) order, cast to float32.
It's *not* oposite of f_order.
"""
if not arr.flags.c_contiguous:
arr = np.ascontiguousarray(arr, dtype=np.float32)
if VERBOSE:
            print('check_arr: ascontiguous %d elements - performance may suffer' % arr.size)
if arr.dtype is not np.float32:
arr = arr.astype(np.float32)
if VERBOSE:
            print('check_arr: casting to float32 %d elements - performance may suffer' % arr.size)
return arr
def check_fortran(self, arr):
"""Change memory layout to FORTRAN (column-major) order, cast to float32.
"""
if not arr.flags.f_contiguous:
arr = np.asfortranarray(arr, dtype=np.float32)
if VERBOSE:
            print('check_arr: as fortran %d elements - performance may suffer' % arr.size)
if arr.dtype is not np.float32:
arr = arr.astype(np.float32)
if VERBOSE:
            print('check_arr: casting to float32 %d elements - performance may suffer' % arr.size)
return arr
def optical_density(self, rgb):
queue = cl.CommandQueue(self.ctx)
if rgb.dtype is not np.float32:
rgb = rgb.astype(np.float32)
img_g = cla.to_device(queue, rgb, self.mem_pool)
self.prg.opticalDense(queue, (img_g.size, 1), None, img_g.data)
return img_g.get()
def dot(self, A, B):
"""Output must have same shape as A.
Incoming RGB matrix "A" should be aligned
"""
A = self.check_contiguous(A)
B = self.check_contiguous(B)
assert(A.flags.c_contiguous == B.flags.c_contiguous)
queue = cl.CommandQueue(self.ctx)
if A.dtype is not np.float32:
A = A.astype(np.float32)
if B.dtype is not np.float32:
B = B.astype(np.float32)
A_g = cla.to_device(queue, A, self.mem_pool)
B_g = cla.to_device(queue, B, self.mem_pool)
C_g = cla.empty(queue, (A.shape[0], B.shape[1]), dtype=A_g.dtype, order="C", allocator=self.mem_pool)
self.prg.gemm_slow(queue, C_g.shape, None, C_g.data, A_g.data, B_g.data, np.int32(A.shape[1]), np.int32(B.shape[1]))
return C_g.get()
def unmix_stains(self, rgb, stain):
"""Take RGB IHC image and split it to stains like skimage version.
"""
rgb = self.check_contiguous(rgb)
stain = self.check_contiguous(stain)
assert(rgb.flags.c_contiguous == stain.flags.c_contiguous)
queue = cl.CommandQueue(self.ctx)
rgb2d = rgb.reshape(-1, 3) # 2D array with R,G,B columns from 3D
rgb2d_g = cla.to_device(queue, rgb2d, allocator=self.mem_pool)
stain_g = cla.to_device(queue, stain, allocator=self.mem_pool)
out_g = cla.empty(queue, (rgb2d.shape[0], stain.shape[1]), dtype=rgb2d_g.dtype, order="C", allocator=self.mem_pool)
# Process as flat array
self.prg.opticalDense(queue, (rgb2d.size, 1), None, rgb2d_g.data)
# In PyOpenCL arrays rgb2d_g.shape[0] is column count (usually 3 columns here).
self.prg.gemm_slow(queue, out_g.shape, None, out_g.data, rgb2d_g.data, stain_g.data, np.int32(rgb2d.shape[1]), np.int32(stain.shape[1]))
### self.prg.gemm(queue, rgb2d_g.shape, None, out_g.data, rgb2d_g.data, stain_g.data, np.int32(rgb2d_g.shape[0]), np.int32(stain_g.shape[1]))
# event =
# event.wait()
return out_g.get().reshape(rgb.shape) # Again 3D array
def color_deconvolution(self, rgb, stain):
"""Return stains in normal (non-logarithmic) color space.
"""
rgb = self.check_contiguous(rgb)
stain = self.check_contiguous(stain)
assert(rgb.flags.c_contiguous == stain.flags.c_contiguous)
queue = cl.CommandQueue(self.ctx)
rgb2d = rgb.reshape(-1, 3) # 2D array with R,G,B columns from 3D
rgb2d_g = cla.to_device(queue, rgb2d, allocator=self.mem_pool)
stain_g = cla.to_device(queue, stain, allocator=self.mem_pool)
out_g = cla.empty(queue, (rgb2d.shape[0], stain.shape[1]), dtype=rgb2d_g.dtype, order="C", allocator=self.mem_pool)
# Process as flat array
self.prg.opticalDense(queue, (rgb2d.size, 1), None, rgb2d_g.data)
# In PyOpenCL arrays rgb2d_g.shape[0] is column count (usually 3 columns here).
self.prg.gemm_slow(queue, out_g.shape, None, out_g.data, rgb2d_g.data, stain_g.data, np.int32(rgb2d.shape[1]), np.int32(stain.shape[1]))
self.prg.toColorDense(queue, (out_g.size, 1), None, out_g.data)
return out_g.get().reshape(rgb.shape) # Again 3D array
def f_order(arr):
"""Convert to FORTRAN (column-major) order, if still not."""
if arr.flags.c_contiguous:
print("Transposing array")
return np.array(arr.T, copy=False, order='F')
else:
return np.array(arr, copy=False, order='F')
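if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module: deconvolve a
    # synthetic RGB image with skimage's Haematoxylin-Eosin-DAB stain matrix.
    # It assumes an OpenCL-capable GPU is present, since ColorDeconvolution
    # requests a GPU device in its constructor.
    cdec = ColorDeconvolution()
    rgb = np.random.rand(64, 64, 3).astype(np.float32)
    stains = cdec.color_deconvolution(rgb, color.hed_from_rgb.astype(np.float32))
    print(stains.shape)  # (64, 64, 3): one output channel per stain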
| mit |
ngoix/OCRF | examples/covariance/plot_covariance_estimation.py | 99 | 5074 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A close formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.model_selection import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
| bsd-3-clause |
scikit-optimize/scikit-optimize | skopt/sampler/hammersly.py | 1 | 3447 | # -*- coding: utf-8 -*-
""" Inspired by https://github.com/jonathf/chaospy/blob/master/chaospy/
distributions/sampler/sequences/hammersley.py
"""
import numpy as np
from .halton import Halton
from ..space import Space
from .base import InitialPointGenerator
from sklearn.utils import check_random_state
class Hammersly(InitialPointGenerator):
"""Creates `Hammersley` sequence samples.
    The Hammersley set is equivalent to the Halton sequence, except that one
    dimension is replaced with a regular grid. It is not recommended to
    generate a Hammersley sequence with more than 10 dimensions.
For ``dim == 1`` the sequence falls back to Van Der Corput sequence.
References
----------
T-T. Wong, W-S. Luk, and P-A. Heng, "Sampling with Hammersley and Halton
Points," Journal of Graphics Tools, vol. 2, no. 2, 1997, pp. 9 - 24.
Parameters
----------
    min_skip : int, default=0
Minimum skipped seed number. When `min_skip != max_skip` and
both are > -1, a random number is picked.
    max_skip : int, default=0
Maximum skipped seed number. When `min_skip != max_skip` and
both are > -1, a random number is picked.
primes : tuple, default=None
The (non-)prime base to calculate values along each axis. If
empty, growing prime values starting from 2 will be used.
"""
def __init__(self, min_skip=0, max_skip=0, primes=None):
self.primes = primes
self.min_skip = min_skip
self.max_skip = max_skip
def generate(self, dimensions, n_samples, random_state=None):
"""Creates samples from Hammersly set.
Parameters
----------
dimensions : list, shape (n_dims,)
List of search space dimensions.
Each search dimension can be defined either as
- a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
dimensions),
- a `(lower_bound, upper_bound, "prior")` tuple (for `Real`
dimensions),
- as a list of categories (for `Categorical` dimensions), or
- an instance of a `Dimension` object (`Real`, `Integer` or
`Categorical`).
n_samples : int
The order of the Hammersley sequence.
Defines the number of samples.
random_state : int, RandomState instance, or None (default)
Set random state to something other than None for reproducible
results.
Returns
-------
np.array, shape=(n_dim, n_samples)
Hammersley set.
"""
rng = check_random_state(random_state)
halton = Halton(min_skip=self.min_skip, max_skip=self.max_skip,
primes=self.primes)
space = Space(dimensions)
n_dim = space.n_dims
transformer = space.get_transformer()
space.set_transformer("normalize")
if n_dim == 1:
out = halton.generate(dimensions, n_samples,
random_state=rng)
else:
out = np.empty((n_dim, n_samples), dtype=float)
out[:n_dim - 1] = np.array(halton.generate(
[(0., 1.), ] * (n_dim - 1), n_samples,
random_state=rng)).T
out[n_dim - 1] = np.linspace(0, 1, n_samples + 1)[:-1]
out = space.inverse_transform(out.T)
space.set_transformer(transformer)
return out
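# Usage sketch (illustrative, not part of the module itself); it assumes the
# package-level export ``from skopt.sampler import Hammersly``:
#
#     hammersly = Hammersly()
#     samples = hammersly.generate([(0.0, 1.0), (0.0, 1.0)], n_samples=8)
#     # ``samples`` holds 8 two-dimensional points spread over the unit square.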
| bsd-3-clause |
TheTimmy/spack | var/spack/repos/builtin/packages/paraview/package.py | 1 | 5978 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Paraview(CMakePackage):
"""ParaView is an open-source, multi-platform data analysis and
visualization application."""
homepage = 'http://www.paraview.org'
url = "http://www.paraview.org/files/v5.3/ParaView-v5.3.0.tar.gz"
_urlfmt = 'http://www.paraview.org/files/v{0}/ParaView-v{1}{2}.tar.gz'
version('5.4.0', 'b92847605bac9036414b644f33cb7163')
version('5.3.0', '68fbbbe733aa607ec13d1db1ab5eba71')
version('5.2.0', '4570d1a2a183026adb65b73c7125b8b0')
version('5.1.2', '44fb32fc8988fcdfbc216c9e40c3e925')
version('5.0.1', 'fdf206113369746e2276b95b257d2c9b')
version('4.4.0', 'fa1569857dd680ebb4d7ff89c2227378')
variant('plugins', default=True,
description='Install include files for plugins support')
variant('python', default=False, description='Enable Python support')
variant('mpi', default=True, description='Enable MPI support')
variant('osmesa', default=False, description='Enable OSMesa support')
variant('qt', default=False, description='Enable Qt (gui) support')
variant('opengl2', default=True, description='Enable OpenGL2 backend')
depends_on('python@2:2.8', when='+python')
depends_on('py-numpy', when='+python', type='run')
depends_on('py-matplotlib', when='+python', type='run')
depends_on('mpi', when='+mpi')
depends_on('qt', when='@5.3.0:+qt')
depends_on('qt@:4', when='@:5.2.0+qt')
depends_on('bzip2')
depends_on('freetype')
# depends_on('hdf5+mpi', when='+mpi')
# depends_on('hdf5~mpi', when='~mpi')
depends_on('jpeg')
depends_on('libpng')
depends_on('libtiff')
depends_on('libxml2')
# depends_on('netcdf')
# depends_on('netcdf-cxx')
# depends_on('protobuf') # version mismatches?
# depends_on('sqlite') # external version not supported
depends_on('zlib')
depends_on('[email protected]:', type='build')
patch('stl-reader-pv440.patch', when='@4.4.0')
# Broken gcc-detection - improved in 5.1.0, redundant later
patch('gcc-compiler-pv501.patch', when='@:5.0.1')
# Broken installation (ui_pqExportStateWizard.h) - fixed in 5.2.0
patch('ui_pqExportStateWizard.patch', when='@:5.1.2')
def url_for_version(self, version):
"""Handle ParaView version-based custom URLs."""
if version < Version('5.1.0'):
return self._urlfmt.format(version.up_to(2), version, '-source')
else:
return self._urlfmt.format(version.up_to(2), version, '')
def cmake_args(self):
"""Populate cmake arguments for ParaView."""
spec = self.spec
def variant_bool(feature, on='ON', off='OFF'):
"""Ternary for spec variant to ON/OFF string"""
if feature in spec:
return on
return off
def nvariant_bool(feature):
"""Negated ternary for spec variant to OFF/ON string"""
return variant_bool(feature, on='OFF', off='ON')
rendering = variant_bool('+opengl2', 'OpenGL2', 'OpenGL')
includes = variant_bool('+plugins')
cmake_args = [
'-DPARAVIEW_BUILD_QT_GUI:BOOL=%s' % variant_bool('+qt'),
'-DVTK_OPENGL_HAS_OSMESA:BOOL=%s' % variant_bool('+osmesa'),
'-DVTK_USE_X:BOOL=%s' % nvariant_bool('+osmesa'),
'-DVTK_RENDERING_BACKEND:STRING=%s' % rendering,
'-DPARAVIEW_INSTALL_DEVELOPMENT_FILES:BOOL=%s' % includes,
'-DBUILD_TESTING:BOOL=OFF',
'-DVTK_USE_SYSTEM_FREETYPE:BOOL=ON',
'-DVTK_USE_SYSTEM_HDF5:BOOL=OFF',
'-DVTK_USE_SYSTEM_JPEG:BOOL=ON',
'-DVTK_USE_SYSTEM_LIBXML2:BOOL=ON',
'-DVTK_USE_SYSTEM_NETCDF:BOOL=OFF',
'-DVTK_USE_SYSTEM_TIFF:BOOL=ON',
'-DVTK_USE_SYSTEM_ZLIB:BOOL=ON',
]
# The assumed qt version changed to QT5 (as of paraview 5.2.1),
# so explicitly specify which QT major version is actually being used
if '+qt' in spec:
cmake_args.extend([
'-DPARAVIEW_QT_VERSION=%s' % spec['qt'].version[0],
])
if '+python' in spec:
cmake_args.extend([
'-DPARAVIEW_ENABLE_PYTHON:BOOL=ON',
'-DPYTHON_EXECUTABLE:FILEPATH=%s' % spec['python'].command.path
])
if '+mpi' in spec:
cmake_args.extend([
'-DPARAVIEW_USE_MPI:BOOL=ON',
'-DMPIEXEC:FILEPATH=%s/bin/mpiexec' % spec['mpi'].prefix
])
if 'darwin' in self.spec.architecture:
cmake_args.extend([
'-DVTK_USE_X:BOOL=OFF',
'-DPARAVIEW_DO_UNIX_STYLE_INSTALLS:BOOL=ON',
])
return cmake_args
| lgpl-2.1 |
0todd0000/spm1d | spm1d/rft1d/examples/val_max_6_twosample_T2_1d.py | 1 | 2060 |
import numpy as np
from matplotlib import pyplot
from spm1d import rft1d
def here_hotellingsT2_2samp(yA, yB):
NA,NB = float(yA.shape[0]), float(yB.shape[0])
N = NA + NB
mA,mB = np.matrix(yA.mean(axis=0)), np.matrix(yB.mean(axis=0))
T2 = []
for ii,(mmA,mmB) in enumerate(zip(mA,mB)):
yyA,yyB = np.matrix(yA[:,ii,:]), np.matrix(yB[:,ii,:])
WA,WB = np.cov(yyA.T), np.cov(yyB.T)
W = ((NA-1)*WA + (NB-1)*WB) / (N-2)
t2 = (NA*NB)/float(NA+NB) * (mmB-mmA) * np.linalg.inv(W) * (mmB-mmA).T
T2.append( float(t2) )
return np.asarray(T2)
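# The helper above evaluates, at each field node, the standard two-sample
# Hotelling's T2 statistic with pooled covariance:
#     W  = ((NA-1)*WA + (NB-1)*WB) / (N-2)
#     T2 = (NA*NB)/(NA+NB) * (mB-mA) * inv(W) * (mB-mA)'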
#(0) Set parameters:
np.random.seed(123456789)
nResponsesA = 15
nResponsesB = 15
nComponents = 2
nIterations = 50 #1000 or bigger for convergence
nNodes = 101
FWHM = 15.0
W0 = np.eye(nComponents)
### derived parameters:
nTotal = nResponsesA + nResponsesB
df = nComponents, nTotal-2
#(1) Generate Gaussian 1D fields, compute test stat, store field maximum:
T2 = []
generatorA = rft1d.random.GeneratorMulti1D(nResponsesA, nNodes, nComponents, FWHM, W0)
generatorB = rft1d.random.GeneratorMulti1D(nResponsesB, nNodes, nComponents, FWHM, W0)
for i in range(nIterations):
yA = generatorA.generate_sample()
	yB = generatorB.generate_sample()
t2 = here_hotellingsT2_2samp(yA, yB)
T2.append( t2.max() )
T2 = np.asarray(T2)
#(2) Compute survival function (SF) for the field maximum:
heights = np.linspace(10, 40, 21)
sf = np.array( [ (T2>h).mean() for h in heights] )
sfE = rft1d.T2.sf(heights, df, nNodes, FWHM) #theoretical
sf0D = rft1d.T2.sf0d(heights, df) #theoretical (0D)
#(3) Plot results:
pyplot.close('all')
ax = pyplot.axes()
ax.plot(heights, sf, 'o', label='Simulated')
ax.plot(heights, sfE, '-', label='Theoretical')
ax.plot(heights, sf0D, 'r-', label='Theoretical (0D)')
ax.set_xlabel('$u$', size=20)
ax.set_ylabel('$P (T^2_\mathrm{max} > u)$', size=20)
ax.legend()
ax.set_title("Two-sample Hotelling's T2 validation (1D)", size=20)
pyplot.show()
| gpl-3.0 |
eranchetz/nupic | examples/audiostream/audiostream_tp.py | 32 | 9991 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
See README.md for details.
"""
"""
numpy - the language of pyaudio (& everything else)
pyaudio - access to the mic via the soundcard
pyplot - to plot the sound frequencies
bitmaparray - encodes an array of indices into an SDR
TP10X2 - the C++ optimized temporal pooler (TP)
"""
import numpy
import pyaudio
import matplotlib.pyplot as plt
from nupic.encoders.sparse_pass_through_encoder import SparsePassThroughEncoder
from nupic.research.TP10X2 import TP10X2 as TP
class Visualizations:
def calcAnomaly(self, actual, predicted):
"""
Calculates the anomaly of two SDRs
Uses the equation presented on the wiki:
https://github.com/numenta/nupic/wiki/Anomaly-Score-Memo
To put this in terms of the temporal pooler:
A is the actual input array at a given timestep
P is the predicted array that was produced from the previous timestep(s)
[A - (A && P)] / [A]
Rephrasing as questions:
What bits are on in A that are not on in P?
How does that compare to total on bits in A?
    Outputs 0 if there's no difference between P and A.
Outputs 1 if P and A are totally distinct.
Not a perfect metric - it doesn't credit proximity
Next step: combine with a metric for a spatial pooler
"""
combined = numpy.logical_and(actual, predicted)
delta = numpy.logical_xor(actual,combined)
delta_score = sum(delta)
actual_score = float(sum(actual))
return delta_score / actual_score
def compareArray(self, actual, predicted):
"""
Produce an array that compares the actual & predicted
'A' - actual
'P' - predicted
'E' - expected (both actual & predicted
' ' - neither an input nor predicted
"""
compare = []
for i in range(actual.size):
if actual[i] and predicted[i]:
compare.append('E')
elif actual[i]:
compare.append('A')
elif predicted[i]:
compare.append('P')
else:
compare.append(' ')
return compare
def hashtagAnomaly(self, anomaly):
"""
Basic printout method to visualize the anomaly score (scale: 1 - 50 #'s)
"""
hashcount = '#'
for i in range(int(anomaly / 0.02)):
hashcount += '#'
for j in range(int((1 - anomaly) / 0.02)):
hashcount += '.'
return hashcount
class AudioStream:
def __init__(self):
"""
Instantiate temporal pooler, encoder, audio sampler, filter, & freq plot
"""
self.vis = Visualizations()
"""
The number of columns in the input and therefore the TP
2**9 = 512
Trial and error pulled that out
numCols should be tested during benchmarking
"""
self.numCols = 2**9
sparsity = 0.10
self.numInput = int(self.numCols * sparsity)
"""
Create a bit map encoder
From the encoder's __init__ method:
1st arg: the total bits in input
2nd arg: the number of bits used to encode each input bit
"""
self.e = SparsePassThroughEncoder(self.numCols, 1)
"""
Sampling details
rate: The sampling rate in Hz of my soundcard
buffersize: The size of the array to which we will save audio segments (2^12 = 4096 is very good)
secToRecord: The length of each sampling
buffersToRecord: how many multiples of buffers are we recording?
"""
rate=44100
secToRecord=.1
self.buffersize=2**12
self.buffersToRecord=int(rate*secToRecord/self.buffersize)
if not self.buffersToRecord:
self.buffersToRecord=1
"""
Filters in Hertz
highHertz: lower limit of the bandpass filter, in Hertz
lowHertz: upper limit of the bandpass filter, in Hertz
max lowHertz = (buffersize / 2 - 1) * rate / buffersize
"""
highHertz = 500
lowHertz = 10000
"""
Convert filters from Hertz to bins
highpass: convert the highHertz into a bin for the FFT
lowpass: convert the lowHertz into a bin for the FFt
NOTES:
highpass is at least the 1st bin since most mics only pick up >=20Hz
lowpass is no higher than buffersize/2 - 1 (highest array index)
passband needs to be wider than size of numInput - not checking for that
"""
self.highpass = max(int(highHertz * self.buffersize / rate),1)
self.lowpass = min(int(lowHertz * self.buffersize / rate), self.buffersize/2 - 1)
"""
The call to create the temporal pooler region
"""
self.tp = TP(numberOfCols=self.numCols, cellsPerColumn=4,
initialPerm=0.5, connectedPerm=0.5,
minThreshold=10, newSynapseCount=10,
permanenceInc=0.1, permanenceDec=0.07,
activationThreshold=8,
globalDecay=0.02, burnIn=2,
checkSynapseConsistency=False,
pamLength=100)
"""
Creating the audio stream from our mic
"""
p = pyaudio.PyAudio()
self.inStream = p.open(format=pyaudio.paInt32,channels=1,rate=rate,input=True,frames_per_buffer=self.buffersize)
"""
Setting up the array that will handle the timeseries of audio data from our input
"""
self.audio = numpy.empty((self.buffersToRecord*self.buffersize),dtype="uint32")
"""
Print out the inputs
"""
print "Number of columns:\t" + str(self.numCols)
print "Max size of input:\t" + str(self.numInput)
print "Sampling rate (Hz):\t" + str(rate)
print "Passband filter (Hz):\t" + str(highHertz) + " - " + str(lowHertz)
print "Passband filter (bin):\t" + str(self.highpass) + " - " + str(self.lowpass)
print "Bin difference:\t\t" + str(self.lowpass - self.highpass)
print "Buffersize:\t\t" + str(self.buffersize)
"""
Setup the plot
Use the bandpass filter frequency range as the x-axis
Rescale the y-axis
"""
plt.ion()
bin = range(self.highpass,self.lowpass)
xs = numpy.arange(len(bin))*rate/self.buffersize + highHertz
self.freqPlot = plt.plot(xs,xs)[0]
plt.ylim(0, 10**12)
while True:
self.processAudio()
def processAudio (self):
"""
Sample audio, encode, send it to the TP
Pulls the audio from the mic
Conditions that audio as an SDR
Computes a prediction via the TP
Update the visualizations
"""
"""
Cycle through the multiples of the buffers we're sampling
Sample audio to store for each frame in buffersize
Mic voltage-level timeseries is saved as 32-bit binary
Convert that 32-bit binary into integers, and save to array for the FFT
"""
for i in range(self.buffersToRecord):
try:
audioString = self.inStream.read(self.buffersize)
except IOError:
print "Overflow error from 'audiostring = inStream.read(buffersize)'. Try decreasing buffersize."
quit()
self.audio[i*self.buffersize:(i + 1)*self.buffersize] = numpy.fromstring(audioString,dtype = "uint32")
"""
Get int array of strength for each bin of frequencies via fast fourier transform
Get the indices of the strongest frequencies (the top 'numInput')
Scale the indices so that the frequencies fit to within numCols
Pick out the unique indices (we've reduced the mapping, so we likely have multiples)
Encode those indices into an SDR via the SparsePassThroughEncoder
Cast the SDR as a float for the TP
"""
ys = self.fft(self.audio, self.highpass, self.lowpass)
fs = numpy.sort(ys.argsort()[-self.numInput:])
rfs = fs.astype(numpy.float32) / (self.lowpass - self.highpass) * self.numCols
ufs = numpy.unique(rfs)
actualInt = self.e.encode(ufs)
actual = actualInt.astype(numpy.float32)
"""
Pass the SDR to the TP
Collect the prediction SDR from the TP
Pass the prediction & actual SDRS to the anomaly calculator & array comparer
Update the frequency plot
"""
self.tp.compute(actual, enableLearn = True, computeInfOutput = True)
predictedInt = self.tp.getPredictedState().max(axis=1)
compare = self.vis.compareArray(actualInt, predictedInt)
anomaly = self.vis.calcAnomaly(actualInt, predictedInt)
print "." . join(compare)
print self.vis.hashtagAnomaly(anomaly)
self.freqPlot.set_ydata(ys)
plt.show(block = False)
plt.draw()
def fft(self, audio, highpass, lowpass):
"""
Fast fourier transform conditioning
Output:
'output' contains the strength of each frequency in the audio signal
frequencies are marked by its position in 'output':
frequency = index * rate / buffesize
output.size = buffersize/2
Method:
Use numpy's FFT (numpy.fft.fft)
Find the magnitude of the complex numbers returned (abs value)
Split the FFT array in half, because we have mirror frequencies
(they're the complex conjugates)
Use just the first half to apply the bandpass filter
Great info here: http://stackoverflow.com/questions/4364823/how-to-get-frequency-from-fft-result
"""
left,right = numpy.split(numpy.abs(numpy.fft.fft(audio)),2)
output = left[highpass:lowpass]
return output
audiostream = AudioStream()
| agpl-3.0 |
lfairchild/PmagPy | programs/zeq_magic.py | 1 | 32807 | #!/usr/bin/env python
# -*- python-indent-offset: 4; -*-
import pandas as pd
import numpy as np
import sys
import os
import matplotlib
if matplotlib.get_backend() != "TKAgg":
matplotlib.use("TKAgg")
import pmagpy.pmag as pmag
import pmagpy.pmagplotlib as pmagplotlib
import pmagpy.contribution_builder as cb
from pmag_env import set_env
def save_redo(SpecRecs, inspec):
print("Saving changes to specimen file")
pmag.magic_write(inspec, SpecRecs, 'specimens')
def main():
"""
NAME
zeq_magic.py
DESCRIPTION
reads in magic_measurements formatted file, makes plots of remanence decay
during demagnetization experiments. Reads in prior interpretations saved in
a pmag_specimens formatted file [and allows re-interpretations of best-fit lines
and planes and saves (revised or new) interpretations in a pmag_specimens file.
interpretations are saved in the coordinate system used. Also allows judicious editting of
measurements to eliminate "bad" measurements. These are marked as such in the magic_measurements
        input file.  They are NOT deleted, just ignored. ] Bracketed part not yet implemented
SYNTAX
zeq_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f MEASFILE: sets measurements format input file, default: measurements.txt
        -fsp SPECFILE: sets specimens format file with prior interpretations, default: specimens.txt
        -fsa SAMPFILE: sets samples format file with sample=>site information, default: samples.txt
        -fsi SITEFILE: sets sites format file with site=>location information, default: sites.txt
-Fp PLTFILE: sets filename for saved plot, default is name_type.fmt (where type is zijd, eqarea or decay curve)
-crd [s,g,t]: sets coordinate system, g=geographic, t=tilt adjusted, default: specimen coordinate system
-spc SPEC plots single specimen SPEC, saves plot with specified format
with optional -dir settings and quits
-dir [L,P,F][beg][end]: sets calculation type for principal component analysis, default is none
beg: starting step for PCA calculation
end: ending step for PCA calculation
[L,P,F]: calculation type for line, plane or fisher mean
must be used with -spc option
-fmt FMT: set format of saved plot [png,svg,jpg]
-A: suppresses averaging of replicate measurements, default is to average
-sav: saves all plots without review
SCREEN OUTPUT:
Specimen, N, a95, StepMin, StepMax, Dec, Inc, calculation type
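    EXAMPLE
        zeq_magic.py -spc spc01a -dir L 2 9 -crd g -fmt png -sav
        (illustrative invocation; the specimen name and step indices are
        placeholders for values taken from your own measurements file)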
"""
# initialize some variables
doave, e, b = 1, 0, 0 # average replicates, initial end and beginning step
intlist = ['magn_moment', 'magn_volume', 'magn_mass', 'magnitude']
plots, coord = 0, 's'
noorient = 0
version_num = pmag.get_version()
verbose = pmagplotlib.verbose
calculation_type, fmt = "", "svg"
spec_keys = []
geo, tilt, ask = 0, 0, 0
PriorRecs = [] # empty list for prior interpretations
backup = 0
specimen = "" # can skip everything and just plot one specimen with bounds e,b
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
dir_path = pmag.get_named_arg("-WD", default_val=os.getcwd())
meas_file = pmag.get_named_arg(
"-f", default_val="measurements.txt")
spec_file = pmag.get_named_arg(
"-fsp", default_val="specimens.txt")
samp_file = pmag.get_named_arg("-fsa", default_val="samples.txt")
site_file = pmag.get_named_arg("-fsi", default_val="sites.txt")
#meas_file = os.path.join(dir_path, meas_file)
#spec_file = os.path.join(dir_path, spec_file)
#samp_file = os.path.join(dir_path, samp_file)
#site_file = os.path.join(dir_path, site_file)
plot_file = pmag.get_named_arg("-Fp", default_val="")
crd = pmag.get_named_arg("-crd", default_val="s")
if crd == "s":
coord = "-1"
elif crd == "t":
coord = "100"
else:
coord = "0"
saved_coord = coord
fmt = pmag.get_named_arg("-fmt", "svg")
specimen = pmag.get_named_arg("-spc", default_val="")
#if specimen: # just save plot and exit
# plots, verbose = 1, 0
beg_pca, end_pca = "", ""
if '-dir' in sys.argv:
ind = sys.argv.index('-dir')
direction_type = sys.argv[ind + 1]
beg_pca = int(sys.argv[ind + 2])
end_pca = int(sys.argv[ind + 3])
if direction_type == 'L':
calculation_type = 'DE-BFL'
if direction_type == 'P':
calculation_type = 'DE-BFP'
if direction_type == 'F':
calculation_type = 'DE-FM'
if '-A' in sys.argv:
doave = 0
if '-sav' in sys.argv:
plots, verbose = 1, 0
#
first_save = 1
fnames = {'measurements': meas_file, 'specimens': spec_file,
'samples': samp_file, 'sites': site_file}
contribution = cb.Contribution(dir_path, custom_filenames=fnames, read_tables=[
'measurements', 'specimens', 'samples', 'sites'])
#
# import specimens
if 'measurements' not in contribution.tables:
print('-W- No measurements table found in your working directory')
return
specimen_cols = ['analysts', 'aniso_ftest', 'aniso_ftest12', 'aniso_ftest23', 'aniso_s', 'aniso_s_mean', 'aniso_s_n_measurements', 'aniso_s_sigma', 'aniso_s_unit', 'aniso_tilt_correction', 'aniso_type', 'aniso_v1', 'aniso_v2', 'aniso_v3', 'citations', 'description', 'dir_alpha95', 'dir_comp', 'dir_dec', 'dir_inc', 'dir_mad_free', 'dir_n_measurements', 'dir_tilt_correction', 'experiments', 'geologic_classes',
'geologic_types', 'hyst_bc', 'hyst_bcr', 'hyst_mr_moment', 'hyst_ms_moment', 'int_abs', 'int_b', 'int_b_beta', 'int_b_sigma', 'int_corr', 'int_dang', 'int_drats', 'int_f', 'int_fvds', 'int_gamma', 'int_mad_free', 'int_md', 'int_n_measurements', 'int_n_ptrm', 'int_q', 'int_rsc', 'int_treat_dc_field', 'lithologies', 'meas_step_max', 'meas_step_min', 'meas_step_unit', 'method_codes', 'sample', 'software_packages', 'specimen']
if 'specimens' in contribution.tables:
contribution.propagate_name_down('sample','measurements')
# add location/site info to measurements table for naming plots
if pmagplotlib.isServer:
contribution.propagate_name_down('site', 'measurements')
contribution.propagate_name_down('location', 'measurements')
spec_container = contribution.tables['specimens']
if 'method_codes' not in spec_container.df.columns:
spec_container.df['method_codes'] = None
prior_spec_data = spec_container.get_records_for_code(
'LP-DIR', strict_match=False) # look up all prior directional interpretations
#
# tie sample names to measurement data
#
else:
spec_container, prior_spec_data = None, []
#
# import samples for orientation info
#
if 'samples' in contribution.tables:
samp_container=contribution.tables['samples']
samps=samp_container.df
samp_data=samps.to_dict('records')# convert to list of dictionaries for use with get_orient
else:
samp_data=[]
#if ('samples' in contribution.tables) and ('specimens' in contribution.tables):
# # contribution.propagate_name_down('site','measurements')
# contribution.propagate_cols(col_names=[
# 'azimuth', 'dip', 'orientation_quality','bed_dip','bed_dip_direction'], target_df_name='measurements', source_df_name='samples')
##
# define figure numbers for equal area, zijderveld,
    # and intensity vs. demagnetization step respectively
#
ZED = {}
ZED['eqarea'], ZED['zijd'], ZED['demag'] = 1, 2, 3
pmagplotlib.plot_init(ZED['eqarea'], 6, 6)
pmagplotlib.plot_init(ZED['zijd'], 6, 6)
pmagplotlib.plot_init(ZED['demag'], 6, 6)
# save_pca=0
angle, direction_type, setangle = "", "", 0
# create measurement dataframe
#
meas_container = contribution.tables['measurements']
meas_data = meas_container.df
#
meas_data = meas_data[meas_data['method_codes'].str.contains(
'LT-NO|LT-AF-Z|LT-T-Z|LT-M-Z') == True] # fish out steps for plotting
meas_data = meas_data[meas_data['method_codes'].str.contains(
'AN|ARM|LP-TRM|LP-PI-ARM') == False] # strip out unwanted experiments
intensity_types = [
col_name for col_name in meas_data.columns if col_name in intlist]
intensity_types = [
col_name for col_name in intensity_types if any(meas_data[col_name])]
if not len(intensity_types):
print('-W- No intensity columns found')
return
    # plot the first non-empty intensity column found - values are normalized
    # to the initial value anyway, so it doesn't matter which one is used
int_key = cb.get_intensity_col(meas_data)
# get all the non-null intensity records of the same type
meas_data = meas_data[meas_data[int_key].notnull()]
if 'quality' not in meas_data.columns:
meas_data['quality'] = 'g' # set the default flag to good
# need to treat LP-NO specially for af data, treatment should be zero,
# otherwise 273.
#meas_data['treatment'] = meas_data['treat_ac_field'].where(
# cond=meas_data['treat_ac_field'] != 0, other=meas_data['treat_temp'])
meas_data['treatment'] = meas_data['treat_ac_field'].where(
cond=meas_data['treat_ac_field'].astype(bool), other=meas_data['treat_temp'])
meas_data['ZI'] = 1 # initialize these to one
meas_data['instrument_codes'] = "" # initialize these to blank
# for unusual case of microwave power....
if 'treat_mw_power' in meas_data.columns:
meas_data.loc[
(meas_data.treat_mw_power != 0) &
(meas_data.treat_mw_power) &
(meas_data.treat_mw_time),
'treatment'] = meas_data.treat_mw_power * meas_data.treat_mw_time
#
# get list of unique specimen names from measurement data
#
# this is a list of all the specimen names
specimen_names = meas_data.specimen.unique()
specimen_names = specimen_names.tolist()
specimen_names.sort()
#
    # set up new DataFrame for this session's specimen interpretations
#
data_container = cb.MagicDataFrame(
dtype='specimens', columns=specimen_cols)
# this is for interpretations from this session
current_spec_data = data_container.df
if specimen == "":
k = 0
else:
k = specimen_names.index(specimen)
# let's look at the data now
while k < len(specimen_names):
mpars={"specimen_direction_type": "Error"}
# set the current specimen for plotting
this_specimen = str(specimen_names[k])
# reset beginning/end pca if plotting more than one specimen
if not specimen:
beg_pca, end_pca = "", ""
if verbose and this_specimen != "":
print(this_specimen, k + 1, 'out of ', len(specimen_names))
if setangle == 0:
angle = ""
this_specimen_measurements = meas_data[meas_data['specimen'].astype(str).str.contains(
this_specimen).astype(bool)] # fish out this specimen
this_specimen_measurements = this_specimen_measurements[-this_specimen_measurements['quality'].str.contains(
'b').astype(bool)] # remove bad measurements
if len(this_specimen_measurements) != 0: # if there are measurements
meas_list=this_specimen_measurements.to_dict('records') # get a list of dictionaries
this_sample=""
if coord != '-1' and 'sample' in meas_list[0].keys(): # look up sample name
this_sample=pmag.get_dictitem(meas_list,'specimen',this_specimen,'T')
if len(this_sample)>0:
this_sample=this_sample[0]['sample']
#
# set up datablock [[treatment,dec, inc, int, direction_type],[....]]
#
#
# figure out the method codes
#
units, methods, title = "", "", this_specimen
if pmagplotlib.isServer:
try:
loc = this_specimen_measurements.loc[:, 'location'].values[0]
except:
loc = ""
try:
site = this_specimen_measurements.loc[:, 'site'].values[0]
except:
site = ""
try:
samp = this_specimen_measurements.loc[:, 'sample'].values[0]
except:
samp = ""
title = "LO:_{}_SI:_{}_SA:_{}_SP:_{}_".format(loc, site, samp, this_specimen)
# this is a list of all the specimen method codes
meas_meths = this_specimen_measurements.method_codes.unique()
tr = pd.to_numeric(this_specimen_measurements.treatment).tolist()
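            # skip specimens whose treatment steps are missing or all zero - nothing sensible can be plotted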
if any(cb.is_null(treat, False) for treat in tr):
print('-W- Missing required values in measurements.treatment for {}, skipping'.format(this_specimen))
if specimen:
return
k += 1
continue
if set(tr) == set([0]):
print('-W- Missing required values in measurements.treatment for {}, skipping'.format(this_specimen))
if specimen:
return
k += 1
continue
for m in meas_meths:
if 'LT-AF-Z' in m and 'T' not in units:
units = 'T' # units include tesla
tr[0] = 0
if 'LT-T-Z' in m and 'K' not in units:
units = units + ":K" # units include kelvin
if 'LT-M-Z' in m and 'J' not in units:
units = units + ':J' # units include joules
tr[0] = 0
units = units.strip(':') # strip off extra colons
if 'LP-' in m:
methods = methods + ":" + m
decs = pd.to_numeric(this_specimen_measurements.dir_dec).tolist()
incs = pd.to_numeric(this_specimen_measurements.dir_inc).tolist()
#
# fix the coordinate system
#
# revert to original coordinate system
coord = saved_coord
if coord != '-1': # need to transform coordinates to geographic
# get the azimuth
or_info,az_type=pmag.get_orient(samp_data,this_sample,data_model=3)
if 'azimuth' in or_info.keys() and cb.not_null(or_info['azimuth'], False):
#azimuths = pd.to_numeric(
# this_specimen_measurements.azimuth).tolist()
#dips = pd.to_numeric(this_specimen_measurements.dip).tolist()
azimuths=len(decs)*[or_info['azimuth']]
dips=len(decs)*[or_info['dip']]
# if azimuth/dip is missing, plot using specimen coordinates instead
else:
azimuths,dips=[],[]
if any([cb.is_null(az) for az in azimuths if az != 0]):
coord = '-1'
print("-W- Couldn't find azimuth and dip for {}".format(this_specimen))
print(" Plotting with specimen coordinates instead")
elif any([cb.is_null(dip) for dip in dips if dip != 0]):
coord = '-1'
print("-W- Couldn't find azimuth and dip for {}".format(this_specimen))
print(" Plotting with specimen coordinates instead")
else:
coord = saved_coord
# if azimuth and dip were found, continue with geographic coordinates
if coord != "-1" and len(azimuths)>0:
dirs = [decs, incs, azimuths, dips]
# this transposes the columns and rows of the list of lists
dirs_geo = np.array(list(map(list, list(zip(*dirs)))))
decs, incs = pmag.dogeo_V(dirs_geo)
if coord == '100' and 'bed_dip_direction' in or_info.keys() and or_info['bed_dip_direction']!="": # need to do tilt correction too
bed_dip_dirs=len(decs)*[or_info['bed_dip_direction']]
bed_dips=len(decs)*[or_info['bed_dip']]
#bed_dip_dirs = pd.to_numeric(
# this_specimen_measurements.bed_dip_direction).tolist() # get the azimuths
#bed_dips = pd.to_numeric(
# this_specimen_measurements.bed_dip).tolist() # get the azimuths
dirs = [decs, incs, bed_dip_dirs, bed_dips]
## this transposes the columns and rows of the list of lists
dirs_tilt = np.array(list(map(list, list(zip(*dirs)))))
decs, incs = pmag.dotilt_V(dirs_tilt)
if pmagplotlib.isServer:
title = title + "CO:_t_"
else:
title = title + '_t'
else:
if pmagplotlib.isServer:
title = title + "CO:_g_"
else:
title = title + '_g'
if angle == "":
angle = decs[0]
ints = pd.to_numeric(this_specimen_measurements[int_key]).tolist()
ZI = this_specimen_measurements.ZI.tolist()
flags = this_specimen_measurements.quality.tolist()
codes = this_specimen_measurements.instrument_codes.tolist()
datalist = [tr, decs, incs, ints, ZI, flags, codes]
# this transposes the columns and rows of the list of lists
datablock = list(map(list, list(zip(*datalist))))
pmagplotlib.plot_zed(ZED, datablock, angle, title, units)
if verbose and not set_env.IS_WIN:
pmagplotlib.draw_figs(ZED)
#
# collect info for current_specimen_interpretation dictionary
#
#
# find prior interpretation
#
prior_specimen_interpretations=[]
if len(prior_spec_data):
prior_specimen_interpretations = prior_spec_data[prior_spec_data['specimen'].astype(str).str.contains(this_specimen) == True]
if (beg_pca == "") and (len(prior_specimen_interpretations) != 0):
if len(prior_specimen_interpretations)>0:
beg_pcas = pd.to_numeric(
prior_specimen_interpretations.meas_step_min.values).tolist()
end_pcas = pd.to_numeric(
prior_specimen_interpretations.meas_step_max.values).tolist()
spec_methods = prior_specimen_interpretations.method_codes.tolist()
# step through all prior interpretations and plot them
for ind in range(len(beg_pcas)):
spec_meths = spec_methods[ind].split(':')
for m in spec_meths:
if 'DE-BFL' in m:
calculation_type = 'DE-BFL' # best fit line
if 'DE-BFP' in m:
calculation_type = 'DE-BFP' # best fit plane
if 'DE-FM' in m:
calculation_type = 'DE-FM' # fisher mean
if 'DE-BFL-A' in m:
calculation_type = 'DE-BFL-A' # anchored best fit line
if len(beg_pcas)!=0:
try:
# getting the starting and ending points
start, end = tr.index(beg_pcas[ind]), tr.index(end_pcas[ind])
mpars = pmag.domean(
datablock, start, end, calculation_type)
except ValueError:
print('-W- Specimen record contains invalid start/stop bounds:')
mpars['specimen_direction_type'] = "Error"
# calculate direction/plane
if mpars["specimen_direction_type"] != "Error":
# put it on the plot
pmagplotlib.plot_dir(ZED, mpars, datablock, angle)
if verbose and not set_env.IS_WIN:
pmagplotlib.draw_figs(ZED)
### SKIP if no prior interpretation - this section should not be used:
# else:
# try:
# start, end = int(beg_pca), int(end_pca)
# except ValueError:
# beg_pca = 0
# end_pca = len(datablock) - 1
# start, end = int(beg_pca), int(end_pca)
# # # calculate direction/plane
# try:
# mpars = pmag.domean(datablock, start, end, calculation_type)
# except Exception as ex:
# print('-I- Problem with {}'.format(this_specimen))
# print(' ', ex)
# print(' Skipping')
# continue
# k += 1
# if mpars["specimen_direction_type"] != "Error":
# # put it on the plot
# pmagplotlib.plot_dir(ZED, mpars, datablock, angle)
# if verbose:
# pmagplotlib.draw_figs(ZED)
if plots == 1 or specimen != "":
if plot_file == "":
basename = title
else:
basename = plot_file
files = {}
for key in list(ZED.keys()):
files[key] = basename + '_' + key + '.' + fmt
if pmagplotlib.isServer:
files[key] = basename + "TY:_{}_.".format(key) + fmt
pmagplotlib.save_plots(ZED, files)
if specimen != "":
sys.exit()
if verbose:
recnum = 0
for plotrec in datablock:
if units == 'T':
print('%s: %i %7.1f %s %8.3e %7.1f %7.1f %s' % (
plotrec[5], recnum, plotrec[0] * 1e3, " mT", plotrec[3], plotrec[1], plotrec[2], plotrec[6]))
if units == "K":
print('%s: %i %7.1f %s %8.3e %7.1f %7.1f %s' % (
plotrec[5], recnum, plotrec[0] - 273, ' C', plotrec[3], plotrec[1], plotrec[2], plotrec[6]))
if units == "J":
print('%s: %i %7.1f %s %8.3e %7.1f %7.1f %s' % (
plotrec[5], recnum, plotrec[0], ' J', plotrec[3], plotrec[1], plotrec[2], plotrec[6]))
if 'K' in units and 'T' in units:
if plotrec[0] >= 1.:
print('%s: %i %7.1f %s %8.3e %7.1f %7.1f %s' % (
plotrec[5], recnum, plotrec[0] - 273, ' C', plotrec[3], plotrec[1], plotrec[2], plotrec[6]))
if plotrec[0] < 1.:
print('%s: %i %7.1f %s %8.3e %7.1f %7.1f %s' % (
plotrec[5], recnum, plotrec[0] * 1e3, " mT", plotrec[3], plotrec[1], plotrec[2], plotrec[6]))
recnum += 1
# we have a current interpretation
elif mpars["specimen_direction_type"] != "Error":
#
                # create a new specimen record for the interpretation of this
                # specimen
this_specimen_interpretation = {
col: "" for col in specimen_cols}
# this_specimen_interpretation["analysts"]=user
this_specimen_interpretation['software_packages'] = version_num
this_specimen_interpretation['specimen'] = this_specimen
this_specimen_interpretation["method_codes"] = calculation_type
this_specimen_interpretation["meas_step_unit"] = units
this_specimen_interpretation["meas_step_min"] = tr[start]
this_specimen_interpretation["meas_step_max"] = tr[end]
this_specimen_interpretation["dir_dec"] = '%7.1f' % (
mpars['specimen_dec'])
this_specimen_interpretation["dir_inc"] = '%7.1f' % (
mpars['specimen_inc'])
if "specimen_dang" in mpars:
this_specimen_interpretation["dir_dang"] = '%7.1f' % (
mpars['specimen_dang'])
else:
this_specimen_interpretation["dir_dang"] = ''
this_specimen_interpretation["dir_n_measurements"] = '%i' % (
mpars['specimen_n'])
this_specimen_interpretation["dir_tilt_correction"] = coord
methods = methods.replace(" ", "")
if "T" in units:
methods = methods + ":LP-DIR-AF"
if "K" in units:
methods = methods + ":LP-DIR-T"
if "J" in units:
methods = methods + ":LP-DIR-M"
this_specimen_interpretation["method_codes"] = methods.strip(
':')
this_specimen_interpretation["experiments"] = this_specimen_measurements.experiment.unique()[
0]
#
# print some stuff
#
if calculation_type != 'DE-FM':
this_specimen_interpretation["dir_mad_free"] = '%7.1f' % (
mpars['specimen_mad'])
this_specimen_interpretation["dir_alpha95"] = ''
if verbose:
if units == 'K':
print('%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %s \n' % (this_specimen_interpretation["specimen"], int(this_specimen_interpretation["dir_n_measurements"]), float(this_specimen_interpretation["dir_mad_free"]), float(this_specimen_interpretation["dir_dang"]), float(
this_specimen_interpretation["meas_step_min"]) - 273, float(this_specimen_interpretation["meas_step_max"]) - 273, float(this_specimen_interpretation["dir_dec"]), float(this_specimen_interpretation["dir_inc"]), calculation_type))
elif units == 'T':
print('%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %s \n' % (this_specimen_interpretation["specimen"], int(this_specimen_interpretation["dir_n_measurements"]), float(this_specimen_interpretation["dir_mad_free"]), float(this_specimen_interpretation["dir_dang"]), float(
this_specimen_interpretation["meas_step_min"]) * 1e3, float(this_specimen_interpretation["meas_step_max"]) * 1e3, float(this_specimen_interpretation["dir_dec"]), float(this_specimen_interpretation["dir_inc"]), calculation_type))
elif 'T' in units and 'K' in units:
if float(this_specimen_interpretation['meas_step_min']) < 1.0:
min = float(
this_specimen_interpretation['meas_step_min']) * 1e3
else:
min = float(
this_specimen_interpretation['meas_step_min']) - 273
if float(this_specimen_interpretation['meas_step_max']) < 1.0:
max = float(
this_specimen_interpretation['meas_step_max']) * 1e3
else:
max = float(
this_specimen_interpretation['meas_step_max']) - 273
print('%s %i %7.1f %i %i %7.1f %7.1f %7.1f %s \n' % (this_specimen_interpretation["specimen"], int(this_specimen_interpretation["dir_n_measurements"]), float(this_specimen_interpretation["dir_mad_free"]), float(
this_specimen_interpretation["dir_dang"]), min, max, float(this_specimen_interpretation["dir_dec"]), float(this_specimen_interpretation["dir_inc"]), calculation_type))
else:
print('%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %s \n' % (this_specimen_interpretation["specimen"], int(this_specimen_interpretation["dir_n_measurements"]), float(this_specimen_interpretation["dir_mad_free"]), float(this_specimen_interpretation["dir_dang"]), float(
this_specimen_interpretation["meas_step_min"]), float(this_specimen_interpretation["meas_step_max"]), float(this_specimen_interpretation["dir_dec"]), float(this_specimen_interpretation["dir_inc"]), calculation_type))
else:
this_specimen_interpretation["dir_alpha95"] = '%7.1f' % (
mpars['specimen_alpha95'])
this_specimen_interpretation["dir_mad_free"] = ''
if verbose:
                        if units == 'K':
                            print('%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %s \n' % (this_specimen_interpretation["specimen"], int(this_specimen_interpretation["dir_n_measurements"]), float(this_specimen_interpretation["dir_alpha95"]), float(this_specimen_interpretation["dir_dang"]), float(
this_specimen_interpretation["meas_step_min"]) - 273, float(this_specimen_interpretation["meas_step_max"]) - 273, float(this_specimen_interpretation["dir_dec"]), float(this_specimen_interpretation["dir_inc"]), calculation_type))
                        elif units == 'T':
print('%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %s \n' % (this_specimen_interpretation["specimen"], int(this_specimen_interpretation["dir_n_measurements"]), float(this_specimen_interpretation["dir_alpha95"]), float(this_specimen_interpretation["dir_dang"]), float(
this_specimen_interpretation["meas_step_min"]) * 1e3, float(this_specimen_interpretation["meas_step_max"]) * 1e3, float(this_specimen_interpretation["dir_dec"]), float(this_specimen_interpretation["dir_inc"]), calculation_type))
elif 'T' in units and 'K' in units:
if float(this_specimen_interpretation['meas_step_min']) < 1.0:
min = float(
this_specimen_interpretation['meas_step_min']) * 1e3
else:
min = float(
this_specimen_interpretation['meas_step_min']) - 273
if float(this_specimen_interpretation['meas_step_max']) < 1.0:
max = float(
this_specimen_interpretation['meas_step_max']) * 1e3
else:
max = float(
this_specimen_interpretation['meas_step_max']) - 273
print('%s %i %7.1f %i %i %7.1f %7.1f %s \n' % (this_specimen_interpretation["specimen"], int(this_specimen_interpretation["dir_n_measurements"]), float(
this_specimen_interpretation["dir_alpha95"]), min, max, float(this_specimen_interpretation["dir_dec"]), float(this_specimen_interpretation["dir_inc"]), calculation_type))
else:
print('%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %s \n' % (this_specimen_interpretation["specimen"], int(this_specimen_interpretation["dir_n_measurements"]), float(this_specimen_interpretation["dir_alpha95"]), float(
this_specimen_interpretation["meas_step_min"]), float(this_specimen_interpretation["meas_step_max"]), float(this_specimen_interpretation["dir_dec"]), float(this_specimen_interpretation["dir_inc"]), calculation_type))
if verbose:
saveit = input("Save this interpretation? [y]/n \n")
else:
print("no data", this_specimen)
if verbose:
pmagplotlib.draw_figs(ZED)
#res = input(' <return> for next specimen, [q]uit ')
res = input("S[a]ve plots, [q]uit, or <return> to continue ")
if res == 'a':
files = {plot_type: this_specimen + "_" + plot_type + "." + fmt for plot_type in ZED}
pmagplotlib.save_plots(ZED, files)
print("")
if res == 'q':
return
k += 1
#
if __name__ == "__main__":
main()
| bsd-3-clause |
Hiyorimi/scikit-image | skimage/viewer/canvastools/linetool.py | 43 | 6911 | import numpy as np
from matplotlib import lines
from ...viewer.canvastools.base import CanvasToolBase, ToolHandles
__all__ = ['LineTool', 'ThickLineTool']
class LineTool(CanvasToolBase):
"""Widget for line selection in a plot.
Parameters
----------
manager : Viewer or PlotPlugin.
Skimage viewer or plot plugin object.
on_move : function
Function called whenever a control handle is moved.
This function must accept the end points of line as the only argument.
on_release : function
Function called whenever the control handle is released.
on_enter : function
Function called whenever the "enter" key is pressed.
maxdist : float
Maximum pixel distance allowed when selecting control handle.
line_props : dict
Properties for :class:`matplotlib.lines.Line2D`.
handle_props : dict
Marker properties for the handles (also see
:class:`matplotlib.lines.Line2D`).
Attributes
----------
end_points : 2D array
End points of line ((x1, y1), (x2, y2)).
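    Examples
    --------
    A minimal sketch, mirroring the demo at the bottom of this module::
        viewer = ImageViewer(data.camera())
        line_tool = LineTool(viewer)
        viewer.show()
        print(line_tool.end_points)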
"""
def __init__(self, manager, on_move=None, on_release=None, on_enter=None,
maxdist=10, line_props=None, handle_props=None,
**kwargs):
super(LineTool, self).__init__(manager, on_move=on_move,
on_enter=on_enter,
on_release=on_release, **kwargs)
props = dict(color='r', linewidth=1, alpha=0.4, solid_capstyle='butt')
props.update(line_props if line_props is not None else {})
self.linewidth = props['linewidth']
self.maxdist = maxdist
self._active_pt = None
x = (0, 0)
y = (0, 0)
self._end_pts = np.transpose([x, y])
self._line = lines.Line2D(x, y, visible=False, animated=True, **props)
self.ax.add_line(self._line)
self._handles = ToolHandles(self.ax, x, y,
marker_props=handle_props)
self._handles.set_visible(False)
self.artists = [self._line, self._handles.artist]
if on_enter is None:
def on_enter(pts):
x, y = np.transpose(pts)
print("length = %0.2f" %
np.sqrt(np.diff(x)**2 + np.diff(y)**2))
self.callback_on_enter = on_enter
self.manager.add_tool(self)
@property
def end_points(self):
return self._end_pts.astype(int)
@end_points.setter
def end_points(self, pts):
self._end_pts = np.asarray(pts)
self._line.set_data(np.transpose(pts))
self._handles.set_data(np.transpose(pts))
self._line.set_linewidth(self.linewidth)
self.set_visible(True)
self.redraw()
def hit_test(self, event):
if event.button != 1 or not self.ax.in_axes(event):
return False
idx, px_dist = self._handles.closest(event.x, event.y)
if px_dist < self.maxdist:
self._active_pt = idx
return True
else:
self._active_pt = None
return False
def on_mouse_press(self, event):
self.set_visible(True)
if self._active_pt is None:
self._active_pt = 0
x, y = event.xdata, event.ydata
self._end_pts = np.array([[x, y], [x, y]])
def on_mouse_release(self, event):
if event.button != 1:
return
self._active_pt = None
self.callback_on_release(self.geometry)
self.redraw()
def on_move(self, event):
if event.button != 1 or self._active_pt is None:
return
if not self.ax.in_axes(event):
return
self.update(event.xdata, event.ydata)
self.callback_on_move(self.geometry)
def update(self, x=None, y=None):
if x is not None:
self._end_pts[self._active_pt, :] = x, y
self.end_points = self._end_pts
@property
def geometry(self):
return self.end_points
class ThickLineTool(LineTool):
"""Widget for line selection in a plot.
The thickness of the line can be varied using the mouse scroll wheel, or
with the '+' and '-' keys.
Parameters
----------
manager : Viewer or PlotPlugin.
Skimage viewer or plot plugin object.
on_move : function
Function called whenever a control handle is moved.
This function must accept the end points of line as the only argument.
on_release : function
Function called whenever the control handle is released.
on_enter : function
Function called whenever the "enter" key is pressed.
on_change : function
Function called whenever the line thickness is changed.
maxdist : float
Maximum pixel distance allowed when selecting control handle.
line_props : dict
Properties for :class:`matplotlib.lines.Line2D`.
handle_props : dict
Marker properties for the handles (also see
:class:`matplotlib.lines.Line2D`).
Attributes
----------
end_points : 2D array
End points of line ((x1, y1), (x2, y2)).
"""
def __init__(self, manager, on_move=None, on_enter=None, on_release=None,
on_change=None, maxdist=10, line_props=None, handle_props=None):
super(ThickLineTool, self).__init__(manager,
on_move=on_move,
on_enter=on_enter,
on_release=on_release,
maxdist=maxdist,
line_props=line_props,
handle_props=handle_props)
if on_change is None:
def on_change(*args):
pass
self.callback_on_change = on_change
def on_scroll(self, event):
if not event.inaxes:
return
if event.button == 'up':
self._thicken_scan_line()
elif event.button == 'down':
self._shrink_scan_line()
def on_key_press(self, event):
if event.key == '+':
self._thicken_scan_line()
elif event.key == '-':
self._shrink_scan_line()
def _thicken_scan_line(self):
self.linewidth += 1
self.update()
self.callback_on_change(self.geometry)
def _shrink_scan_line(self):
if self.linewidth > 1:
self.linewidth -= 1
self.update()
self.callback_on_change(self.geometry)
if __name__ == '__main__': # pragma: no cover
from ... import data
from ...viewer import ImageViewer
image = data.camera()
viewer = ImageViewer(image)
h, w = image.shape
line_tool = ThickLineTool(viewer)
line_tool.end_points = ([w/3, h/2], [2*w/3, h/2])
viewer.show()
| bsd-3-clause |
arjoly/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is essentially minimizing a penalised version
of the least-squares function. The penalty `shrinks` the
values of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, compared to that
of standard linear regression.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
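        # refit on six noisy variations of the two training points and overlay the fits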
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
stjepang/channel | benchmarks/plot.py | 1 | 3917 | #!/usr/bin/env python2
import sys
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
results = []
for f in sys.argv[1:]:
with open(f) as f:
for line in f.readlines():
test, lang, impl, secs, _ = line.split()
results.append((test, lang, impl, float(secs)))
fig = plt.figure(figsize=(10, 10))
def plot(subplot, title, prefix, runs):
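    # subplot: matplotlib subplot position (e.g. 221); title: chart title;
    # prefix: benchmark name prefix (e.g. 'bounded0'); runs: benchmark variants plotted as rows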
runs.reverse()
ys = [7 * (i+1) for i in xrange(len(runs))]
ax = fig.add_subplot(subplot)
ax.set_title(title)
ax.set_yticks(ys)
ax.set_yticklabels(runs)
ax.tick_params(which='major', length=0)
ax.set_xlabel('seconds')
go = [0] * len(runs)
mpsc = [0] * len(runs)
msqueue = [0] * len(runs)
segqueue = [0] * len(runs)
chan = [0] * len(runs)
channel = [0] * len(runs)
for (i, run) in enumerate(runs):
for (test, lang, impl, secs) in results:
if test == prefix + '_' + run:
if lang == 'Go' and impl == 'chan':
go[i] = secs
if lang == 'Rust' and impl == 'mpsc':
mpsc[i] = secs
if lang == 'Rust' and impl == 'MsQueue':
msqueue[i] = secs
if lang == 'Rust' and impl == 'SegQueue':
segqueue[i] = secs
if lang == 'Rust' and impl == 'chan':
chan[i] = secs
if lang == 'Rust' and impl == 'channel':
channel[i] = secs
opts = dict(height=0.7, align='center')
ax.barh([y-3 for y in ys], go, color='skyblue', **opts)
ax.barh([y-2 for y in ys], channel, color='red', **opts)
ax.barh([y-1 for y in ys], mpsc, color='black', **opts)
ax.barh([y+0 for y in ys], chan, color='orange', **opts)
ax.barh([y+1 for y in ys], msqueue, color='blue', **opts)
ax.barh([y+2 for y in ys], segqueue, color='green', **opts)
m = int(max(go + mpsc + msqueue + segqueue + chan + channel) * 1.2)
if m < 10:
ax.set_xticks(range(m + 1))
elif m < 50:
ax.set_xticks([x*5 for x in range(m / 5 + 1)])
else:
ax.set_xticks([x*10 for x in range(m / 10 + 1)])
for (x, y) in zip(go, ys):
if x > 0:
ax.text(x+m/200., y-3-0.3, 'Go', fontsize=7)
for (x, y) in zip(channel, ys):
if x > 0:
ax.text(x+m/200., y-2-0.3, 'crossbeam-channel', fontsize=7)
for (x, y) in zip(mpsc, ys):
if x > 0:
ax.text(x+m/200., y-1-0.3, 'mpsc', fontsize=7)
for (x, y) in zip(chan, ys):
if x > 0:
ax.text(x+m/200., y+0-0.3, 'chan', fontsize=7)
for (x, y) in zip(msqueue, ys):
if x > 0:
ax.text(x+m/200., y+1-0.3, 'MsQueue', fontsize=7)
for (x, y) in zip(segqueue, ys):
if x > 0:
ax.text(x+m/200., y+2-0.3, 'SegQueue', fontsize=7)
plot(
221,
"Bounded channel of capacity 0",
'bounded0',
['spsc', 'mpsc', 'mpmc', 'select_rx', 'select_both'],
)
plot(
222,
"Bounded channel of capacity 1",
'bounded1',
['spsc', 'mpsc', 'mpmc', 'select_rx', 'select_both'],
)
plot(
223,
"Bounded channel of capacity N",
'bounded',
['seq', 'spsc', 'mpsc', 'mpmc', 'select_rx', 'select_both'],
)
plot(
224,
"Unbounded channel",
'unbounded',
['seq', 'spsc', 'mpsc', 'mpmc', 'select_rx', 'select_both'],
)
legend = [
('Go channel', 'skyblue'),
('crossbeam-channel', 'red'),
('std::sync::mpsc', 'black'),
('chan', 'orange'),
('crossbeam::sync::MsQueue', 'blue'),
('crossbeam::sync::SegQueue', 'green'),
]
legend.reverse()
fig.legend(
[mpatches.Patch(label=label, color=color) for (label, color) in legend],
[label for (label, color) in legend],
'upper center',
ncol=2,
)
plt.subplots_adjust(
top=0.88,
bottom=0.05,
left=0.1,
right=0.95,
wspace=0.3,
hspace=0.2,
)
plt.savefig('plot.png')
# plt.show()
| apache-2.0 |
snorfalorpagus/pywr | pywr/recorders/events.py | 1 | 12969 | from ._recorders import Recorder
import numpy as np
import pandas
class Event(object):
""" Container for event information """
def __init__(self, start, scenario_index):
self.start = start
self.scenario_index = scenario_index
self.end = None
self.values = None # to record any tracked values
@property
def duration(self):
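        # event length in whole days (end timestep minus start timestep)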
td = self.end.datetime - self.start.datetime
return td.days
class EventRecorder(Recorder):
"""Track discrete events using a Parameter or Recorder
The recorder works with an `IndexParameter`, `Parameter` or `Recorder`. An
event is considered active while the value of the threshold is non-zero.
The events are stored in a flat list across all scenarios. Each
event is stored as a separate `Event` object. Events can be accessed as a
dataframe using the `to_dataframe` method.
Parameters
----------
threshold - IndexParameter, Parameter or Recorder
The object that defines the start and end of an event.
minimum_event_length - int (default=1)
The minimum number of time-steps that an event must last for
to be recorded. This is useful to not record events that are
caused by model hysteresis. The default will cause all events
to be recorded.
agg_func - string, callable
Function used for aggregating across the recorders. Numpy style functions that
support an axis argument are supported.
event_agg_func - string, callable
Optional different function for aggregating the `tracked_parameter` across events.
If given this aggregation will be added as a `value` column in the `to_dataframe` method.
tracked_parameter - `Parameter`
The parameter to track across each event. The values from this parameter are appended each
time-step to each event. These can then be used with other event recorders for statistical
aggregation, or with `event_agg_func`.
See also
--------
`pywr.parameters._thresholds`
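    Examples
    --------
    A minimal usage sketch (assumes an existing `model`, a threshold parameter
    `threshold` and a tracked parameter `flow`; the names are illustrative)::
        rec = EventRecorder(model, threshold, minimum_event_length=3,
                            tracked_parameter=flow, event_agg_func='max')
        model.run()
        events = rec.to_dataframe()  # one row per recorded event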
"""
def __init__(self, model, threshold, minimum_event_length=1, tracked_parameter=None, **kwargs):
self.event_agg_func = kwargs.pop('event_agg_func', kwargs.get('agg_func'))
super(EventRecorder, self).__init__(model, **kwargs)
self.threshold = threshold
self.threshold.parents.add(self)
if minimum_event_length < 1:
raise ValueError('Keyword "minimum_event_length" must be >= 1')
self.minimum_event_length = minimum_event_length
self.events = None
self._current_events = None
# TODO make this more generic to track components or nodes (e.g. storage volume)
self.tracked_parameter = tracked_parameter
if self.tracked_parameter is not None:
self.tracked_parameter.parents.add(self)
def setup(self):
pass
def reset(self):
self.events = []
        # This list stores whether an event is currently active in each scenario.
self._current_events = [None for si in self.model.scenarios.combinations]
def after(self):
# Current timestep
ts = self.model.timestepper.current
from pywr.parameters import Parameter, IndexParameter
if isinstance(self.threshold, Recorder):
all_triggered = np.array(self.threshold.values(), dtype=np.int)
elif isinstance(self.threshold, IndexParameter):
all_triggered = self.threshold.get_all_indices()
elif isinstance(self.threshold, Parameter):
all_triggered = np.array(self.threshold.get_all_values(), dtype=np.int)
else:
raise TypeError("Threshold must be either a Recorder or Parameter instance.")
for si in self.model.scenarios.combinations:
# Determine if an event is active this time-step/scenario combination
triggered = all_triggered[si.global_id]
# Get the current event
current_event = self._current_events[si.global_id]
if current_event is not None:
# A current event is active
if triggered:
# Current event continues
# Update the timeseries of event data
if self.tracked_parameter is not None:
value = self.tracked_parameter.get_value(si)
current_event.values.append(value)
else:
# Update the end of the current event.
current_event.end = ts
current_event.values = np.array(current_event.values) # Convert list to nparray
current_length = ts.index - current_event.start.index
if current_length >= self.minimum_event_length:
# Current event ends
self.events.append(current_event)
# Event has ended; no further updates
current_event = None
else:
# Event wasn't long enough; don't append
current_event = None
else:
# No current event
if triggered:
# Start of a new event
current_event = Event(ts, si)
# Start the timeseries of event data
if self.tracked_parameter is not None:
value = self.tracked_parameter.get_value(si)
current_event.values = [value, ]
else:
# No event active and one hasn't started
# Therefore do nothing.
pass
# Update list of current events
self._current_events[si.global_id] = current_event
def finish(self):
ts = self.model.timestepper.current
# Complete any unfinished events
for si in self.model.scenarios.combinations:
# Get the current event
current_event = self._current_events[si.global_id]
if current_event is not None:
# Unfinished event
current_event.end = ts
self.events.append(current_event)
self._current_events[si.global_id] = None
def to_dataframe(self):
""" Returns a `pandas.DataFrame` containing all of the events.
If `event_agg_func` is a valid aggregation function and `tracked_parameter`
is given then a "value" column is added to the dataframe containing the
result of the aggregation.
"""
# Return empty dataframe if no events are found.
if len(self.events) == 0:
return pandas.DataFrame(columns=['scenario_id', 'start', 'end'])
scen_id = np.empty(len(self.events), dtype=np.int)
start = np.empty_like(scen_id, dtype=object)
end = np.empty_like(scen_id, dtype=object)
values = np.empty_like(scen_id, dtype=float)
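        # per-event aggregate of the tracked parameter (only filled in when event_agg_func is set)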
for i, evt in enumerate(self.events):
scen_id[i] = evt.scenario_index.global_id
start[i] = evt.start.datetime
end[i] = evt.end.datetime
if self.tracked_parameter is not None and self.event_agg_func is not None:
values[i] = pandas.Series(evt.values).aggregate(self.event_agg_func)
df_dict = {'scenario_id': scen_id, 'start': start, 'end': end}
if self.tracked_parameter is not None and self.event_agg_func is not None:
df_dict["value"] = values
return pandas.DataFrame(df_dict)
class EventDurationRecorder(Recorder):
""" Recorder for the duration of events found by an EventRecorder
This Recorder uses the results of an EventRecorder to calculate the duration
of those events in each scenario. Aggregation by scenario is done via
the pandas.DataFrame.groupby() method.
Any scenario which has no events will contain a NaN value.
Parameters
----------
event_recorder : EventRecorder
EventRecorder instance to calculate the events.
agg_func - string, callable
Function used for aggregating across the recorders. Numpy style functions that
support an axis argument are supported.
recorder_agg_func - string, callable
Optional aggregating function for all events in each scenario. The function
        must be supported by the `DataFrame.groupby` method.
"""
def __init__(self, model, event_recorder, **kwargs):
# Optional different method for aggregating across self.recorders scenarios
agg_func = kwargs.pop('recorder_agg_func', kwargs.get('agg_func'))
self.recorder_agg_func = agg_func
super(EventDurationRecorder, self).__init__(model, **kwargs)
self.event_recorder = event_recorder
self.event_recorder.parents.add(self)
def setup(self):
self._values = np.empty(len(self.model.scenarios.combinations))
def reset(self):
self._values[...] = 0.0
def values(self):
return self._values
def finish(self):
df = self.event_recorder.to_dataframe()
self._values[...] = 0.0
# No events found
if len(df) == 0:
return
# Calculate duration
df['duration'] = df['end'] - df['start']
# Convert to int of days
df['duration'] = df['duration'].dt.days
# Drop other columns
df = df[['scenario_id', 'duration']]
# Group by scenario ...
grouped = df.groupby('scenario_id').agg(self.recorder_agg_func)
# ... and update the internal values
for index, row in grouped.iterrows():
self._values[index] = row['duration']
class EventStatisticRecorder(Recorder):
""" Recorder for the duration of events found by an EventRecorder
This Recorder uses the results of an EventRecorder to calculate aggregated statistics
of those events in each scenario. This requires the EventRecorder to be given a `tracked_parameter`
in order to save an array of values during each event. This recorder uses `event_agg_func` to aggregate
those saved values in each event before applying `recorder_agg_func` to those values in each scenario.
Aggregation by scenario is done via the pandas.DataFrame.groupby() method.
Any scenario which has no events will contain a NaN value regardless of the aggregation function defined.
Parameters
----------
model : pywr.model.Model
event_recorder : EventRecorder
EventRecorder instance to calculate the events.
agg_func - string, callable
Function used for aggregating across the recorders. Numpy style functions that
support an axis argument are supported.
recorder_agg_func - string, callable
Optional aggregating function for all events in each scenario. The function
        must be supported by the `DataFrame.groupby` method.
event_agg_func - string, callable
        Optional different function for aggregating the `tracked_parameter` values within
        each event. The per-event results are then aggregated across the events in each
        scenario using `recorder_agg_func`.
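        For example, with `event_agg_func='min'` and `recorder_agg_func='mean'` the
        recorder reports, per scenario, the mean across events of each event's minimum
        tracked value (an illustrative choice of functions).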
"""
def __init__(self, model, event_recorder, **kwargs):
# Optional different method for aggregating across self.recorders scenarios
agg_func = kwargs.pop('event_agg_func', kwargs.get('agg_func'))
self.event_agg_func = agg_func
agg_func = kwargs.pop('recorder_agg_func', kwargs.get('agg_func'))
self.recorder_agg_func = agg_func
super(EventStatisticRecorder, self).__init__(model, **kwargs)
self.event_recorder = event_recorder
self.event_recorder.parents.add(self)
def setup(self):
self._values = np.empty(len(self.model.scenarios.combinations))
if self.event_recorder.tracked_parameter is None:
raise ValueError('To calculate event statistics requires the parent `EventRecorder` to have a `tracked_parameter`.')
def reset(self):
self._values[...] = np.nan
def values(self):
return self._values
def finish(self):
""" Compute the aggregated value in each scenario based on the parent `EventRecorder` events """
events = self.event_recorder.events
# Return NaN if no events found
if len(events) == 0:
return
scen_id = np.empty(len(events), dtype=np.int)
values = np.empty_like(scen_id, dtype=np.float64)
for i, evt in enumerate(events):
scen_id[i] = evt.scenario_index.global_id
values[i] = pandas.Series(evt.values).aggregate(self.event_agg_func)
df = pandas.DataFrame({'scenario_id': scen_id, 'value': values})
# Group by scenario ...
grouped = df.groupby('scenario_id').agg(self.recorder_agg_func)
# ... and update the internal values
for index, row in grouped.iterrows():
self._values[index] = row['value']
| gpl-3.0 |
tony810430/flink | flink-python/pyflink/table/tests/test_pandas_udaf.py | 3 | 37046 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import unittest
from pyflink.table import expressions as expr
from pyflink.table.types import DataTypes
from pyflink.table.udf import udaf, udf, AggregateFunction
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkBlinkBatchTableTestCase, \
PyFlinkBlinkStreamTableTestCase
class BatchPandasUDAFITTests(PyFlinkBlinkBatchTableTestCase):
def test_check_result_type(self):
def pandas_udaf():
pass
with self.assertRaises(
TypeError,
msg="Invalid returnType: Pandas UDAF doesn't support DataType type MAP currently"):
udaf(pandas_udaf, result_type=DataTypes.MAP(DataTypes.INT(), DataTypes.INT()),
func_type="pandas")
def test_group_aggregate_function(self):
t = self.t_env.from_elements(
[(1, 2, 3), (3, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT())]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c'],
[DataTypes.TINYINT(), DataTypes.FLOAT(),
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.INT()),
DataTypes.FIELD("b", DataTypes.INT())])])
self.t_env.register_table_sink("Results", table_sink)
# general udf
add = udf(lambda a: a + 1, result_type=DataTypes.INT())
# pandas udf
substract = udf(lambda a: a - 1, result_type=DataTypes.INT(), func_type="pandas")
max_udaf = udaf(lambda a: (a.max(), a.min()),
result_type=DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.INT()),
DataTypes.FIELD("b", DataTypes.INT())]),
func_type="pandas")
t.group_by("a") \
.select(t.a, mean_udaf(add(t.b)), max_udaf(substract(t.c))) \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(
actual,
["+I[1, 6.0, +I[5, 2]]", "+I[2, 3.0, +I[3, 2]]", "+I[3, 3.0, +I[2, 2]]"])
def test_group_aggregate_without_keys(self):
t = self.t_env.from_elements(
[(1, 2, 3), (3, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT())]))
table_sink = source_sink_utils.TestAppendSink(
['a'],
[DataTypes.INT()])
min_add = udaf(lambda a, b, c: a.min() + b.min() + c.min(),
result_type=DataTypes.INT(), func_type="pandas")
self.t_env.register_table_sink("Results", table_sink)
t.select(min_add(t.a, t.b, t.c)) \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[5]"])
def test_group_aggregate_with_aux_group(self):
t = self.t_env.from_elements(
[(1, 2, 3), (3, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT())]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c', 'd'],
[DataTypes.TINYINT(), DataTypes.INT(), DataTypes.FLOAT(), DataTypes.INT()])
self.t_env.register_table_sink("Results", table_sink)
self.t_env.get_config().get_configuration().set_string('python.metric.enabled', 'true')
self.t_env.register_function("max_add", udaf(MaxAdd(),
result_type=DataTypes.INT(),
func_type="pandas"))
self.t_env.create_temporary_system_function("mean_udaf", mean_udaf)
t.group_by("a") \
.select("a, a + 1 as b, a + 2 as c") \
.group_by("a, b") \
.select("a, b, mean_udaf(b), max_add(b, c, 1)") \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[1, 2, 2.0, 6]", "+I[2, 3, 3.0, 8]", "+I[3, 4, 4.0, 10]"])
def test_tumble_group_window_aggregate_function(self):
import datetime
from pyflink.table.window import Tumble
t = self.t_env.from_elements(
[
(1, 2, 3, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(3, 2, 4, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(2, 1, 2, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(1, 3, 1, datetime.datetime(2018, 3, 11, 3, 40, 0, 0)),
(1, 8, 5, datetime.datetime(2018, 3, 11, 4, 20, 0, 0)),
(2, 3, 6, datetime.datetime(2018, 3, 11, 3, 30, 0, 0))
],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT()),
DataTypes.FIELD("rowtime", DataTypes.TIMESTAMP(3))]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c'],
[
DataTypes.TIMESTAMP(3),
DataTypes.TIMESTAMP(3),
DataTypes.FLOAT()
])
self.t_env.register_table_sink("Results", table_sink)
self.t_env.create_temporary_system_function("mean_udaf", mean_udaf)
tumble_window = Tumble.over(expr.lit(1).hours) \
.on(expr.col("rowtime")) \
.alias("w")
t.window(tumble_window) \
.group_by("w") \
.select("w.start, w.end, mean_udaf(b)") \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual,
["+I[2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2.2]",
"+I[2018-03-11 04:00:00.0, 2018-03-11 05:00:00.0, 8.0]"])
def test_slide_group_window_aggregate_function(self):
import datetime
from pyflink.table.window import Slide
t = self.t_env.from_elements(
[
(1, 2, 3, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(3, 2, 4, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(2, 1, 2, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(1, 3, 1, datetime.datetime(2018, 3, 11, 3, 40, 0, 0)),
(1, 8, 5, datetime.datetime(2018, 3, 11, 4, 20, 0, 0)),
(2, 3, 6, datetime.datetime(2018, 3, 11, 3, 30, 0, 0))
],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT()),
DataTypes.FIELD("rowtime", DataTypes.TIMESTAMP(3))]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c', 'd', 'e'],
[
DataTypes.TINYINT(),
DataTypes.TIMESTAMP(3),
DataTypes.TIMESTAMP(3),
DataTypes.FLOAT(),
DataTypes.INT()
])
self.t_env.register_table_sink("Results", table_sink)
self.t_env.register_function("max_add", udaf(MaxAdd(),
result_type=DataTypes.INT(),
func_type="pandas"))
self.t_env.create_temporary_system_function("mean_udaf", mean_udaf)
slide_window = Slide.over(expr.lit(1).hours) \
.every(expr.lit(30).minutes) \
.on(expr.col("rowtime")) \
.alias("w")
t.window(slide_window) \
.group_by("a, w") \
.select("a, w.start, w.end, mean_udaf(b), max_add(b, c, 1)") \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual,
["+I[1, 2018-03-11 02:30:00.0, 2018-03-11 03:30:00.0, 2.0, 6]",
"+I[1, 2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2.5, 7]",
"+I[1, 2018-03-11 03:30:00.0, 2018-03-11 04:30:00.0, 5.5, 14]",
"+I[1, 2018-03-11 04:00:00.0, 2018-03-11 05:00:00.0, 8.0, 14]",
"+I[2, 2018-03-11 02:30:00.0, 2018-03-11 03:30:00.0, 1.0, 4]",
"+I[2, 2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2.0, 10]",
"+I[2, 2018-03-11 03:30:00.0, 2018-03-11 04:30:00.0, 3.0, 10]",
"+I[3, 2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2.0, 7]",
"+I[3, 2018-03-11 02:30:00.0, 2018-03-11 03:30:00.0, 2.0, 7]"])
def test_over_window_aggregate_function(self):
import datetime
t = self.t_env.from_elements(
[
(1, 2, 3, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(3, 2, 1, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(2, 1, 2, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(1, 3, 1, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(1, 8, 5, datetime.datetime(2018, 3, 11, 4, 20, 0, 0)),
(2, 3, 6, datetime.datetime(2018, 3, 11, 3, 30, 0, 0))
],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT()),
DataTypes.FIELD("rowtime", DataTypes.TIMESTAMP(3))]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'],
[DataTypes.TINYINT(), DataTypes.FLOAT(), DataTypes.INT(), DataTypes.FLOAT(),
DataTypes.FLOAT(), DataTypes.FLOAT(), DataTypes.FLOAT(), DataTypes.FLOAT(),
DataTypes.FLOAT(), DataTypes.FLOAT()])
self.t_env.register_table_sink("Results", table_sink)
self.t_env.create_temporary_system_function("mean_udaf", mean_udaf)
self.t_env.register_function("max_add", udaf(MaxAdd(),
result_type=DataTypes.INT(),
func_type="pandas"))
self.t_env.register_table("T", t)
self.t_env.execute_sql("""
insert into Results
select a,
mean_udaf(b)
over (PARTITION BY a ORDER BY rowtime
ROWS BETWEEN UNBOUNDED preceding AND UNBOUNDED FOLLOWING),
max_add(b, c)
over (PARTITION BY a ORDER BY rowtime
ROWS BETWEEN UNBOUNDED preceding AND 0 FOLLOWING),
mean_udaf(b)
over (PARTITION BY a ORDER BY rowtime
ROWS BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING),
mean_udaf(c)
over (PARTITION BY a ORDER BY rowtime
ROWS BETWEEN 1 PRECEDING AND 0 FOLLOWING),
mean_udaf(c)
over (PARTITION BY a ORDER BY rowtime
RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING),
mean_udaf(b)
over (PARTITION BY a ORDER BY rowtime
RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW),
mean_udaf(b)
over (PARTITION BY a ORDER BY rowtime
RANGE BETWEEN INTERVAL '20' MINUTE PRECEDING AND UNBOUNDED FOLLOWING),
mean_udaf(c)
over (PARTITION BY a ORDER BY rowtime
RANGE BETWEEN INTERVAL '20' MINUTE PRECEDING AND UNBOUNDED FOLLOWING),
mean_udaf(c)
over (PARTITION BY a ORDER BY rowtime
RANGE BETWEEN INTERVAL '20' MINUTE PRECEDING AND CURRENT ROW)
from T
""").wait()
actual = source_sink_utils.results()
self.assert_equals(actual,
["+I[1, 4.3333335, 5, 4.3333335, 3.0, 3.0, 2.5, 4.3333335, 3.0, 2.0]",
"+I[1, 4.3333335, 13, 5.5, 3.0, 3.0, 4.3333335, 8.0, 5.0, 5.0]",
"+I[1, 4.3333335, 6, 4.3333335, 2.0, 3.0, 2.5, 4.3333335, 3.0, 2.0]",
"+I[2, 2.0, 9, 2.0, 4.0, 4.0, 2.0, 2.0, 4.0, 4.0]",
"+I[2, 2.0, 3, 2.0, 2.0, 4.0, 1.0, 2.0, 4.0, 2.0]",
"+I[3, 2.0, 3, 2.0, 1.0, 1.0, 2.0, 2.0, 1.0, 1.0]"])
class StreamPandasUDAFITTests(PyFlinkBlinkStreamTableTestCase):
def test_sliding_group_window_over_time(self):
# create source file path
import tempfile
import os
tmp_dir = tempfile.gettempdir()
data = [
'1,1,2,2018-03-11 03:10:00',
'3,3,2,2018-03-11 03:10:00',
'2,2,1,2018-03-11 03:10:00',
'1,1,3,2018-03-11 03:40:00',
'1,1,8,2018-03-11 04:20:00',
'2,2,3,2018-03-11 03:30:00'
]
source_path = tmp_dir + '/test_sliding_group_window_over_time.csv'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
from pyflink.table.window import Slide
self.t_env.get_config().get_configuration().set_string(
"pipeline.time-characteristic", "EventTime")
self.t_env.register_function("mean_udaf", mean_udaf)
source_table = """
create table source_table(
a TINYINT,
b SMALLINT,
c SMALLINT,
rowtime TIMESTAMP(3),
WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
) with(
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '%s',
'format.ignore-first-line' = 'false',
'format.field-delimiter' = ','
)
""" % source_path
self.t_env.execute_sql(source_table)
t = self.t_env.from_path("source_table")
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c', 'd'],
[
DataTypes.TINYINT(),
DataTypes.TIMESTAMP(3),
DataTypes.TIMESTAMP(3),
DataTypes.FLOAT()])
self.t_env.register_table_sink("Results", table_sink)
t.window(Slide.over("1.hours").every("30.minutes").on("rowtime").alias("w")) \
.group_by("a, b, w") \
.select("a, w.start, w.end, mean_udaf(c) as b") \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual,
["+I[1, 2018-03-11 02:30:00.0, 2018-03-11 03:30:00.0, 2.0]",
"+I[1, 2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2.5]",
"+I[1, 2018-03-11 03:30:00.0, 2018-03-11 04:30:00.0, 5.5]",
"+I[1, 2018-03-11 04:00:00.0, 2018-03-11 05:00:00.0, 8.0]",
"+I[2, 2018-03-11 02:30:00.0, 2018-03-11 03:30:00.0, 1.0]",
"+I[2, 2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2.0]",
"+I[2, 2018-03-11 03:30:00.0, 2018-03-11 04:30:00.0, 3.0]",
"+I[3, 2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2.0]",
"+I[3, 2018-03-11 02:30:00.0, 2018-03-11 03:30:00.0, 2.0]"])
os.remove(source_path)
def test_sliding_group_window_over_proctime(self):
self.t_env.get_config().get_configuration().set_string("parallelism.default", "1")
from pyflink.table.window import Slide
self.t_env.register_function("mean_udaf", mean_udaf)
source_table = """
create table source_table(
a INT,
proctime as PROCTIME()
) with(
'connector' = 'datagen',
'rows-per-second' = '1',
'fields.a.kind' = 'sequence',
'fields.a.start' = '1',
'fields.a.end' = '10'
)
"""
self.t_env.execute_sql(source_table)
t = self.t_env.from_path("source_table")
iterator = t.select("a, proctime") \
.window(Slide.over("1.seconds").every("1.seconds").on("proctime").alias("w")) \
.group_by("a, w") \
.select("mean_udaf(a) as b, w.start").execute().collect()
result = [i for i in iterator]
        # if WindowAssigner.isEventTime() incorrectly returned true for this
        # processing-time window, w.start would be 1970-01-01
# TODO: After fixing the TimeZone problem of window with processing time (will be fixed in
# FLIP-162), we should replace it with a more accurate assertion.
self.assertTrue(result[0][1].year > 1970)
def test_sliding_group_window_over_count(self):
self.t_env.get_config().get_configuration().set_string("parallelism.default", "1")
# create source file path
import tempfile
import os
tmp_dir = tempfile.gettempdir()
data = [
'1,1,2,2018-03-11 03:10:00',
'3,3,2,2018-03-11 03:10:00',
'2,2,1,2018-03-11 03:10:00',
'1,1,3,2018-03-11 03:40:00',
'1,1,8,2018-03-11 04:20:00',
'2,2,3,2018-03-11 03:30:00',
'3,3,3,2018-03-11 03:30:00'
]
source_path = tmp_dir + '/test_sliding_group_window_over_count.csv'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
from pyflink.table.window import Slide
self.t_env.get_config().get_configuration().set_string(
"pipeline.time-characteristic", "ProcessingTime")
self.t_env.register_function("mean_udaf", mean_udaf)
source_table = """
create table source_table(
a TINYINT,
b SMALLINT,
c SMALLINT,
protime as PROCTIME()
) with(
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '%s',
'format.ignore-first-line' = 'false',
'format.field-delimiter' = ','
)
""" % source_path
self.t_env.execute_sql(source_table)
t = self.t_env.from_path("source_table")
table_sink = source_sink_utils.TestAppendSink(
['a', 'd'],
[
DataTypes.TINYINT(),
DataTypes.FLOAT()])
self.t_env.register_table_sink("Results", table_sink)
t.window(Slide.over("2.rows").every("1.rows").on("protime").alias("w")) \
.group_by("a, b, w") \
.select("a, mean_udaf(c) as b") \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[1, 2.5]", "+I[1, 5.5]", "+I[2, 2.0]", "+I[3, 2.5]"])
os.remove(source_path)
def test_tumbling_group_window_over_time(self):
# create source file path
import tempfile
import os
tmp_dir = tempfile.gettempdir()
data = [
'1,1,2,2018-03-11 03:10:00',
'3,3,2,2018-03-11 03:10:00',
'2,2,1,2018-03-11 03:10:00',
'1,1,3,2018-03-11 03:40:00',
'1,1,8,2018-03-11 04:20:00',
'2,2,3,2018-03-11 03:30:00'
]
source_path = tmp_dir + '/test_tumbling_group_window_over_time.csv'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
from pyflink.table.window import Tumble
self.t_env.get_config().get_configuration().set_string(
"pipeline.time-characteristic", "EventTime")
self.t_env.register_function("mean_udaf", mean_udaf)
source_table = """
create table source_table(
a TINYINT,
b SMALLINT,
c SMALLINT,
rowtime TIMESTAMP(3),
WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
) with(
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '%s',
'format.ignore-first-line' = 'false',
'format.field-delimiter' = ','
)
""" % source_path
self.t_env.execute_sql(source_table)
t = self.t_env.from_path("source_table")
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c', 'd', 'e'],
[
DataTypes.TINYINT(),
DataTypes.TIMESTAMP(3),
DataTypes.TIMESTAMP(3),
DataTypes.TIMESTAMP(3),
DataTypes.FLOAT()])
self.t_env.register_table_sink("Results", table_sink)
t.window(Tumble.over("1.hours").on("rowtime").alias("w")) \
.group_by("a, b, w") \
.select("a, w.start, w.end, w.rowtime, mean_udaf(c) as b") \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, [
"+I[1, 2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2018-03-11 03:59:59.999, 2.5]",
"+I[1, 2018-03-11 04:00:00.0, 2018-03-11 05:00:00.0, 2018-03-11 04:59:59.999, 8.0]",
"+I[2, 2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2018-03-11 03:59:59.999, 2.0]",
"+I[3, 2018-03-11 03:00:00.0, 2018-03-11 04:00:00.0, 2018-03-11 03:59:59.999, 2.0]",
])
os.remove(source_path)
def test_tumbling_group_window_over_count(self):
self.t_env.get_config().get_configuration().set_string("parallelism.default", "1")
# create source file path
import tempfile
import os
tmp_dir = tempfile.gettempdir()
data = [
'1,1,2,2018-03-11 03:10:00',
'3,3,2,2018-03-11 03:10:00',
'2,2,1,2018-03-11 03:10:00',
'1,1,3,2018-03-11 03:40:00',
'1,1,8,2018-03-11 04:20:00',
'2,2,3,2018-03-11 03:30:00',
'3,3,3,2018-03-11 03:30:00',
'1,1,4,2018-03-11 04:20:00',
]
source_path = tmp_dir + '/test_group_window_aggregate_function_over_count.csv'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
from pyflink.table.window import Tumble
self.t_env.get_config().get_configuration().set_string(
"pipeline.time-characteristic", "ProcessingTime")
self.t_env.register_function("mean_udaf", mean_udaf)
source_table = """
create table source_table(
a TINYINT,
b SMALLINT,
c SMALLINT,
protime as PROCTIME()
) with(
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '%s',
'format.ignore-first-line' = 'false',
'format.field-delimiter' = ','
)
""" % source_path
self.t_env.execute_sql(source_table)
t = self.t_env.from_path("source_table")
table_sink = source_sink_utils.TestAppendSink(
['a', 'd'],
[
DataTypes.TINYINT(),
DataTypes.FLOAT()])
self.t_env.register_table_sink("Results", table_sink)
t.window(Tumble.over("2.rows").on("protime").alias("w")) \
.group_by("a, b, w") \
.select("a, mean_udaf(c) as b") \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[1, 2.5]", "+I[1, 6.0]", "+I[2, 2.0]", "+I[3, 2.5]"])
os.remove(source_path)
def test_row_time_over_range_window_aggregate_function(self):
# create source file path
import tempfile
import os
tmp_dir = tempfile.gettempdir()
data = [
'1,1,2013-01-01 03:10:00',
'3,2,2013-01-01 03:10:00',
'2,1,2013-01-01 03:10:00',
'1,5,2013-01-01 03:10:00',
'1,8,2013-01-01 04:20:00',
'2,3,2013-01-01 03:30:00'
]
source_path = tmp_dir + '/test_over_range_window_aggregate_function.csv'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
max_add_min_udaf = udaf(lambda a: a.max() + a.min(),
result_type=DataTypes.SMALLINT(),
func_type='pandas')
self.t_env.get_config().get_configuration().set_string(
"pipeline.time-characteristic", "EventTime")
self.t_env.register_function("mean_udaf", mean_udaf)
self.t_env.register_function("max_add_min_udaf", max_add_min_udaf)
source_table = """
create table source_table(
a TINYINT,
b SMALLINT,
rowtime TIMESTAMP(3),
WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
) with(
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '%s',
'format.ignore-first-line' = 'false',
'format.field-delimiter' = ','
)
""" % source_path
self.t_env.execute_sql(source_table)
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c'],
[
DataTypes.TINYINT(),
DataTypes.FLOAT(),
DataTypes.SMALLINT()])
self.t_env.register_table_sink("Results", table_sink)
self.t_env.execute_sql("""
insert into Results
select a,
mean_udaf(b)
over (PARTITION BY a ORDER BY rowtime
RANGE BETWEEN INTERVAL '20' MINUTE PRECEDING AND CURRENT ROW),
max_add_min_udaf(b)
over (PARTITION BY a ORDER BY rowtime
RANGE BETWEEN INTERVAL '20' MINUTE PRECEDING AND CURRENT ROW)
from source_table
""").wait()
actual = source_sink_utils.results()
self.assert_equals(actual,
["+I[1, 3.0, 6]",
"+I[1, 3.0, 6]",
"+I[1, 8.0, 16]",
"+I[2, 1.0, 2]",
"+I[2, 2.0, 4]",
"+I[3, 2.0, 4]"])
os.remove(source_path)
def test_row_time_over_rows_window_aggregate_function(self):
# create source file path
import tempfile
import os
tmp_dir = tempfile.gettempdir()
data = [
'1,1,2013-01-01 03:10:00',
'3,2,2013-01-01 03:10:00',
'2,1,2013-01-01 03:10:00',
'1,5,2013-01-01 03:10:00',
'1,8,2013-01-01 04:20:00',
'2,3,2013-01-01 03:30:00'
]
source_path = tmp_dir + '/test_over_rows_window_aggregate_function.csv'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
max_add_min_udaf = udaf(lambda a: a.max() + a.min(),
result_type=DataTypes.SMALLINT(),
func_type='pandas')
self.t_env.get_config().get_configuration().set_string(
"pipeline.time-characteristic", "EventTime")
self.t_env.register_function("mean_udaf", mean_udaf)
self.t_env.register_function("max_add_min_udaf", max_add_min_udaf)
source_table = """
create table source_table(
a TINYINT,
b SMALLINT,
rowtime TIMESTAMP(3),
WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
) with(
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '%s',
'format.ignore-first-line' = 'false',
'format.field-delimiter' = ','
)
""" % source_path
self.t_env.execute_sql(source_table)
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c'],
[
DataTypes.TINYINT(),
DataTypes.FLOAT(),
DataTypes.SMALLINT()])
self.t_env.register_table_sink("Results", table_sink)
self.t_env.execute_sql("""
insert into Results
select a,
mean_udaf(b)
over (PARTITION BY a ORDER BY rowtime
ROWS BETWEEN 1 PRECEDING AND CURRENT ROW),
max_add_min_udaf(b)
over (PARTITION BY a ORDER BY rowtime
ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
from source_table
""").wait()
actual = source_sink_utils.results()
self.assert_equals(actual,
["+I[1, 1.0, 2]",
"+I[1, 3.0, 6]",
"+I[1, 6.5, 13]",
"+I[2, 1.0, 2]",
"+I[2, 2.0, 4]",
"+I[3, 2.0, 4]"])
os.remove(source_path)
def test_proc_time_over_rows_window_aggregate_function(self):
# create source file path
import tempfile
import os
tmp_dir = tempfile.gettempdir()
data = [
'1,1,2013-01-01 03:10:00',
'3,2,2013-01-01 03:10:00',
'2,1,2013-01-01 03:10:00',
'1,5,2013-01-01 03:10:00',
'1,8,2013-01-01 04:20:00',
'2,3,2013-01-01 03:30:00'
]
source_path = tmp_dir + '/test_over_rows_window_aggregate_function.csv'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
max_add_min_udaf = udaf(lambda a: a.max() + a.min(),
result_type=DataTypes.SMALLINT(),
func_type='pandas')
self.t_env.get_config().get_configuration().set_string("parallelism.default", "1")
self.t_env.get_config().get_configuration().set_string(
"pipeline.time-characteristic", "ProcessingTime")
self.t_env.register_function("mean_udaf", mean_udaf)
self.t_env.register_function("max_add_min_udaf", max_add_min_udaf)
source_table = """
create table source_table(
a TINYINT,
b SMALLINT,
proctime as PROCTIME()
) with(
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '%s',
'format.ignore-first-line' = 'false',
'format.field-delimiter' = ','
)
""" % source_path
self.t_env.execute_sql(source_table)
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c'],
[
DataTypes.TINYINT(),
DataTypes.FLOAT(),
DataTypes.SMALLINT()])
self.t_env.register_table_sink("Results", table_sink)
self.t_env.execute_sql("""
insert into Results
select a,
mean_udaf(b)
over (PARTITION BY a ORDER BY proctime
ROWS BETWEEN 1 PRECEDING AND CURRENT ROW),
max_add_min_udaf(b)
over (PARTITION BY a ORDER BY proctime
ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
from source_table
""").wait()
actual = source_sink_utils.results()
self.assert_equals(actual,
["+I[1, 1.0, 2]",
"+I[1, 3.0, 6]",
"+I[1, 6.5, 13]",
"+I[2, 1.0, 2]",
"+I[2, 2.0, 4]",
"+I[3, 2.0, 4]"])
os.remove(source_path)
def test_execute_over_aggregate_from_json_plan(self):
# create source file path
tmp_dir = self.tempdir
data = [
'1,1,2013-01-01 03:10:00',
'3,2,2013-01-01 03:10:00',
'2,1,2013-01-01 03:10:00',
'1,5,2013-01-01 03:10:00',
'1,8,2013-01-01 04:20:00',
'2,3,2013-01-01 03:30:00'
]
source_path = tmp_dir + '/test_execute_over_aggregate_from_json_plan.csv'
sink_path = tmp_dir + '/test_execute_over_aggregate_from_json_plan'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
source_table = """
CREATE TABLE source_table (
a TINYINT,
b SMALLINT,
rowtime TIMESTAMP(3),
WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
) WITH (
'connector' = 'filesystem',
'path' = '%s',
'format' = 'csv'
)
""" % source_path
self.t_env.execute_sql(source_table)
self.t_env.execute_sql("""
CREATE TABLE sink_table (
a TINYINT,
b FLOAT,
c SMALLINT
) WITH (
'connector' = 'filesystem',
'path' = '%s',
'format' = 'csv'
)
""" % sink_path)
max_add_min_udaf = udaf(lambda a: a.max() + a.min(),
result_type=DataTypes.SMALLINT(),
func_type='pandas')
self.t_env.get_config().get_configuration().set_string(
"pipeline.time-characteristic", "EventTime")
self.t_env.create_temporary_system_function("mean_udaf", mean_udaf)
self.t_env.create_temporary_system_function("max_add_min_udaf", max_add_min_udaf)
json_plan = self.t_env._j_tenv.getJsonPlan("""
insert into sink_table
select a,
mean_udaf(b)
over (PARTITION BY a ORDER BY rowtime
ROWS BETWEEN 1 PRECEDING AND CURRENT ROW),
max_add_min_udaf(b)
over (PARTITION BY a ORDER BY rowtime
ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
from source_table
""")
from py4j.java_gateway import get_method
get_method(self.t_env._j_tenv.executeJsonPlan(json_plan), "await")()
import glob
lines = [line.strip() for file in glob.glob(sink_path + '/*') for line in open(file, 'r')]
lines.sort()
self.assertEqual(lines, ['1,1.0,2', '1,3.0,6', '1,6.5,13', '2,1.0,2', '2,2.0,4', '3,2.0,4'])
@udaf(result_type=DataTypes.FLOAT(), func_type="pandas")
def mean_udaf(v):
return v.mean()
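# Test helper aggregate function: accumulate() sums the per-batch maxima of its
# arguments, and get_value() increments a custom metric counter before
# returning the first accumulated value.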
class MaxAdd(AggregateFunction, unittest.TestCase):
def open(self, function_context):
mg = function_context.get_metric_group()
self.counter = mg.add_group("key", "value").counter("my_counter")
self.counter_sum = 0
def get_value(self, accumulator):
# counter
self.counter.inc(10)
self.counter_sum += 10
return accumulator[0]
def create_accumulator(self):
return []
def accumulate(self, accumulator, *args):
result = 0
for arg in args:
result += arg.max()
accumulator.append(result)
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
eliasrg/SURF2017 | code/plotting.py | 1 | 4470 | # Copyright (c) 2017 Elias Riedel Gårding
# Licensed under the MIT License
import matplotlib.pyplot as plt
import numpy as np
from joint.coding import SpiralMap
def plot_lloyd_max(distr, enc, dec, x_hit=None):
plt.figure()
plt.scatter(dec.levels, np.zeros(len(dec.levels)), color='red')
# plt.scatter(enc.boundaries, np.zeros(len(enc.boundaries)),
# color='purple', s=3)
for boundary in enc.boundaries:
plt.plot([boundary, boundary], [-0.01, distr.pdf(boundary)], color='gray')
# plt.scatter([distr.mean()], [distr.pdf(distr.mean())], color='green')
if x_hit is not None: plt.scatter([x_hit], [0], marker='x')
a = max(distr.a, -20)
b = min(distr.b, 20)
x = np.linspace(a, b, num=10000)
plt.plot(x, distr.pdf(x))
# plt.xlim(-20, 20)
# plt.ylim(-0.05, 0.4)
plt.axis('tight')
def plot_lloyd_max_tracker(distr, enc, dec, tracker, x_hit=None):
plt.figure()
plt.scatter(dec.levels, np.zeros(len(dec.levels)), color='red')
# plt.scatter(enc.boundaries, np.zeros(len(enc.boundaries)),
# color='purple', s=3)
for boundary in enc.boundaries:
plt.plot([boundary, boundary], [-0.01, distr.pdf(boundary)], color='gray')
# plt.scatter([distr.mean()], [distr.pdf(distr.mean())], color='green')
if x_hit is not None: plt.scatter([x_hit], [0], marker='x')
a = max(distr.a, -20)
b = min(distr.b, 20)
x = np.linspace(a, b, num=10000)
plt.plot(x, distr.pdf(x))
plt.plot(tracker.x, tracker.fx, color='orange')
plt.plot(tracker.w_x, tracker.w_fx, color='purple')
# plt.xlim(-20, 20)
# plt.ylim(-0.05, 0.4)
plt.axis('tight')
def plot_lloyd_max_hikmet(distr, boundaries, levels, x_hit=None):
plt.figure()
plt.scatter(levels, np.zeros(len(levels)), color='red')
# plt.scatter(boundaries, np.zeros(len(boundaries)),
# color='purple', s=3)
for boundary in boundaries:
plt.plot([boundary, boundary], [-0.01, distr.pdf(boundary)], color='gray')
# plt.scatter([distr.mean()], [distr.pdf(distr.mean())], color='green')
if x_hit is not None: plt.scatter([x_hit], [0], marker='x')
a = max(distr.interval[0], -20)
b = min(distr.interval[1], 20)
x = np.linspace(a, b, num=10000)
plt.plot(x, distr.pdf(x))
# plt.xlim(-20, 20)
# plt.ylim(-0.05, 0.4)
plt.axis('tight')
def plot_lloyd_max_tracker_hikmet(distr, boundaries, levels, d1, fw, x_hit=None):
plt.figure()
plt.scatter(levels, np.zeros(len(levels)), color='red')
# plt.scatter(boundaries, np.zeros(len(boundaries)),
# color='purple', s=3)
for boundary in boundaries:
plt.plot([boundary, boundary], [-0.01, distr.pdf(boundary)], color='gray')
# plt.scatter([distr.mean()], [distr.pdf(distr.mean())], color='green')
if x_hit is not None: plt.scatter([x_hit], [0], marker='x')
a = max(distr.interval[0], -20)
b = min(distr.interval[1], 20)
x = np.linspace(a, b, num=10000)
plt.plot(x, distr.pdf(x))
plt.plot(x, (d1.interval[0] <= x) * (x <= d1.interval[1]) * d1.pdf(x), color='orange')
plt.plot(x, fw.pdf(x), color='purple')
# plt.xlim(-20, 20)
# plt.ylim(-0.05, 0.4)
plt.axis('tight')
def plot_spiral(spiral_map):
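# Draw the two arms of the spiral (positive and negative parameter s), plus a
# dashed reference segment along the line y = x for each arm.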
s = np.linspace(0, 7, num=1000)
plt.rcParams['lines.linewidth'] = 5
plt.plot(spiral_map.c * s, spiral_map.c * s, '--', color='orange')
plt.plot(-spiral_map.c * s, -spiral_map.c * s, '--', color='lightblue')
# Positive s
x, y = list(zip(*map(spiral_map.encode, s)))
plt.plot(x, y, 'orange')
# Negative s
x, y = list(zip(*map(spiral_map.encode, -s)))
plt.plot(x, y, 'lightblue')
plt.axis('square')
plt.axis([-22, 22, -22, 22])
plt.xlabel("First channel use ($a_{i_t}$)", fontsize=25)
plt.ylabel("Second channel use ($a_{i_t + 1}$)", fontsize=25)
def plot_spiral_decode(spiral_map=SpiralMap(2, 3)):
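# Interactive demo: repeatedly read a mouse click from the figure, decode it to
# a parameter s on the spiral, then draw the clicked point, its re-encoded
# spiral point, and the segment joining them.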
fig = plt.figure()
plot_spiral(spiral_map)
while True:
# Retrieve a point that the user clicks
points = []
while not points:
points = plt.ginput(1)
received = points[0]
s = spiral_map.decode(received)
decoded = spiral_map.encode(s)
plt.scatter([received[0]], [received[1]], color='tomato')
plt.plot([received[0], decoded[0]], [received[1], decoded[1]],
color='tomato')
plt.scatter([decoded[0]], [decoded[1]], color='tomato')
fig.canvas.draw()
| mit |
UDST/activitysim | example_multiple_zone/dump_data.py | 2 | 1574 | # ActivitySim
# See full license in LICENSE.txt.
import numpy as np
import pandas as pd
import openmatrix as omx
input_folder = "/Users/jeff.doyle/work/activitysim-data/sandag_zone/output/"
output_folder = "./output/"
data_file = 'NetworkData.h5'
skim_files = ['taz_skims.omx', 'tap_skims_locl.omx', 'tap_skims_prem.omx']
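# Dump the TAZ and TAP tables from the network data HDF5 file to CSV, print a
# short summary of every table in the store, then list the matrices contained
# in each OMX skim file.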
if __name__ == "__main__":
if data_file:
with pd.HDFStore(input_folder+data_file, mode='r') as hdf:
df = hdf['/TAZ']
df.to_csv(output_folder+'taz.csv', index=True)
df = hdf['/TAP']
df.to_csv(output_folder+'tap.csv', index=True)
for key in hdf.keys():
print "\n========== %s\n" % key
df = hdf[key]
print "len", len(df.index)
print df.columns.values
for c in ['TAZ', 'TAP', 'MAZ', 'OMAZ', 'DMAZ']:
if c in df.columns:
print "%s min: %s max: %s" % (c, df[c].min(), df[c].max())
if 'TAZ' in df.columns:
print df.TAZ.value_counts().head(20)
# print df
# process all skims
for skim_file in skim_files:
with omx.open_file(input_folder+skim_file) as skims:
# skims = omx.open_file(folder+skim_file)
print "\n##### %s %s" % (skim_file, skims.shape())
print "mappings:", skims.listMappings()
skimsToProcess = skims.listMatrices()
for skimName in skimsToProcess:
print skimName
# skims.close()
| bsd-3-clause |
jm-begon/scikit-learn | examples/classification/plot_classification_probability.py | 242 | 2624 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting.
Logistic regression is not a multiclass classifier out of the box; here it
handles the three classes either through a one-vs-rest scheme or through the
multinomial formulation.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'
)}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
# View probabilities
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
rabernat/xray | xarray/tests/test_plot.py | 1 | 47643 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# import mpl and change the backend before other mpl imports
try:
import matplotlib as mpl
import matplotlib.pyplot as plt
except ImportError:
pass
import inspect
import numpy as np
import pandas as pd
from datetime import datetime
import pytest
from xarray import DataArray
import xarray.plot as xplt
from xarray.plot.plot import _infer_interval_breaks
from xarray.plot.utils import (_determine_cmap_params,
_build_discrete_cmap,
_color_palette, import_seaborn)
from . import TestCase, requires_matplotlib, requires_seaborn, raises_regex
@pytest.mark.flaky
@pytest.mark.skip(reason='maybe flaky')
def text_in_fig():
'''
Return the set of all text in the figure
'''
return {t.get_text() for t in plt.gcf().findobj(mpl.text.Text)}
def find_possible_colorbars():
# nb. this function also matches meshes from pcolormesh
return plt.gcf().findobj(mpl.collections.QuadMesh)
def substring_in_axes(substring, ax):
'''
Return True if a substring is found anywhere in an axes
'''
alltxt = set([t.get_text() for t in ax.findobj(mpl.text.Text)])
for txt in alltxt:
if substring in txt:
return True
return False
def easy_array(shape, start=0, stop=1):
'''
Make an array with desired shape using np.linspace
shape is a tuple like (2, 3)
'''
a = np.linspace(start, stop, num=np.prod(shape))
return a.reshape(shape)
@requires_matplotlib
class PlotTestCase(TestCase):
def tearDown(self):
# Remove all matplotlib figures
plt.close('all')
def pass_in_axis(self, plotmethod):
fig, axes = plt.subplots(ncols=2)
plotmethod(ax=axes[0])
self.assertTrue(axes[0].has_data())
@pytest.mark.slow
def imshow_called(self, plotmethod):
plotmethod()
images = plt.gca().findobj(mpl.image.AxesImage)
return len(images) > 0
def contourf_called(self, plotmethod):
plotmethod()
paths = plt.gca().findobj(mpl.collections.PathCollection)
return len(paths) > 0
class TestPlot(PlotTestCase):
def setUp(self):
self.darray = DataArray(easy_array((2, 3, 4)))
def test1d(self):
self.darray[:, 0, 0].plot()
def test_2d_before_squeeze(self):
a = DataArray(easy_array((1, 5)))
a.plot()
def test2d_uniform_calls_imshow(self):
self.assertTrue(self.imshow_called(self.darray[:, :, 0].plot.imshow))
@pytest.mark.slow
def test2d_nonuniform_calls_contourf(self):
a = self.darray[:, :, 0]
a.coords['dim_1'] = [2, 1, 89]
self.assertTrue(self.contourf_called(a.plot.contourf))
def test3d(self):
self.darray.plot()
def test_can_pass_in_axis(self):
self.pass_in_axis(self.darray.plot)
def test__infer_interval_breaks(self):
self.assertArrayEqual([-0.5, 0.5, 1.5], _infer_interval_breaks([0, 1]))
self.assertArrayEqual([-0.5, 0.5, 5.0, 9.5, 10.5],
_infer_interval_breaks([0, 1, 9, 10]))
self.assertArrayEqual(
pd.date_range('20000101', periods=4) - np.timedelta64(12, 'h'),
_infer_interval_breaks(pd.date_range('20000101', periods=3)))
# make a bounded 2D array that we will center and re-infer
xref, yref = np.meshgrid(np.arange(6), np.arange(5))
cx = (xref[1:, 1:] + xref[:-1, :-1]) / 2
cy = (yref[1:, 1:] + yref[:-1, :-1]) / 2
x = _infer_interval_breaks(cx, axis=1)
x = _infer_interval_breaks(x, axis=0)
y = _infer_interval_breaks(cy, axis=1)
y = _infer_interval_breaks(y, axis=0)
np.testing.assert_allclose(xref, x)
np.testing.assert_allclose(yref, y)
def test_datetime_dimension(self):
nrow = 3
ncol = 4
time = pd.date_range('2000-01-01', periods=nrow)
a = DataArray(easy_array((nrow, ncol)),
coords=[('time', time), ('y', range(ncol))])
a.plot()
ax = plt.gca()
self.assertTrue(ax.has_data())
@pytest.mark.slow
def test_convenient_facetgrid(self):
a = easy_array((10, 15, 4))
d = DataArray(a, dims=['y', 'x', 'z'])
d.coords['z'] = list('abcd')
g = d.plot(x='x', y='y', col='z', col_wrap=2, cmap='cool')
self.assertArrayEqual(g.axes.shape, [2, 2])
for ax in g.axes.flat:
self.assertTrue(ax.has_data())
with raises_regex(ValueError, '[Ff]acet'):
d.plot(x='x', y='y', col='z', ax=plt.gca())
with raises_regex(ValueError, '[Ff]acet'):
d[0].plot(x='x', y='y', col='z', ax=plt.gca())
@pytest.mark.slow
def test_subplot_kws(self):
a = easy_array((10, 15, 4))
d = DataArray(a, dims=['y', 'x', 'z'])
d.coords['z'] = list('abcd')
g = d.plot(x='x', y='y', col='z', col_wrap=2, cmap='cool',
subplot_kws=dict(facecolor='r'))
for ax in g.axes.flat:
try:
# mpl V2
self.assertEqual(ax.get_facecolor()[0:3],
mpl.colors.to_rgb('r'))
except AttributeError:
self.assertEqual(ax.get_axis_bgcolor(), 'r')
@pytest.mark.slow
def test_plot_size(self):
self.darray[:, 0, 0].plot(figsize=(13, 5))
assert tuple(plt.gcf().get_size_inches()) == (13, 5)
self.darray.plot(figsize=(13, 5))
assert tuple(plt.gcf().get_size_inches()) == (13, 5)
self.darray.plot(size=5)
assert plt.gcf().get_size_inches()[1] == 5
self.darray.plot(size=5, aspect=2)
assert tuple(plt.gcf().get_size_inches()) == (10, 5)
with raises_regex(ValueError, 'cannot provide both'):
self.darray.plot(ax=plt.gca(), figsize=(3, 4))
with raises_regex(ValueError, 'cannot provide both'):
self.darray.plot(size=5, figsize=(3, 4))
with raises_regex(ValueError, 'cannot provide both'):
self.darray.plot(size=5, ax=plt.gca())
with raises_regex(ValueError, 'cannot provide `aspect`'):
self.darray.plot(aspect=1)
@pytest.mark.slow
def test_convenient_facetgrid_4d(self):
a = easy_array((10, 15, 2, 3))
d = DataArray(a, dims=['y', 'x', 'columns', 'rows'])
g = d.plot(x='x', y='y', col='columns', row='rows')
self.assertArrayEqual(g.axes.shape, [3, 2])
for ax in g.axes.flat:
self.assertTrue(ax.has_data())
with raises_regex(ValueError, '[Ff]acet'):
d.plot(x='x', y='y', col='columns', ax=plt.gca())
class TestPlot1D(PlotTestCase):
def setUp(self):
d = [0, 1.1, 0, 2]
self.darray = DataArray(d, coords={'period': range(len(d))},
dims='period')
def test_xlabel_is_index_name(self):
self.darray.plot()
self.assertEqual('period', plt.gca().get_xlabel())
def test_no_label_name_on_y_axis(self):
self.darray.plot()
self.assertEqual('', plt.gca().get_ylabel())
def test_ylabel_is_data_name(self):
self.darray.name = 'temperature'
self.darray.plot()
self.assertEqual(self.darray.name, plt.gca().get_ylabel())
def test_wrong_dims_raises_valueerror(self):
twodims = DataArray(easy_array((2, 5)))
with pytest.raises(ValueError):
twodims.plot.line()
def test_format_string(self):
self.darray.plot.line('ro')
def test_can_pass_in_axis(self):
self.pass_in_axis(self.darray.plot.line)
def test_nonnumeric_index_raises_typeerror(self):
a = DataArray([1, 2, 3], {'letter': ['a', 'b', 'c']},
dims='letter')
with raises_regex(TypeError, r'[Pp]lot'):
a.plot.line()
def test_primitive_returned(self):
p = self.darray.plot.line()
self.assertTrue(isinstance(p[0], mpl.lines.Line2D))
@pytest.mark.slow
def test_plot_nans(self):
self.darray[1] = np.nan
self.darray.plot.line()
def test_x_ticks_are_rotated_for_time(self):
time = pd.date_range('2000-01-01', '2000-01-10')
a = DataArray(np.arange(len(time)), [('t', time)])
a.plot.line()
rotation = plt.gca().get_xticklabels()[0].get_rotation()
self.assertFalse(rotation == 0)
def test_slice_in_title(self):
self.darray.coords['d'] = 10
self.darray.plot.line()
title = plt.gca().get_title()
self.assertEqual('d = 10', title)
class TestPlotHistogram(PlotTestCase):
def setUp(self):
self.darray = DataArray(easy_array((2, 3, 4)))
def test_3d_array(self):
self.darray.plot.hist()
def test_title_no_name(self):
self.darray.plot.hist()
self.assertEqual('', plt.gca().get_title())
def test_title_uses_name(self):
self.darray.name = 'testpoints'
self.darray.plot.hist()
self.assertIn(self.darray.name, plt.gca().get_title())
def test_ylabel_is_count(self):
self.darray.plot.hist()
self.assertEqual('Count', plt.gca().get_ylabel())
def test_can_pass_in_kwargs(self):
nbins = 5
self.darray.plot.hist(bins=nbins)
self.assertEqual(nbins, len(plt.gca().patches))
def test_can_pass_in_axis(self):
self.pass_in_axis(self.darray.plot.hist)
def test_primitive_returned(self):
h = self.darray.plot.hist()
self.assertTrue(isinstance(h[-1][0], mpl.patches.Rectangle))
@pytest.mark.slow
def test_plot_nans(self):
self.darray[0, 0, 0] = np.nan
self.darray.plot.hist()
@requires_matplotlib
class TestDetermineCmapParams(TestCase):
def setUp(self):
self.data = np.linspace(0, 1, num=100)
def test_robust(self):
cmap_params = _determine_cmap_params(self.data, robust=True)
self.assertEqual(cmap_params['vmin'], np.percentile(self.data, 2))
self.assertEqual(cmap_params['vmax'], np.percentile(self.data, 98))
self.assertEqual(cmap_params['cmap'].name, 'viridis')
self.assertEqual(cmap_params['extend'], 'both')
self.assertIsNone(cmap_params['levels'])
self.assertIsNone(cmap_params['norm'])
def test_center(self):
cmap_params = _determine_cmap_params(self.data, center=0.5)
self.assertEqual(cmap_params['vmax'] - 0.5, 0.5 - cmap_params['vmin'])
self.assertEqual(cmap_params['cmap'], 'RdBu_r')
self.assertEqual(cmap_params['extend'], 'neither')
self.assertIsNone(cmap_params['levels'])
self.assertIsNone(cmap_params['norm'])
@pytest.mark.slow
def test_integer_levels(self):
data = self.data + 1
# default is to cover full data range but with no guarantee on Nlevels
for level in np.arange(2, 10, dtype=int):
cmap_params = _determine_cmap_params(data, levels=level)
self.assertEqual(cmap_params['vmin'], cmap_params['levels'][0])
self.assertEqual(cmap_params['vmax'], cmap_params['levels'][-1])
self.assertEqual(cmap_params['extend'], 'neither')
# with min max we are more strict
cmap_params = _determine_cmap_params(data, levels=5, vmin=0, vmax=5,
cmap='Blues')
self.assertEqual(cmap_params['vmin'], 0)
self.assertEqual(cmap_params['vmax'], 5)
self.assertEqual(cmap_params['vmin'], cmap_params['levels'][0])
self.assertEqual(cmap_params['vmax'], cmap_params['levels'][-1])
self.assertEqual(cmap_params['cmap'].name, 'Blues')
self.assertEqual(cmap_params['extend'], 'neither')
self.assertEqual(cmap_params['cmap'].N, 4)
self.assertEqual(cmap_params['norm'].N, 5)
cmap_params = _determine_cmap_params(data, levels=5,
vmin=0.5, vmax=1.5)
self.assertEqual(cmap_params['cmap'].name, 'viridis')
self.assertEqual(cmap_params['extend'], 'max')
cmap_params = _determine_cmap_params(data, levels=5,
vmin=1.5)
self.assertEqual(cmap_params['cmap'].name, 'viridis')
self.assertEqual(cmap_params['extend'], 'min')
cmap_params = _determine_cmap_params(data, levels=5,
vmin=1.3, vmax=1.5)
self.assertEqual(cmap_params['cmap'].name, 'viridis')
self.assertEqual(cmap_params['extend'], 'both')
def test_list_levels(self):
data = self.data + 1
orig_levels = [0, 1, 2, 3, 4, 5]
# vmin and vmax should be ignored if levels are explicitly provided
cmap_params = _determine_cmap_params(data, levels=orig_levels,
vmin=0, vmax=3)
self.assertEqual(cmap_params['vmin'], 0)
self.assertEqual(cmap_params['vmax'], 5)
self.assertEqual(cmap_params['cmap'].N, 5)
self.assertEqual(cmap_params['norm'].N, 6)
for wrap_levels in [list, np.array, pd.Index, DataArray]:
cmap_params = _determine_cmap_params(
data, levels=wrap_levels(orig_levels))
self.assertArrayEqual(cmap_params['levels'], orig_levels)
def test_divergentcontrol(self):
neg = self.data - 0.1
pos = self.data
# Default with positive data will be a normal cmap
cmap_params = _determine_cmap_params(pos)
self.assertEqual(cmap_params['vmin'], 0)
self.assertEqual(cmap_params['vmax'], 1)
self.assertEqual(cmap_params['cmap'].name, "viridis")
# Default with negative data will be a divergent cmap
cmap_params = _determine_cmap_params(neg)
self.assertEqual(cmap_params['vmin'], -0.9)
self.assertEqual(cmap_params['vmax'], 0.9)
self.assertEqual(cmap_params['cmap'], "RdBu_r")
# Setting vmin or vmax should prevent this only if center is false
cmap_params = _determine_cmap_params(neg, vmin=-0.1, center=False)
self.assertEqual(cmap_params['vmin'], -0.1)
self.assertEqual(cmap_params['vmax'], 0.9)
self.assertEqual(cmap_params['cmap'].name, "viridis")
cmap_params = _determine_cmap_params(neg, vmax=0.5, center=False)
self.assertEqual(cmap_params['vmin'], -0.1)
self.assertEqual(cmap_params['vmax'], 0.5)
self.assertEqual(cmap_params['cmap'].name, "viridis")
# Setting center=False too
cmap_params = _determine_cmap_params(neg, center=False)
self.assertEqual(cmap_params['vmin'], -0.1)
self.assertEqual(cmap_params['vmax'], 0.9)
self.assertEqual(cmap_params['cmap'].name, "viridis")
# However, I should still be able to set center and have a div cmap
cmap_params = _determine_cmap_params(neg, center=0)
self.assertEqual(cmap_params['vmin'], -0.9)
self.assertEqual(cmap_params['vmax'], 0.9)
self.assertEqual(cmap_params['cmap'], "RdBu_r")
# Setting vmin or vmax alone will force symmetric bounds around center
cmap_params = _determine_cmap_params(neg, vmin=-0.1)
self.assertEqual(cmap_params['vmin'], -0.1)
self.assertEqual(cmap_params['vmax'], 0.1)
self.assertEqual(cmap_params['cmap'], "RdBu_r")
cmap_params = _determine_cmap_params(neg, vmax=0.5)
self.assertEqual(cmap_params['vmin'], -0.5)
self.assertEqual(cmap_params['vmax'], 0.5)
self.assertEqual(cmap_params['cmap'], "RdBu_r")
cmap_params = _determine_cmap_params(neg, vmax=0.6, center=0.1)
self.assertEqual(cmap_params['vmin'], -0.4)
self.assertEqual(cmap_params['vmax'], 0.6)
self.assertEqual(cmap_params['cmap'], "RdBu_r")
# But this is only true if vmin or vmax are negative
cmap_params = _determine_cmap_params(pos, vmin=-0.1)
self.assertEqual(cmap_params['vmin'], -0.1)
self.assertEqual(cmap_params['vmax'], 0.1)
self.assertEqual(cmap_params['cmap'], "RdBu_r")
cmap_params = _determine_cmap_params(pos, vmin=0.1)
self.assertEqual(cmap_params['vmin'], 0.1)
self.assertEqual(cmap_params['vmax'], 1)
self.assertEqual(cmap_params['cmap'].name, "viridis")
cmap_params = _determine_cmap_params(pos, vmax=0.5)
self.assertEqual(cmap_params['vmin'], 0)
self.assertEqual(cmap_params['vmax'], 0.5)
self.assertEqual(cmap_params['cmap'].name, "viridis")
# If both vmin and vmax are provided, output is non-divergent
cmap_params = _determine_cmap_params(neg, vmin=-0.2, vmax=0.6)
self.assertEqual(cmap_params['vmin'], -0.2)
self.assertEqual(cmap_params['vmax'], 0.6)
self.assertEqual(cmap_params['cmap'].name, "viridis")
@requires_matplotlib
class TestDiscreteColorMap(TestCase):
def setUp(self):
x = np.arange(start=0, stop=10, step=2)
y = np.arange(start=9, stop=-7, step=-3)
xy = np.dstack(np.meshgrid(x, y))
distance = np.linalg.norm(xy, axis=2)
self.darray = DataArray(distance, list(zip(('y', 'x'), (y, x))))
self.data_min = distance.min()
self.data_max = distance.max()
@pytest.mark.slow
def test_recover_from_seaborn_jet_exception(self):
pal = _color_palette('jet', 4)
self.assertTrue(type(pal) == np.ndarray)
self.assertEqual(len(pal), 4)
@pytest.mark.slow
def test_build_discrete_cmap(self):
for (cmap, levels, extend, filled) in [('jet', [0, 1], 'both', False),
('hot', [-4, 4], 'max', True)]:
ncmap, cnorm = _build_discrete_cmap(cmap, levels, extend, filled)
self.assertEqual(ncmap.N, len(levels) - 1)
self.assertEqual(len(ncmap.colors), len(levels) - 1)
self.assertEqual(cnorm.N, len(levels))
self.assertArrayEqual(cnorm.boundaries, levels)
self.assertEqual(max(levels), cnorm.vmax)
self.assertEqual(min(levels), cnorm.vmin)
if filled:
self.assertEqual(ncmap.colorbar_extend, extend)
else:
self.assertEqual(ncmap.colorbar_extend, 'max')
@pytest.mark.slow
def test_discrete_colormap_list_of_levels(self):
for extend, levels in [('max', [-1, 2, 4, 8, 10]),
('both', [2, 5, 10, 11]),
('neither', [0, 5, 10, 15]),
('min', [2, 5, 10, 15])]:
for kind in ['imshow', 'pcolormesh', 'contourf', 'contour']:
primitive = getattr(self.darray.plot, kind)(levels=levels)
self.assertArrayEqual(levels, primitive.norm.boundaries)
self.assertEqual(max(levels), primitive.norm.vmax)
self.assertEqual(min(levels), primitive.norm.vmin)
if kind != 'contour':
self.assertEqual(extend, primitive.cmap.colorbar_extend)
else:
self.assertEqual('max', primitive.cmap.colorbar_extend)
self.assertEqual(len(levels) - 1, len(primitive.cmap.colors))
@pytest.mark.slow
def test_discrete_colormap_int_levels(self):
for extend, levels, vmin, vmax in [('neither', 7, None, None),
('neither', 7, None, 20),
('both', 7, 4, 8),
('min', 10, 4, 15)]:
for kind in ['imshow', 'pcolormesh', 'contourf', 'contour']:
primitive = getattr(self.darray.plot, kind)(levels=levels,
vmin=vmin,
vmax=vmax)
self.assertGreaterEqual(levels,
len(primitive.norm.boundaries) - 1)
if vmax is None:
self.assertGreaterEqual(primitive.norm.vmax, self.data_max)
else:
self.assertGreaterEqual(primitive.norm.vmax, vmax)
if vmin is None:
self.assertLessEqual(primitive.norm.vmin, self.data_min)
else:
self.assertLessEqual(primitive.norm.vmin, vmin)
if kind != 'contour':
self.assertEqual(extend, primitive.cmap.colorbar_extend)
else:
self.assertEqual('max', primitive.cmap.colorbar_extend)
self.assertGreaterEqual(levels, len(primitive.cmap.colors))
def test_discrete_colormap_list_levels_and_vmin_or_vmax(self):
levels = [0, 5, 10, 15]
primitive = self.darray.plot(levels=levels, vmin=-3, vmax=20)
self.assertEqual(primitive.norm.vmax, max(levels))
self.assertEqual(primitive.norm.vmin, min(levels))
class Common2dMixin:
"""
Common tests for 2d plotting go here.
These tests assume that a staticmethod for `self.plotfunc` exists.
Should have the same name as the method.
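For example, a subclass such as `TestContourf` sets
`plotfunc = staticmethod(xplt.contourf)` and inherits every test defined here.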
"""
def setUp(self):
da = DataArray(easy_array(
(10, 15), start=-1), dims=['y', 'x'])
# add 2d coords
ds = da.to_dataset(name='testvar')
x, y = np.meshgrid(da.x.values, da.y.values)
ds['x2d'] = DataArray(x, dims=['y', 'x'])
ds['y2d'] = DataArray(y, dims=['y', 'x'])
ds.set_coords(['x2d', 'y2d'], inplace=True)
# set darray and plot method
self.darray = ds.testvar
self.plotmethod = getattr(self.darray.plot, self.plotfunc.__name__)
def test_label_names(self):
self.plotmethod()
self.assertEqual('x', plt.gca().get_xlabel())
self.assertEqual('y', plt.gca().get_ylabel())
def test_1d_raises_valueerror(self):
with raises_regex(ValueError, r'DataArray must be 2d'):
self.plotfunc(self.darray[0, :])
def test_3d_raises_valueerror(self):
a = DataArray(easy_array((2, 3, 4)))
with raises_regex(ValueError, r'DataArray must be 2d'):
self.plotfunc(a)
def test_nonnumeric_index_raises_typeerror(self):
a = DataArray(easy_array((3, 2)),
coords=[['a', 'b', 'c'], ['d', 'e']])
with raises_regex(TypeError, r'[Pp]lot'):
self.plotfunc(a)
def test_can_pass_in_axis(self):
self.pass_in_axis(self.plotmethod)
def test_xyincrease_false_changes_axes(self):
self.plotmethod(xincrease=False, yincrease=False)
xlim = plt.gca().get_xlim()
ylim = plt.gca().get_ylim()
diffs = xlim[0] - 14, xlim[1] - 0, ylim[0] - 9, ylim[1] - 0
self.assertTrue(all(abs(x) < 1 for x in diffs))
def test_xyincrease_true_changes_axes(self):
self.plotmethod(xincrease=True, yincrease=True)
xlim = plt.gca().get_xlim()
ylim = plt.gca().get_ylim()
diffs = xlim[0] - 0, xlim[1] - 14, ylim[0] - 0, ylim[1] - 9
self.assertTrue(all(abs(x) < 1 for x in diffs))
def test_plot_nans(self):
x1 = self.darray[:5]
x2 = self.darray.copy()
x2[5:] = np.nan
clim1 = self.plotfunc(x1).get_clim()
clim2 = self.plotfunc(x2).get_clim()
self.assertEqual(clim1, clim2)
def test_viridis_cmap(self):
cmap_name = self.plotmethod(cmap='viridis').get_cmap().name
self.assertEqual('viridis', cmap_name)
def test_default_cmap(self):
cmap_name = self.plotmethod().get_cmap().name
self.assertEqual('RdBu_r', cmap_name)
cmap_name = self.plotfunc(abs(self.darray)).get_cmap().name
self.assertEqual('viridis', cmap_name)
@requires_seaborn
def test_seaborn_palette_as_cmap(self):
cmap_name = self.plotmethod(
levels=2, cmap='husl').get_cmap().name
self.assertEqual('husl', cmap_name)
def test_can_change_default_cmap(self):
cmap_name = self.plotmethod(cmap='Blues').get_cmap().name
self.assertEqual('Blues', cmap_name)
def test_diverging_color_limits(self):
artist = self.plotmethod()
vmin, vmax = artist.get_clim()
self.assertAlmostEqual(-vmin, vmax)
def test_xy_strings(self):
self.plotmethod('y', 'x')
ax = plt.gca()
self.assertEqual('y', ax.get_xlabel())
self.assertEqual('x', ax.get_ylabel())
def test_positional_coord_string(self):
self.plotmethod(y='x')
ax = plt.gca()
self.assertEqual('x', ax.get_ylabel())
self.assertEqual('y', ax.get_xlabel())
self.plotmethod(x='x')
ax = plt.gca()
self.assertEqual('x', ax.get_xlabel())
self.assertEqual('y', ax.get_ylabel())
def test_bad_x_string_exception(self):
with raises_regex(
ValueError, 'x and y must be coordinate variables'):
self.plotmethod('not_a_real_dim', 'y')
with raises_regex(
ValueError, 'x must be a dimension name if y is not supplied'):
self.plotmethod(x='not_a_real_dim')
with raises_regex(
ValueError, 'y must be a dimension name if x is not supplied'):
self.plotmethod(y='not_a_real_dim')
self.darray.coords['z'] = 100
def test_coord_strings(self):
# 1d coords (same as dims)
self.assertEqual({'x', 'y'}, set(self.darray.dims))
self.plotmethod(y='y', x='x')
def test_non_linked_coords(self):
# plot with coordinate names that are not dimensions
self.darray.coords['newy'] = self.darray.y + 150
# Normal case, without transpose
self.plotfunc(self.darray, x='x', y='newy')
ax = plt.gca()
self.assertEqual('x', ax.get_xlabel())
self.assertEqual('newy', ax.get_ylabel())
# ax limits might change between plotfuncs
# simply ensure that these high coords were passed over
self.assertTrue(np.min(ax.get_ylim()) > 100.)
def test_non_linked_coords_transpose(self):
# plot with coordinate names that are not dimensions,
# and with transposed y and x axes
# This used to raise an error with pcolormesh and contour
# https://github.com/pydata/xarray/issues/788
self.darray.coords['newy'] = self.darray.y + 150
self.plotfunc(self.darray, x='newy', y='x')
ax = plt.gca()
self.assertEqual('newy', ax.get_xlabel())
self.assertEqual('x', ax.get_ylabel())
# ax limits might change between plotfuncs
# simply ensure that these high coords were passed over
self.assertTrue(np.min(ax.get_xlim()) > 100.)
def test_default_title(self):
a = DataArray(easy_array((4, 3, 2)), dims=['a', 'b', 'c'])
a.coords['c'] = [0, 1]
a.coords['d'] = u'foo'
self.plotfunc(a.isel(c=1))
title = plt.gca().get_title()
self.assertTrue('c = 1, d = foo' == title or 'd = foo, c = 1' == title)
def test_colorbar_default_label(self):
self.darray.name = 'testvar'
self.plotmethod(add_colorbar=True)
self.assertIn(self.darray.name, text_in_fig())
def test_no_labels(self):
self.darray.name = 'testvar'
self.plotmethod(add_labels=False)
alltxt = text_in_fig()
for string in ['x', 'y', 'testvar']:
self.assertNotIn(string, alltxt)
def test_colorbar_kwargs(self):
# replace label
self.darray.name = 'testvar'
self.plotmethod(add_colorbar=True, cbar_kwargs={'label':'MyLabel'})
alltxt = text_in_fig()
self.assertIn('MyLabel', alltxt)
self.assertNotIn('testvar', alltxt)
# you can use mapping types as well
self.plotmethod(add_colorbar=True, cbar_kwargs=(('label', 'MyLabel'),))
alltxt = text_in_fig()
self.assertIn('MyLabel', alltxt)
self.assertNotIn('testvar', alltxt)
# change cbar ax
fig, (ax, cax) = plt.subplots(1, 2)
self.plotmethod(ax=ax, cbar_ax=cax, add_colorbar=True,
cbar_kwargs={'label':'MyBar'})
self.assertTrue(ax.has_data())
self.assertTrue(cax.has_data())
alltxt = text_in_fig()
self.assertIn('MyBar', alltxt)
self.assertNotIn('testvar', alltxt)
# note that there are two ways to achieve this
fig, (ax, cax) = plt.subplots(1, 2)
self.plotmethod(ax=ax, add_colorbar=True,
cbar_kwargs={'label':'MyBar', 'cax':cax})
self.assertTrue(ax.has_data())
self.assertTrue(cax.has_data())
alltxt = text_in_fig()
self.assertIn('MyBar', alltxt)
self.assertNotIn('testvar', alltxt)
# see that no colorbar is respected
self.plotmethod(add_colorbar=False)
self.assertNotIn('testvar', text_in_fig())
# check that error is raised
pytest.raises(ValueError, self.plotmethod,
add_colorbar=False, cbar_kwargs= {'label':'label'})
def test_verbose_facetgrid(self):
a = easy_array((10, 15, 3))
d = DataArray(a, dims=['y', 'x', 'z'])
g = xplt.FacetGrid(d, col='z')
g.map_dataarray(self.plotfunc, 'x', 'y')
for ax in g.axes.flat:
self.assertTrue(ax.has_data())
def test_2d_function_and_method_signature_same(self):
func_sig = inspect.getcallargs(self.plotfunc, self.darray)
method_sig = inspect.getcallargs(self.plotmethod)
del method_sig['_PlotMethods_obj']
del func_sig['darray']
self.assertEqual(func_sig, method_sig)
def test_convenient_facetgrid(self):
a = easy_array((10, 15, 4))
d = DataArray(a, dims=['y', 'x', 'z'])
g = self.plotfunc(d, x='x', y='y', col='z', col_wrap=2)
self.assertArrayEqual(g.axes.shape, [2, 2])
for (y, x), ax in np.ndenumerate(g.axes):
self.assertTrue(ax.has_data())
if x == 0:
self.assertEqual('y', ax.get_ylabel())
else:
self.assertEqual('', ax.get_ylabel())
if y == 1:
self.assertEqual('x', ax.get_xlabel())
else:
self.assertEqual('', ax.get_xlabel())
# Inferring labels
g = self.plotfunc(d, col='z', col_wrap=2)
self.assertArrayEqual(g.axes.shape, [2, 2])
for (y, x), ax in np.ndenumerate(g.axes):
self.assertTrue(ax.has_data())
if x == 0:
self.assertEqual('y', ax.get_ylabel())
else:
self.assertEqual('', ax.get_ylabel())
if y == 1:
self.assertEqual('x', ax.get_xlabel())
else:
self.assertEqual('', ax.get_xlabel())
def test_convenient_facetgrid_4d(self):
a = easy_array((10, 15, 2, 3))
d = DataArray(a, dims=['y', 'x', 'columns', 'rows'])
g = self.plotfunc(d, x='x', y='y', col='columns', row='rows')
self.assertArrayEqual(g.axes.shape, [3, 2])
for ax in g.axes.flat:
self.assertTrue(ax.has_data())
def test_facetgrid_cmap(self):
# Regression test for GH592
data = (np.random.random(size=(20, 25, 12)) + np.linspace(-3, 3, 12))
d = DataArray(data, dims=['x', 'y', 'time'])
fg = d.plot.pcolormesh(col='time')
# check that all color limits are the same
self.assertEqual(len(set(m.get_clim() for m in fg._mappables)), 1)
# check that all colormaps are the same
self.assertEqual(len(set(m.get_cmap().name for m in fg._mappables)), 1)
@pytest.mark.slow
class TestContourf(Common2dMixin, PlotTestCase):
plotfunc = staticmethod(xplt.contourf)
@pytest.mark.slow
def test_contourf_called(self):
# Having both statements ensures the test works properly
self.assertFalse(self.contourf_called(self.darray.plot.imshow))
self.assertTrue(self.contourf_called(self.darray.plot.contourf))
def test_primitive_artist_returned(self):
artist = self.plotmethod()
self.assertTrue(isinstance(artist, mpl.contour.QuadContourSet))
@pytest.mark.slow
def test_extend(self):
artist = self.plotmethod()
self.assertEqual(artist.extend, 'neither')
self.darray[0, 0] = -100
self.darray[-1, -1] = 100
artist = self.plotmethod(robust=True)
self.assertEqual(artist.extend, 'both')
self.darray[0, 0] = 0
self.darray[-1, -1] = 0
artist = self.plotmethod(vmin=-0, vmax=10)
self.assertEqual(artist.extend, 'min')
artist = self.plotmethod(vmin=-10, vmax=0)
self.assertEqual(artist.extend, 'max')
@pytest.mark.slow
def test_2d_coord_names(self):
self.plotmethod(x='x2d', y='y2d')
# make sure labels came out ok
ax = plt.gca()
self.assertEqual('x2d', ax.get_xlabel())
self.assertEqual('y2d', ax.get_ylabel())
@pytest.mark.slow
def test_levels(self):
artist = self.plotmethod(levels=[-0.5, -0.4, 0.1])
self.assertEqual(artist.extend, 'both')
artist = self.plotmethod(levels=3)
self.assertEqual(artist.extend, 'neither')
@pytest.mark.slow
class TestContour(Common2dMixin, PlotTestCase):
plotfunc = staticmethod(xplt.contour)
def test_colors(self):
# matplotlib cmap.colors gives an rgbA ndarray
# when seaborn is used, instead we get an rgb tuple
def _color_as_tuple(c):
return tuple(c[:3])
artist = self.plotmethod(colors='k')
self.assertEqual(
_color_as_tuple(artist.cmap.colors[0]),
(0.0, 0.0, 0.0))
artist = self.plotmethod(colors=['k', 'b'])
self.assertEqual(
_color_as_tuple(artist.cmap.colors[1]),
(0.0, 0.0, 1.0))
artist = self.darray.plot.contour(levels=[-0.5, 0., 0.5, 1.],
colors=['k', 'r', 'w', 'b'])
self.assertEqual(
_color_as_tuple(artist.cmap.colors[1]),
(1.0, 0.0, 0.0))
self.assertEqual(
_color_as_tuple(artist.cmap.colors[2]),
(1.0, 1.0, 1.0))
# the last color is now under "over"
self.assertEqual(
_color_as_tuple(artist.cmap._rgba_over),
(0.0, 0.0, 1.0))
def test_cmap_and_color_both(self):
with pytest.raises(ValueError):
self.plotmethod(colors='k', cmap='RdBu')
def list_of_colors_in_cmap_deprecated(self):
with pytest.raises(Exception):
self.plotmethod(cmap=['k', 'b'])
@pytest.mark.slow
def test_2d_coord_names(self):
self.plotmethod(x='x2d', y='y2d')
# make sure labels came out ok
ax = plt.gca()
self.assertEqual('x2d', ax.get_xlabel())
self.assertEqual('y2d', ax.get_ylabel())
def test_single_level(self):
# this used to raise an error, but not anymore since
# add_colorbar defaults to false
self.plotmethod(levels=[0.1])
self.plotmethod(levels=1)
class TestPcolormesh(Common2dMixin, PlotTestCase):
plotfunc = staticmethod(xplt.pcolormesh)
def test_primitive_artist_returned(self):
artist = self.plotmethod()
self.assertTrue(isinstance(artist, mpl.collections.QuadMesh))
def test_everything_plotted(self):
artist = self.plotmethod()
self.assertEqual(artist.get_array().size, self.darray.size)
@pytest.mark.slow
def test_2d_coord_names(self):
self.plotmethod(x='x2d', y='y2d')
# make sure labels came out ok
ax = plt.gca()
self.assertEqual('x2d', ax.get_xlabel())
self.assertEqual('y2d', ax.get_ylabel())
def test_dont_infer_interval_breaks_for_cartopy(self):
# Regression for GH 781
ax = plt.gca()
# Simulate a Cartopy Axis
setattr(ax, 'projection', True)
artist = self.plotmethod(x='x2d', y='y2d', ax=ax)
self.assertTrue(isinstance(artist, mpl.collections.QuadMesh))
# Let cartopy handle the axis limits and artist size
self.assertTrue(artist.get_array().size <= self.darray.size)
@pytest.mark.slow
class TestImshow(Common2dMixin, PlotTestCase):
plotfunc = staticmethod(xplt.imshow)
@pytest.mark.slow
def test_imshow_called(self):
# Having both statements ensures the test works properly
self.assertFalse(self.imshow_called(self.darray.plot.contourf))
self.assertTrue(self.imshow_called(self.darray.plot.imshow))
def test_xy_pixel_centered(self):
self.darray.plot.imshow(yincrease=False)
self.assertTrue(np.allclose([-0.5, 14.5], plt.gca().get_xlim()))
self.assertTrue(np.allclose([9.5, -0.5], plt.gca().get_ylim()))
def test_default_aspect_is_auto(self):
self.darray.plot.imshow()
self.assertEqual('auto', plt.gca().get_aspect())
@pytest.mark.slow
def test_cannot_change_mpl_aspect(self):
with raises_regex(ValueError, 'not available in xarray'):
self.darray.plot.imshow(aspect='equal')
# with numbers we fall back to fig control
self.darray.plot.imshow(size=5, aspect=2)
self.assertEqual('auto', plt.gca().get_aspect())
assert tuple(plt.gcf().get_size_inches()) == (10, 5)
@pytest.mark.slow
def test_primitive_artist_returned(self):
artist = self.plotmethod()
self.assertTrue(isinstance(artist, mpl.image.AxesImage))
@pytest.mark.slow
@requires_seaborn
def test_seaborn_palette_needs_levels(self):
with pytest.raises(ValueError):
self.plotmethod(cmap='husl')
def test_2d_coord_names(self):
with raises_regex(ValueError, 'requires 1D coordinates'):
self.plotmethod(x='x2d', y='y2d')
class TestFacetGrid(PlotTestCase):
def setUp(self):
d = easy_array((10, 15, 3))
self.darray = DataArray(d, dims=['y', 'x', 'z'],
coords={'z': ['a', 'b', 'c']})
self.g = xplt.FacetGrid(self.darray, col='z')
@pytest.mark.slow
def test_no_args(self):
self.g.map_dataarray(xplt.contourf, 'x', 'y')
# Don't want colorbar labeled with 'None'
alltxt = text_in_fig()
self.assertNotIn('None', alltxt)
for ax in self.g.axes.flat:
self.assertTrue(ax.has_data())
@pytest.mark.slow
def test_names_appear_somewhere(self):
self.darray.name = 'testvar'
self.g.map_dataarray(xplt.contourf, 'x', 'y')
for k, ax in zip('abc', self.g.axes.flat):
self.assertEqual('z = {0}'.format(k), ax.get_title())
alltxt = text_in_fig()
self.assertIn(self.darray.name, alltxt)
for label in ['x', 'y']:
self.assertIn(label, alltxt)
@pytest.mark.slow
def test_text_not_super_long(self):
self.darray.coords['z'] = [100 * letter for letter in 'abc']
g = xplt.FacetGrid(self.darray, col='z')
g.map_dataarray(xplt.contour, 'x', 'y')
alltxt = text_in_fig()
maxlen = max(len(txt) for txt in alltxt)
self.assertLess(maxlen, 50)
t0 = g.axes[0, 0].get_title()
self.assertTrue(t0.endswith('...'))
@pytest.mark.slow
def test_colorbar(self):
vmin = self.darray.values.min()
vmax = self.darray.values.max()
expected = np.array((vmin, vmax))
self.g.map_dataarray(xplt.imshow, 'x', 'y')
for image in plt.gcf().findobj(mpl.image.AxesImage):
clim = np.array(image.get_clim())
self.assertTrue(np.allclose(expected, clim))
self.assertEqual(1, len(find_possible_colorbars()))
@pytest.mark.slow
def test_empty_cell(self):
g = xplt.FacetGrid(self.darray, col='z', col_wrap=2)
g.map_dataarray(xplt.imshow, 'x', 'y')
bottomright = g.axes[-1, -1]
self.assertFalse(bottomright.has_data())
self.assertFalse(bottomright.get_visible())
@pytest.mark.slow
def test_norow_nocol_error(self):
with raises_regex(ValueError, r'[Rr]ow'):
xplt.FacetGrid(self.darray)
@pytest.mark.slow
def test_groups(self):
self.g.map_dataarray(xplt.imshow, 'x', 'y')
upperleft_dict = self.g.name_dicts[0, 0]
upperleft_array = self.darray.loc[upperleft_dict]
z0 = self.darray.isel(z=0)
self.assertDataArrayEqual(upperleft_array, z0)
@pytest.mark.slow
def test_float_index(self):
self.darray.coords['z'] = [0.1, 0.2, 0.4]
g = xplt.FacetGrid(self.darray, col='z')
g.map_dataarray(xplt.imshow, 'x', 'y')
@pytest.mark.slow
def test_nonunique_index_error(self):
self.darray.coords['z'] = [0.1, 0.2, 0.2]
with raises_regex(ValueError, r'[Uu]nique'):
xplt.FacetGrid(self.darray, col='z')
@pytest.mark.slow
def test_robust(self):
z = np.zeros((20, 20, 2))
darray = DataArray(z, dims=['y', 'x', 'z'])
darray[:, :, 1] = 1
darray[2, 0, 0] = -1000
darray[3, 0, 0] = 1000
g = xplt.FacetGrid(darray, col='z')
g.map_dataarray(xplt.imshow, 'x', 'y', robust=True)
# Color limits should be 0, 1
# The largest number displayed in the figure should be less than 21
numbers = set()
alltxt = text_in_fig()
for txt in alltxt:
try:
numbers.add(float(txt))
except ValueError:
pass
largest = max(abs(x) for x in numbers)
self.assertLess(largest, 21)
@pytest.mark.slow
def test_can_set_vmin_vmax(self):
vmin, vmax = 50.0, 1000.0
expected = np.array((vmin, vmax))
self.g.map_dataarray(xplt.imshow, 'x', 'y', vmin=vmin, vmax=vmax)
for image in plt.gcf().findobj(mpl.image.AxesImage):
clim = np.array(image.get_clim())
self.assertTrue(np.allclose(expected, clim))
@pytest.mark.slow
def test_can_set_norm(self):
norm = mpl.colors.SymLogNorm(0.1)
self.g.map_dataarray(xplt.imshow, 'x', 'y', norm=norm)
for image in plt.gcf().findobj(mpl.image.AxesImage):
self.assertIs(image.norm, norm)
@pytest.mark.slow
def test_figure_size(self):
self.assertArrayEqual(self.g.fig.get_size_inches(), (10, 3))
g = xplt.FacetGrid(self.darray, col='z', size=6)
self.assertArrayEqual(g.fig.get_size_inches(), (19, 6))
g = self.darray.plot.imshow(col='z', size=6)
self.assertArrayEqual(g.fig.get_size_inches(), (19, 6))
g = xplt.FacetGrid(self.darray, col='z', size=4, aspect=0.5)
self.assertArrayEqual(g.fig.get_size_inches(), (7, 4))
g = xplt.FacetGrid(self.darray, col='z', figsize=(9, 4))
self.assertArrayEqual(g.fig.get_size_inches(), (9, 4))
with raises_regex(ValueError, "cannot provide both"):
g = xplt.plot(self.darray, row=2, col='z', figsize=(6, 4), size=6)
with raises_regex(ValueError, "Can't use"):
g = xplt.plot(self.darray, row=2, col='z', ax=plt.gca(), size=6)
@pytest.mark.slow
def test_num_ticks(self):
nticks = 99
maxticks = nticks + 1
self.g.map_dataarray(xplt.imshow, 'x', 'y')
self.g.set_ticks(max_xticks=nticks, max_yticks=nticks)
for ax in self.g.axes.flat:
xticks = len(ax.get_xticks())
yticks = len(ax.get_yticks())
self.assertLessEqual(xticks, maxticks)
self.assertLessEqual(yticks, maxticks)
self.assertGreaterEqual(xticks, nticks / 2.0)
self.assertGreaterEqual(yticks, nticks / 2.0)
@pytest.mark.slow
def test_map(self):
self.g.map(plt.contourf, 'x', 'y', Ellipsis)
self.g.map(lambda: None)
@pytest.mark.slow
def test_map_dataset(self):
g = xplt.FacetGrid(self.darray.to_dataset(name='foo'), col='z')
g.map(plt.contourf, 'x', 'y', 'foo')
alltxt = text_in_fig()
for label in ['x', 'y']:
self.assertIn(label, alltxt)
# everything has a label
self.assertNotIn('None', alltxt)
# colorbar can't be inferred automatically
self.assertNotIn('foo', alltxt)
self.assertEqual(0, len(find_possible_colorbars()))
g.add_colorbar(label='colors!')
self.assertIn('colors!', text_in_fig())
self.assertEqual(1, len(find_possible_colorbars()))
@pytest.mark.slow
def test_set_axis_labels(self):
g = self.g.map_dataarray(xplt.contourf, 'x', 'y')
g.set_axis_labels('longitude', 'latitude')
alltxt = text_in_fig()
for label in ['longitude', 'latitude']:
self.assertIn(label, alltxt)
@pytest.mark.slow
def test_facetgrid_colorbar(self):
a = easy_array((10, 15, 4))
d = DataArray(a, dims=['y', 'x', 'z'], name='foo')
d.plot.imshow(x='x', y='y', col='z')
self.assertEqual(1, len(find_possible_colorbars()))
d.plot.imshow(x='x', y='y', col='z', add_colorbar=True)
self.assertEqual(1, len(find_possible_colorbars()))
d.plot.imshow(x='x', y='y', col='z', add_colorbar=False)
self.assertEqual(0, len(find_possible_colorbars()))
@pytest.mark.slow
def test_facetgrid_polar(self):
# test if polar projection in FacetGrid does not raise an exception
self.darray.plot.pcolormesh(col='z',
subplot_kws=dict(projection='polar'),
sharex=False, sharey=False)
class TestFacetGrid4d(PlotTestCase):
def setUp(self):
a = easy_array((10, 15, 3, 2))
darray = DataArray(a, dims=['y', 'x', 'col', 'row'])
darray.coords['col'] = np.array(['col' + str(x) for x in
darray.coords['col'].values])
darray.coords['row'] = np.array(['row' + str(x) for x in
darray.coords['row'].values])
self.darray = darray
@pytest.mark.slow
def test_default_labels(self):
g = xplt.FacetGrid(self.darray, col='col', row='row')
self.assertEqual((2, 3), g.axes.shape)
g.map_dataarray(xplt.imshow, 'x', 'y')
# Rightmost column should be labeled
for label, ax in zip(self.darray.coords['row'].values, g.axes[:, -1]):
self.assertTrue(substring_in_axes(label, ax))
# Top row should be labeled
for label, ax in zip(self.darray.coords['col'].values, g.axes[0, :]):
self.assertTrue(substring_in_axes(label, ax))
class TestDatetimePlot(PlotTestCase):
def setUp(self):
'''
Create a DataArray with a time-axis that contains datetime objects.
'''
month = np.arange(1, 13, 1)
data = np.sin(2 * np.pi * month / 12.0)
darray = DataArray(data, dims=['time'])
darray.coords['time'] = np.array([datetime(2017, m, 1) for m in month])
self.darray = darray
def test_datetime_line_plot(self):
# test if line plot raises no Exception
self.darray.plot.line()
@requires_seaborn
def test_import_seaborn_no_warning():
# GH1633
with pytest.warns(None) as record:
import_seaborn()
assert len(record) == 0
@requires_matplotlib
def test_plot_seaborn_no_import_warning():
# GH1633
with pytest.warns(None) as record:
_color_palette('Blues', 4)
assert len(record) == 0
| apache-2.0 |
samklr/spark-timeseries | python/sparkts/timeseriesrdd.py | 4 | 11346 | from py4j.java_gateway import java_import
from pyspark import RDD
from pyspark.serializers import FramedSerializer, SpecialLengths, write_int, read_int
from pyspark.sql import DataFrame
from .utils import datetime_to_nanos
from .datetimeindex import DateTimeIndex, irregular
import struct
import numpy as np
import pandas as pd
from io import BytesIO
class TimeSeriesRDD(RDD):
"""
A lazy distributed collection of univariate series with a conformed time dimension. Lazy in the
sense that it is an RDD: it encapsulates all the information needed to generate its elements,
but doesn't materialize them upon instantiation. Distributed in the sense that different
univariate series within the collection can be stored and processed on different nodes. Within
each univariate series, observations are not distributed. The time dimension is conformed in the
sense that a single DateTimeIndex applies to all the univariate series. Each univariate series
within the RDD has a String key to identify it.
"""
def __init__(self, dt_index, rdd, jtsrdd = None, sc = None):
if jtsrdd == None:
# Construct from a Python RDD object and a Python DateTimeIndex
jvm = rdd.ctx._jvm
jrdd = rdd._reserialize(_TimeSeriesSerializer())._jrdd.mapToPair( \
jvm.com.cloudera.sparkts.BytesToKeyAndSeries())
self._jtsrdd = jvm.com.cloudera.sparkts.api.java.JavaTimeSeriesRDDFactory.timeSeriesRDD( \
dt_index._jdt_index, jrdd)
RDD.__init__(self, rdd._jrdd, rdd.ctx)
else:
# Construct from a py4j.JavaObject pointing to a JavaTimeSeriesRDD and a Python SparkContext
jvm = sc._jvm
jrdd = jtsrdd.map( \
jvm.com.cloudera.sparkts.KeyAndSeriesToBytes())
RDD.__init__(self, jrdd, sc, _TimeSeriesSerializer())
self._jtsrdd = jtsrdd
def __getitem__(self, val):
"""
Returns a TimeSeriesRDD representing a subslice of this TimeSeriesRDD, containing only
values for a sub-range of the time it covers.
"""
start = datetime_to_nanos(val.start)
stop = datetime_to_nanos(val.stop)
return TimeSeriesRDD(None, None, self._jtsrdd.slice(start, stop), self.ctx)
def differences(self, n):
"""
Returns a TimeSeriesRDD where each time series is differenced with the given order.
The new RDD will be missing the first n date-times.
Parameters
----------
n : int
The order of differencing to perform.
"""
return TimeSeriesRDD(None, None, self._jtsrdd.differences(n), self.ctx)
def fill(self, method):
"""
Returns a TimeSeriesRDD with missing values imputed using the given method.
Parameters
----------
method : string
"nearest" fills in NaNs with the closest non-NaN value, using the closest previous value
in the case of a tie. "linear" does a linear interpolation from the closest filled-in
values. "next" uses the closest value that is in the future of the missing value.
"previous" uses the closest value from the past of the missing value. "spline"
interpolates using a cubic spline.
"""
return TimeSeriesRDD(None, None, self._jtsrdd.fill(method), self.ctx)
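    # Example (illustrative): fill() composes with the other transformations
    # defined in this class, e.g.
    #   cleaned = tsrdd.fill("linear").remove_instants_with_nans()
    #   diffed = cleaned.differences(1)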
def map_series(self, fn, dt_index = None):
"""
Returns a TimeSeriesRDD, with a transformation applied to all the series in this RDD.
Either the series produced by the given function should conform to this TimeSeriesRDD's
index, or a new DateTimeIndex should be given that they conform to.
Parameters
----------
fn : function
A function that maps arrays of floats to arrays of floats.
dt_index : DateTimeIndex
A DateTimeIndex for the produced TimeseriesRDD.
"""
if dt_index == None:
dt_index = self.index()
return TimeSeriesRDD(dt_index, self.map(fn))
def to_instants(self):
"""
Returns an RDD of instants, each a horizontal slice of this TimeSeriesRDD at a time.
This essentially transposes the TimeSeriesRDD, producing an RDD of tuples of datetime and
a numpy array containing all the observations that occurred at that time.
"""
jrdd = self._jtsrdd.toInstants(-1).map( \
self.ctx._jvm.com.cloudera.sparkts.InstantToBytes())
return RDD(jrdd, self.ctx, _InstantDeserializer())
def to_instants_dataframe(self, sql_ctx):
"""
Returns a DataFrame of instants, each a horizontal slice of this TimeSeriesRDD at a time.
This essentially transposes the TimeSeriesRDD, producing a DataFrame where each column
        is a key from one of the rows in the TimeSeriesRDD.
"""
ssql_ctx = sql_ctx._ssql_ctx
jdf = self._jtsrdd.toInstantsDataFrame(ssql_ctx, -1)
return DataFrame(jdf, sql_ctx)
def index(self):
"""Returns the index describing the times referred to by the elements of this TimeSeriesRDD
"""
jindex = self._jtsrdd.index()
return DateTimeIndex(jindex)
def to_observations_dataframe(self, sql_ctx, ts_col='timestamp', key_col='key', val_col='value'):
"""
Returns a DataFrame of observations, each containing a timestamp, a key, and a value.
Parameters
----------
sql_ctx : SQLContext
ts_col : string
The name for the timestamp column.
key_col : string
The name for the key column.
val_col : string
The name for the value column.
"""
ssql_ctx = sql_ctx._ssql_ctx
jdf = self._jtsrdd.toObservationsDataFrame(ssql_ctx, ts_col, key_col, val_col)
return DataFrame(jdf, sql_ctx)
def to_pandas_series_rdd(self):
"""
Returns an RDD of Pandas Series objects indexed with Pandas DatetimeIndexes
"""
pd_index = self.index().to_pandas_index()
return self.map(lambda x: (x[0], pd.Series(x[1], pd_index)))
def to_pandas_dataframe(self):
"""
Pulls the contents of the RDD to the driver and places them in a Pandas DataFrame.
        Each record in the RDD becomes a column, and the DataFrame is indexed with a
DatetimeIndex generated from this RDD's index.
"""
pd_index = self.index().to_pandas_index()
return pd.DataFrame.from_items(self.collect()).set_index(pd_index)
def remove_instants_with_nans(self):
"""
Returns a TimeSeriesRDD with instants containing NaNs cut out.
The resulting TimeSeriesRDD has a slimmed down DateTimeIndex, missing all the instants
for which any series in the RDD contained a NaN.
"""
return TimeSeriesRDD(None, None, self._jtsrdd.removeInstantsWithNaNs(), self.ctx)
def filter(self, predicate):
return TimeSeriesRDD(self.index(), super(TimeSeriesRDD, self).filter(predicate))
def find_series(self, key):
"""
Finds a series in the TimeSeriesRDD by its key.
Parameters
----------
key : string
The key of the series to find.
"""
# TODO: this could be more efficient if we pushed it down into Java
return self.filter(lambda x: x[0] == key).first()[1]
def return_rates(self):
"""
Returns a TimeSeriesRDD where each series is a return rate series for a series in this RDD.
Assumes periodic (as opposed to continuously compounded) returns.
"""
return TimeSeriesRDD(None, None, self._jtsrdd.returnRates(), self.ctx)
def with_index(self, new_index):
"""
Returns a TimeSeriesRDD rebased on top of a new index. Any timestamps that exist in the new
index but not in the existing index will be filled in with NaNs.
Parameters
----------
new_index : DateTimeIndex
"""
return TimeSeriesRDD(None, None, self._jtsrdd.withIndex(new_index._jdt_index), self.ctx)
def time_series_rdd_from_pandas_series_rdd(series_rdd):
"""
Instantiates a TimeSeriesRDD from an RDD of Pandas Series objects.
The series in the RDD are all expected to have the same DatetimeIndex.
Parameters
----------
    series_rdd : RDD of (string, pandas.Series) tuples
"""
first = series_rdd.first()
dt_index = irregular(first[1].index, series_rdd.ctx)
return TimeSeriesRDD(dt_index, series_rdd.mapValues(lambda x: x.values))
def time_series_rdd_from_observations(dt_index, df, ts_col, key_col, val_col):
"""
Instantiates a TimeSeriesRDD from a DataFrame of observations.
An observation is a row containing a timestamp, a string key, and float value.
Parameters
----------
dt_index : DateTimeIndex
The index of the RDD to create. Observations not contained in this index will be ignored.
df : DataFrame
ts_col : string
The name of the column in the DataFrame containing the timestamps.
key_col : string
The name of the column in the DataFrame containing the keys.
val_col : string
The name of the column in the DataFrame containing the values.
"""
jvm = df._sc._jvm
jtsrdd = jvm.com.cloudera.sparkts.api.java.JavaTimeSeriesRDDFactory.timeSeriesRDDFromObservations( \
dt_index._jdt_index, df._jdf, ts_col, key_col, val_col)
return TimeSeriesRDD(None, None, jtsrdd, df._sc)
class _TimeSeriesSerializer(FramedSerializer):
"""Serializes (key, vector) pairs to and from bytes. Must be compatible with the Scala
implementation in com.cloudera.sparkts.{BytesToKeyAndSeries, KeyAndSeriesToBytes}
"""
def dumps(self, obj):
stream = BytesIO()
(key, vector) = obj
key_bytes = key.encode('utf-8')
write_int(len(key_bytes), stream)
stream.write(key_bytes)
write_int(len(vector), stream)
# TODO: maybe some optimized way to write this all at once?
for value in vector:
stream.write(struct.pack('!d', value))
stream.seek(0)
return stream.read()
def loads(self, obj):
stream = BytesIO(obj)
key_length = read_int(stream)
key = stream.read(key_length).decode('utf-8')
return (key, _read_vec(stream))
def __repr__(self):
return '_TimeSeriesSerializer'
class _InstantDeserializer(FramedSerializer):
"""
    Serializes (timestamp, vector) pairs to and from bytes. Must be compatible with the Scala
implementation in com.cloudera.sparkts.InstantToBytes
"""
def loads(self, obj):
stream = BytesIO(obj)
timestamp_nanos = struct.unpack('!q', stream.read(8))[0]
return (pd.Timestamp(timestamp_nanos), _read_vec(stream))
def __repr__(self):
return "_InstantDeserializer"
def _read_vec(stream):
vector_length = read_int(stream)
vector = np.empty(vector_length)
# TODO: maybe some optimized way to read this all at once?
for i in xrange(vector_length):
vector[i] = struct.unpack('!d', stream.read(8))[0]
return vector
| apache-2.0 |
jpzk/evopy | evopy/examples/experiments/beta_dsessvc/setup.py | 1 | 5607 | '''
This file is part of evopy.
Copyright 2012 - 2013, Jendrik Poloczek
evopy is free software: you can redistribute it
and/or modify it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
evopy is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along with
evopy. If not, see <http://www.gnu.org/licenses/>.
'''
from sys import path
path.append("../../../..")
from copy import deepcopy
from numpy import matrix, log10
from evopy.strategies.ori_dses_svc_repair import ORIDSESSVCR
from evopy.strategies.ori_dses_svc import ORIDSESSVC
from evopy.strategies.ori_dses import ORIDSES
from evopy.simulators.simulator import Simulator
from evopy.problems.sphere_problem_origin_r1 import SphereProblemOriginR1
from evopy.problems.sphere_problem_origin_r2 import SphereProblemOriginR2
from evopy.problems.schwefels_problem_26 import SchwefelsProblem26
from evopy.problems.tr_problem import TRProblem
from evopy.metamodel.dses_svc_linear_meta_model import DSESSVCLinearMetaModel
from sklearn.cross_validation import KFold
from evopy.operators.scaling.scaling_standardscore import ScalingStandardscore
from evopy.operators.scaling.scaling_dummy import ScalingDummy
from evopy.metamodel.cv.svc_cv_sklearn_grid_linear import SVCCVSkGridLinear
from evopy.operators.termination.or_combinator import ORCombinator
from evopy.operators.termination.accuracy import Accuracy
from evopy.operators.termination.generations import Generations
from evopy.operators.termination.convergence import Convergence
def get_method_SphereProblemR1_svc(beta):
sklearn_cv = SVCCVSkGridLinear(\
C_range = [2 ** i for i in range(-3, 14, 2)],
cv_method = KFold(20, 5))
meta_model = DSESSVCLinearMetaModel(\
window_size = 10,
scaling = ScalingStandardscore(),
crossvalidation = sklearn_cv,
repair_mode = 'none')
method = ORIDSESSVC(\
mu = 15,
lambd = 100,
theta = 0.3,
pi = 15,
initial_sigma = matrix([[5.0, 5.0]]),
delta = 4.5,
tau0 = 0.5,
tau1 = 0.6,
initial_pos = matrix([[10.0, 10.0]]),
beta = beta,
meta_model = meta_model)
return method
def get_method_SphereProblemR2_svc(beta):
sklearn_cv = SVCCVSkGridLinear(\
C_range = [2 ** i for i in range(-3, 14, 2)],
cv_method = KFold(20, 5))
meta_model = DSESSVCLinearMetaModel(\
window_size = 10,
scaling = ScalingStandardscore(),
crossvalidation = sklearn_cv,
repair_mode = 'none')
method = ORIDSESSVC(\
mu = 15,
lambd = 100,
theta = 0.3,
pi = 15,
initial_sigma = matrix([[5.0, 5.0]]),
delta = 4.5,
tau0 = 0.5,
tau1 = 0.6,
initial_pos = matrix([[10.0, 10.0]]),
beta = beta,
meta_model = meta_model)
return method
def get_method_TR_svc(beta):
sklearn_cv = SVCCVSkGridLinear(\
C_range = [2 ** i for i in range(-3, 14, 2)],
cv_method = KFold(20, 5))
meta_model = DSESSVCLinearMetaModel(\
window_size = 10,
scaling = ScalingStandardscore(),
crossvalidation = sklearn_cv,
repair_mode = 'none')
method = ORIDSESSVC(\
mu = 15,
lambd = 100,
theta = 0.3,
pi = 15,
initial_sigma = matrix([[4.5, 4.5]]),
delta = 4.5,
tau0 = 0.5,
tau1 = 0.6,
initial_pos = matrix([[10.0, 10.0]]),
beta = beta,
meta_model = meta_model)
return method
def get_method_Schwefel26_svc(beta):
sklearn_cv = SVCCVSkGridLinear(\
C_range = [2 ** i for i in range(-3, 14, 2)],
cv_method = KFold(20, 5))
meta_model = DSESSVCLinearMetaModel(\
window_size = 10,
scaling = ScalingStandardscore(),
crossvalidation = sklearn_cv,
repair_mode = 'none')
method = ORIDSESSVC(\
mu = 15,
lambd = 100,
theta = 0.3,
pi = 15,
initial_sigma = matrix([[34.0, 36.0]]),
delta = 36.0,
tau0 = 0.5,
tau1 = 0.6,
initial_pos = matrix([[100.0, 100.0]]),
beta = beta,
meta_model = meta_model)
return method
betas = map(lambda b : b / 10.0, range(0, 11))
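# i.e. betas == [0.0, 0.1, ..., 1.0]; one experiment configuration per beta
# (Python 2 map returns a plain list here)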
def create_problem_optimizer_map(typeofelements):
t = typeofelements
beta_map = {}
for beta in betas:
beta_map[beta] = deepcopy(t)
return {\
TRProblem: deepcopy(beta_map),\
SphereProblemOriginR1: deepcopy(beta_map),\
SphereProblemOriginR2: deepcopy(beta_map),\
SchwefelsProblem26: deepcopy(beta_map)}
samples = 100
termination = Generations(50)
problems = [TRProblem, SphereProblemOriginR1,\
SphereProblemOriginR2, SchwefelsProblem26]
optimizers = {\
TRProblem: get_method_TR_svc,
SphereProblemOriginR1: get_method_SphereProblemR1_svc,
SphereProblemOriginR2: get_method_SphereProblemR2_svc,
SchwefelsProblem26: get_method_Schwefel26_svc}
beta_map = {}
for beta in betas:
beta_map[beta] = []
simulators = {\
TRProblem: deepcopy(beta_map),
SphereProblemOriginR1: deepcopy(beta_map),
SphereProblemOriginR2: deepcopy(beta_map),
SchwefelsProblem26: deepcopy(beta_map)
}
cfcs = create_problem_optimizer_map([])
| gpl-3.0 |
sharadmv/trees | scripts/profile.py | 1 | 1050 | import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
import numpy as np
import trees
from trees.ddt import *
import mpld3
import seaborn as sns
sns.set_style('white')
from tqdm import tqdm
from sklearn.decomposition import PCA
import argparse
argparser = argparse.ArgumentParser()
argparser.add_argument('--iterations', type=int, default=1000)
argparser.add_argument('--N', type=int, default=20)
args = argparser.parse_args()
X, y = trees.data.load('zoo')
pca = PCA(n_components=2)
X = pca.fit_transform(X)
X += np.random.normal(scale=.15, size=X.shape)
X = X[0:args.N]
y = y[0:args.N]
N, D = X.shape
df = Inverse(c=5)
lm = GaussianLikelihoodModel(sigma=np.cov(X.T) / 2.0, sigma0=np.eye(D) / 10.0, mu0=X.mean(axis=0)).compile()
sampler = MetropolisHastingsSampler(DirichletDiffusionTree(df, lm), X)
sampler.initialize_assignments()
def iterate(n_iters):
lls = []
for i in tqdm(xrange(n_iters)):
sampler.sample()
lls.append(sampler.ddt.marg_log_likelihood())
return lls
iterate(args.iterations)
| mit |
Odingod/mne-python | mne/decoding/time_gen.py | 5 | 36854 | # Authors: Jean-Remi King <[email protected]>
# Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Clement Moutard <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import copy
from ..io.pick import pick_types
from ..viz.decoding import plot_gat_matrix, plot_gat_times
from ..parallel import parallel_func, check_n_jobs
class _DecodingTime(dict):
"""A dictionary to configure the training times that has the following keys:
'slices' : np.ndarray, shape (n_clfs,)
Array of time slices (in indices) used for each classifier.
If not given, computed from 'start', 'stop', 'length', 'step'.
'start' : float
Time at which to start decoding (in seconds).
Defaults to min(epochs.times).
'stop' : float
Maximal time at which to stop decoding (in seconds).
Defaults to max(times).
'step' : float
Duration separating the start of subsequent classifiers (in
seconds). Defaults to one time sample.
'length' : float
Duration of each classifier (in seconds). Defaults to one time sample.
If None, empty dict. Defaults to None."""
def __repr__(self):
s = ""
if "start" in self:
s += "start: %0.3f (s)" % (self["start"])
if "stop" in self:
s += ", stop: %0.3f (s)" % (self["stop"])
if "step" in self:
s += ", step: %0.3f (s)" % (self["step"])
if "length" in self:
s += ", length: %0.3f (s)" % (self["length"])
if "slices" in self:
# identify depth: training times only contains n_time but
# testing_times can contain n_times or n_times * m_times
depth = [len(ii) for ii in self["slices"]]
if len(np.unique(depth)) == 1: # if all slices have same depth
if depth[0] == 1: # if depth is one
s += ", n_time_windows: %s" % (len(depth))
else:
s += ", n_time_windows: %s x %s" % (len(depth), depth[0])
else:
s += (", n_time_windows: %s x [%s, %s]" %
(len(depth),
min([len(ii) for ii in depth]),
max(([len(ii) for ii in depth]))))
return "<DecodingTime | %s>" % s
class GeneralizationAcrossTime(object):
"""Generalize across time and conditions
Creates and estimator object used to 1) fit a series of classifiers on
multidimensional time-resolved data, and 2) test the ability of each
classifier to generalize across other time samples.
Parameters
----------
picks : array-like of int | None, optional
Channels to be included. If None only good data channels are used.
Defaults to None.
cv : int | object
If an integer is passed, it is the number of folds.
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
Defaults to 5.
clf : object | None
An estimator compliant with the scikit-learn API (fit & predict).
If None the classifier will be a standard pipeline including
StandardScaler and LogisticRegression with default parameters.
train_times : dict | None
A dictionary to configure the training times:
``slices`` : np.ndarray, shape (n_clfs,)
Array of time slices (in indices) used for each classifier.
If not given, computed from 'start', 'stop', 'length', 'step'.
``start`` : float
Time at which to start decoding (in seconds).
Defaults to min(epochs.times).
``stop`` : float
Maximal time at which to stop decoding (in seconds).
Defaults to max(times).
``step`` : float
Duration separating the start of subsequent classifiers (in
seconds). Defaults to one time sample.
``length`` : float
Duration of each classifier (in seconds).
Defaults to one time sample.
If None, empty dict. Defaults to None.
test_times : 'diagonal' | dict | None, optional
Configures the testing times.
If set to 'diagonal', predictions are made at the time at which
each classifier is trained.
If set to None, predictions are made at all time points.
        If set to dict, the dict should contain ``slices`` or be constructed in
a similar way to train_times
``slices`` : np.ndarray, shape (n_clfs,)
Array of time slices (in indices) used for each classifier.
If not given, computed from 'start', 'stop', 'length', 'step'.
If None, empty dict. Defaults to None.
predict_mode : {'cross-validation', 'mean-prediction'}
Indicates how predictions are achieved with regards to the cross-
validation procedure:
``cross-validation`` : estimates a single prediction per sample
based on the unique independent classifier fitted in the
cross-validation.
``mean-prediction`` : estimates k predictions per sample, based on
each of the k-fold cross-validation classifiers, and average
these predictions into a single estimate per sample.
Default: 'cross-validation'
scorer : object | None
scikit-learn Scorer instance. If None, set to accuracy_score.
Defaults to None.
n_jobs : int
Number of jobs to run in parallel. Defaults to 1.
Attributes
----------
picks_ : array-like of int
Channels to be included.
ch_names : list, shape (n_channels,)
Names of the channels used for training.
y_train_ : list | np.ndarray, shape (n_samples,)
The categories used for training.
train_times_ : dict
A dictionary that configures the training times:
``slices`` : np.ndarray, shape (n_clfs,)
Array of time slices (in indices) used for each classifier.
If not given, computed from 'start', 'stop', 'length', 'step'.
``times`` : np.ndarray, shape (n_clfs,)
The training times (in seconds).
test_times_ : dict
A dictionary that configures the testing times for each training time:
``slices`` : np.ndarray, shape (n_clfs, n_testing_times)
Array of time slices (in indices) used for each classifier.
``times`` : np.ndarray, shape (n_clfs, n_testing_times)
The testing times (in seconds) for each training time.
estimators_ : list of list of sklearn.base.BaseEstimator subclasses.
The estimators for each time point and each fold.
cv_ : CrossValidation object
The actual CrossValidation input depending on y.
y_pred_ : list of lists of arrays of floats,
shape (n_train_times, n_test_times, n_epochs, n_prediction_dims)
The single-trial predictions estimated by self.predict() at each
training time and each testing time. Note that the number of testing
        times per training time need not be regular; when it is,
        np.shape(y_pred_) = [n_train_time, n_test_time, n_epochs].
y_true_ : list | np.ndarray, shape (n_samples,)
The categories used for scoring y_pred_.
scorer_ : object
scikit-learn Scorer instance.
scores_ : list of lists of float
The scores estimated by self.scorer_ at each training time and each
testing time (e.g. mean accuracy of self.predict(X)). Note that the
number of testing times per training time need not be regular;
        when it is regular, np.shape(scores) = [n_train_time, n_test_time].
Notes
-----
The function implements the method used in:
Jean-Remi King, Alexandre Gramfort, Aaron Schurger, Lionel Naccache
and Stanislas Dehaene, "Two distinct dynamic modes subtend the
detection of unexpected sounds", PLoS ONE, 2014
DOI: 10.1371/journal.pone.0085791
.. versionadded:: 0.9.0
""" # noqa
def __init__(self, picks=None, cv=5, clf=None, train_times=None,
test_times=None, predict_mode='cross-validation', scorer=None,
n_jobs=1):
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
# Store parameters in object
self.cv = cv
# Define training sliding window
self.train_times = (_DecodingTime() if train_times is None
else _DecodingTime(train_times))
# Define testing sliding window. If None, will be set in predict()
if test_times is None:
self.test_times = _DecodingTime()
elif test_times == 'diagonal':
self.test_times = 'diagonal'
else:
self.test_times = _DecodingTime(test_times)
# Default classification pipeline
if clf is None:
scaler = StandardScaler()
estimator = LogisticRegression()
clf = Pipeline([('scaler', scaler), ('estimator', estimator)])
self.clf = clf
self.predict_mode = predict_mode
self.scorer = scorer
self.picks = picks
self.n_jobs = n_jobs
def __repr__(self):
s = ''
if hasattr(self, "estimators_"):
s += "fitted, start : %0.3f (s), stop : %0.3f (s)" % (
self.train_times_['start'], self.train_times_['stop'])
else:
s += 'no fit'
if hasattr(self, 'y_pred_'):
s += (", predicted %d epochs" % len(self.y_pred_[0][0]))
else:
s += ", no prediction"
if hasattr(self, "estimators_") and hasattr(self, 'scores_'):
s += ',\n '
else:
s += ', '
if hasattr(self, 'scores_'):
s += "scored"
if callable(self.scorer_):
s += " (%s)" % (self.scorer_.__name__)
else:
s += "no score"
return "<GAT | %s>" % s
def fit(self, epochs, y=None):
""" Train a classifier on each specified time slice.
Note. This function sets the ``picks_``, ``ch_names``, ``cv_``,
``y_train``, ``train_times_`` and ``estimators_`` attributes.
Parameters
----------
epochs : instance of Epochs
The epochs.
y : list or np.ndarray of int, shape (n_samples,) or None, optional
To-be-fitted model values. If None, y = epochs.events[:, 2].
Defaults to None.
Returns
-------
self : object
Returns self.
Notes
------
If X and y are not C-ordered and contiguous arrays of np.float64 and
X is not a scipy.sparse.csr_matrix, X and/or y may be copied.
If X is a dense array, then the other methods will not support sparse
matrices as input.
"""
from sklearn.base import clone
from sklearn.cross_validation import check_cv, StratifiedKFold
# clean attributes
for att in ['picks_', 'ch_names', 'y_train_', 'cv_', 'train_times_',
'estimators_', 'test_times_', 'y_pred_', 'y_true_',
'scores_', 'scorer_']:
if hasattr(self, att):
delattr(self, att)
n_jobs = self.n_jobs
# Extract data from MNE structure
X, y, self.picks_ = _check_epochs_input(epochs, y, self.picks)
self.ch_names = [epochs.ch_names[p] for p in self.picks_]
cv = self.cv
if isinstance(cv, (int, np.int)):
cv = StratifiedKFold(y, cv)
cv = check_cv(cv, X, y, classifier=True)
self.cv_ = cv # update CV
self.y_train_ = y
# Cross validation scheme
# XXX Cross validation should later be transformed into a make_cv, and
# defined in __init__
self.train_times_ = copy.deepcopy(self.train_times)
if 'slices' not in self.train_times_:
self.train_times_ = _sliding_window(epochs.times, self.train_times)
# Parallel across training time
parallel, p_time_gen, n_jobs = parallel_func(_fit_slices, n_jobs)
n_chunks = min(X.shape[2], n_jobs)
splits = np.array_split(self.train_times_['slices'], n_chunks)
def f(x):
return np.unique(np.concatenate(x))
out = parallel(p_time_gen(clone(self.clf),
X[..., f(train_slices_chunk)],
y, train_slices_chunk, cv)
for train_slices_chunk in splits)
# Unpack estimators into time slices X folds list of lists.
self.estimators_ = sum(out, list())
return self
def predict(self, epochs):
""" Test each classifier on each specified testing time slice.
Note. This function sets the ``y_pred_`` and ``test_times_``
attributes.
Parameters
----------
epochs : instance of Epochs
The epochs. Can be similar to fitted epochs or not. See
predict_mode parameter.
Returns
-------
y_pred : list of lists of arrays of floats,
shape (n_train_t, n_test_t, n_epochs, n_prediction_dims)
The single-trial predictions at each training time and each testing
time. Note that the number of testing times per training time need
            not be regular; when it is,
            np.shape(y_pred_) = [n_train_time, n_test_time, n_epochs].
"""
# Check that at least one classifier has been trained
if not hasattr(self, 'estimators_'):
raise RuntimeError('Please fit models before trying to predict')
# clean attributes
for att in ['y_pred_', 'test_times_', 'scores_', 'scorer_', 'y_true_']:
if hasattr(self, att):
delattr(self, att)
n_jobs = self.n_jobs
X, y, _ = _check_epochs_input(epochs, None, self.picks_)
# Define testing sliding window
if self.test_times == 'diagonal':
test_times = _DecodingTime()
test_times['slices'] = [[s] for s in self.train_times_['slices']]
test_times['times'] = [[s] for s in self.train_times_['times']]
elif isinstance(self.test_times, dict):
test_times = copy.deepcopy(self.test_times)
else:
raise ValueError('`test_times` must be a dict or "diagonal"')
if 'slices' not in test_times:
            # Force the same number of time samples in testing as in training
            # (otherwise it won't be the same number of features)
window_param = dict(length=self.train_times_['length'])
# Make a sliding window for each training time.
slices_list = list()
times_list = list()
for t in range(0, len(self.train_times_['slices'])):
test_times_ = _sliding_window(epochs.times, window_param)
times_list += [test_times_['times']]
slices_list += [test_times_['slices']]
test_times = test_times_
test_times['slices'] = slices_list
test_times['times'] = times_list
# Store all testing times parameters
self.test_times_ = test_times
# Prepare parallel predictions
parallel, p_time_gen, _ = parallel_func(_predict_time_loop, n_jobs)
# Loop across estimators (i.e. training times)
self.y_pred_ = parallel(p_time_gen(X, self.estimators_[t_train],
self.cv_, slices, self.predict_mode)
for t_train, slices in
enumerate(self.test_times_['slices']))
return self.y_pred_
def score(self, epochs=None, y=None):
"""Score Epochs
Estimate scores across trials by comparing the prediction estimated for
each trial to its true value.
Calls ``predict()`` if it has not been already.
Note. The function updates the ``scorer_``, ``scores_``, and
``y_true_`` attributes.
Parameters
----------
epochs : instance of Epochs | None, optional
The epochs. Can be similar to fitted epochs or not.
If None, it needs to rely on the predictions ``y_pred_``
generated with ``predict()``.
y : list | np.ndarray, shape (n_epochs,) | None, optional
True values to be compared with the predictions ``y_pred_``
generated with ``predict()`` via ``scorer_``.
If None and ``predict_mode``=='cross-validation' y = ``y_train_``.
Defaults to None.
Returns
-------
scores : list of lists of float
The scores estimated by ``scorer_`` at each training time and each
testing time (e.g. mean accuracy of ``predict(X)``). Note that the
number of testing times per training time need not be regular;
            when it is regular, np.shape(scores) = [n_train_time, n_test_time].
"""
from sklearn.metrics import accuracy_score
# Run predictions if not already done
if epochs is not None:
self.predict(epochs)
else:
if not hasattr(self, 'y_pred_'):
raise RuntimeError('Please predict() epochs first or pass '
'epochs to score()')
# clean gat.score() attributes
for att in ['scores_', 'scorer_', 'y_true_']:
if hasattr(self, att):
delattr(self, att)
# Check scorer
# XXX Need API to identify proper scorer from the clf
self.scorer_ = accuracy_score if self.scorer is None else self.scorer
# If no regressor is passed, use default epochs events
if y is None:
if self.predict_mode == 'cross-validation':
y = self.y_train_
else:
if epochs is not None:
y = epochs.events[:, 2]
else:
raise RuntimeError('y is undefined because'
'predict_mode="mean-prediction" and '
'epochs are missing. You need to '
'explicitly specify y.')
if not np.all(np.unique(y) == np.unique(self.y_train_)):
raise ValueError('Classes (y) passed differ from classes used '
'for training. Please explicitly pass your y '
'for scoring.')
elif isinstance(y, list):
y = np.array(y)
self.y_true_ = y # to be compared with y_pred for scoring
# Preprocessing for parallelization:
n_jobs = min(len(self.y_pred_[0][0]), check_n_jobs(self.n_jobs))
parallel, p_time_gen, n_jobs = parallel_func(_score_loop, n_jobs)
# Score each training and testing time point
scores = parallel(p_time_gen(self.y_true_, self.y_pred_[t_train],
slices, self.scorer_)
for t_train, slices
in enumerate(self.test_times_['slices']))
self.scores_ = scores
return scores
def plot(self, title=None, vmin=None, vmax=None, tlim=None, ax=None,
cmap='RdBu_r', show=True, colorbar=True,
xlabel=True, ylabel=True):
"""Plotting function of GeneralizationAcrossTime object
Plot the score of each classifier at each tested time window.
Parameters
----------
title : str | None
Figure title. Defaults to None.
vmin : float | None
Min color value for scores. If None, sets to min(gat.scores_).
Defaults to None.
vmax : float | None
Max color value for scores. If None, sets to max(gat.scores_).
Defaults to None.
tlim : np.ndarray, (train_min, test_max) | None
The temporal boundaries. Defaults to None.
ax : object | None
Plot pointer. If None, generate new figure. Defaults to None.
cmap : str | cmap object
The color map to be used. Defaults to 'RdBu_r'.
show : bool
If True, the figure will be shown. Defaults to True.
colorbar : bool
If True, the colorbar of the figure is displayed. Defaults to True.
xlabel : bool
If True, the xlabel is displayed. Defaults to True.
ylabel : bool
If True, the ylabel is displayed. Defaults to True.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
return plot_gat_matrix(self, title=title, vmin=vmin, vmax=vmax,
tlim=tlim, ax=ax, cmap=cmap, show=show,
colorbar=colorbar, xlabel=xlabel, ylabel=ylabel)
def plot_diagonal(self, title=None, xmin=None, xmax=None, ymin=None,
ymax=None, ax=None, show=True, color=None,
xlabel=True, ylabel=True, legend=True, chance=True,
label='Classif. score'):
"""Plotting function of GeneralizationAcrossTime object
Plot each classifier score trained and tested at identical time
windows.
Parameters
----------
title : str | None
Figure title. Defaults to None.
xmin : float | None, optional
Min time value. Defaults to None.
xmax : float | None, optional
Max time value. Defaults to None.
ymin : float | None, optional
Min score value. If None, sets to min(scores). Defaults to None.
ymax : float | None, optional
Max score value. If None, sets to max(scores). Defaults to None.
ax : object | None
            Instance of matplotlib.axes.Axes. If None, generate new figure.
Defaults to None.
show : bool
If True, the figure will be shown. Defaults to True.
color : str
Score line color.
xlabel : bool
If True, the xlabel is displayed. Defaults to True.
ylabel : bool
If True, the ylabel is displayed. Defaults to True.
legend : bool
If True, a legend is displayed. Defaults to True.
        chance : bool | float
            Plot chance level. If True, chance level is estimated from the type
            of scorer. Defaults to True.
label : str
Score label used in the legend. Defaults to 'Classif. score'.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
return plot_gat_times(self, train_time='diagonal', title=title,
xmin=xmin, xmax=xmax,
ymin=ymin, ymax=ymax, ax=ax, show=show,
color=color, xlabel=xlabel, ylabel=ylabel,
legend=legend, chance=chance, label=label)
def plot_times(self, train_time, title=None, xmin=None, xmax=None,
ymin=None, ymax=None, ax=None, show=True, color=None,
xlabel=True, ylabel=True, legend=True, chance=True,
label='Classif. score'):
"""Plotting function of GeneralizationAcrossTime object
Plot the scores of the classifier trained at specific training time(s).
Parameters
----------
train_time : float | list or array of float
Plots scores of the classifier trained at train_time.
title : str | None
Figure title. Defaults to None.
xmin : float | None, optional
Min time value. Defaults to None.
xmax : float | None, optional
Max time value. Defaults to None.
ymin : float | None, optional
Min score value. If None, sets to min(scores). Defaults to None.
ymax : float | None, optional
Max score value. If None, sets to max(scores). Defaults to None.
ax : object | None
            Instance of matplotlib.axes.Axes. If None, generate new figure.
Defaults to None.
show : bool
If True, the figure will be shown. Defaults to True.
color : str or list of str
Score line color(s).
xlabel : bool
If True, the xlabel is displayed. Defaults to True.
ylabel : bool
If True, the ylabel is displayed. Defaults to True.
legend : bool
If True, a legend is displayed. Defaults to True.
        chance : bool | float
            Plot chance level. If True, chance level is estimated from the type
            of scorer. Defaults to True.
label : str
Score label used in the legend. Defaults to 'Classif. score'.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
if (not isinstance(train_time, float) and
not (isinstance(train_time, (list, np.ndarray)) and
np.all([isinstance(time, float) for time in train_time]))):
raise ValueError('train_time must be float | list or array of '
'floats. Got %s.' % type(train_time))
return plot_gat_times(self, train_time=train_time, title=title,
xmin=xmin, xmax=xmax,
ymin=ymin, ymax=ymax, ax=ax, show=show,
color=color, xlabel=xlabel, ylabel=ylabel,
legend=legend, chance=chance, label=label)
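# Minimal usage sketch (illustrative, not from the original file; `epochs` is
# assumed to be an MNE Epochs object with two event types):
#   gat = GeneralizationAcrossTime(predict_mode='cross-validation', n_jobs=1)
#   gat.fit(epochs)        # one classifier per training time
#   gat.score(epochs)      # predict and score across all testing times
#   gat.plot()             # full generalization matrix
#   gat.plot_diagonal()    # scores where training and testing times match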
def _predict_time_loop(X, estimators, cv, slices, predict_mode):
"""Aux function of GeneralizationAcrossTime
Run classifiers predictions loop across time samples.
Parameters
----------
X : np.ndarray, shape (n_epochs, n_features, n_times)
To-be-fitted data.
estimators : arraylike, shape (n_times, n_folds)
Array of Sklearn classifiers fitted in cross-validation.
slices : list
        List of slices selecting data from X from which the prediction is
generated.
predict_mode : {'cross-validation', 'mean-prediction'}
Indicates how predictions are achieved with regards to the cross-
validation procedure:
'cross-validation' : estimates a single prediction per sample based
on the unique independent classifier fitted in the cross-
validation.
'mean-prediction' : estimates k predictions per sample, based on
each of the k-fold cross-validation classifiers, and average
these predictions into a single estimate per sample.
Default: 'cross-validation'
"""
n_epochs = len(X)
# Loop across testing slices
y_pred = [list() for _ in range(len(slices))]
# XXX EHN: This loop should be parallelized in a similar way to fit()
for t, indices in enumerate(slices):
# Flatten features in case of multiple time samples
Xtrain = X[:, :, indices].reshape(
n_epochs, np.prod(X[:, :, indices].shape[1:]))
# Single trial predictions
if predict_mode == 'cross-validation':
# If predict within cross validation, only predict with
# corresponding classifier, else predict with each fold's
# classifier and average prediction.
# Check that training cv and predicting cv match
if (len(estimators) != cv.n_folds) or (cv.n != Xtrain.shape[0]):
raise ValueError(
'When `predict_mode = "cross-validation"`, the training '
'and predicting cv schemes must be identical.')
for k, (train, test) in enumerate(cv):
                # XXX I didn't manage to initialize this array correctly, as
                # its size depends on the type of predictor and the
                # number of classes.
if k == 0:
y_pred_ = _predict(Xtrain[test, :], estimators[k:k + 1])
y_pred[t] = np.empty((n_epochs, y_pred_.shape[1]))
y_pred[t][test, :] = y_pred_
y_pred[t][test, :] = _predict(Xtrain[test, :],
estimators[k:k + 1])
elif predict_mode == 'mean-prediction':
y_pred[t] = _predict(Xtrain, estimators)
else:
raise ValueError('`predict_mode` must be a str, "mean-prediction"'
' or "cross-validation"')
return y_pred
def _score_loop(y_true, y_pred, slices, scorer):
n_time = len(slices)
# Loop across testing times
scores = [0] * n_time
for t, indices in enumerate(slices):
# Scores across trials
scores[t] = scorer(y_true, y_pred[t])
return scores
def _check_epochs_input(epochs, y, picks=None):
"""Aux function of GeneralizationAcrossTime
Format MNE data into scikit-learn X and y
Parameters
----------
epochs : instance of Epochs
The epochs.
y : np.ndarray shape (n_epochs) | list shape (n_epochs) | None
        To-be-fitted model. If y is None, y = epochs.events[:, 2].
Defaults to None.
picks : array-like of int | None
Channels to be included. If None only good data channels are used.
Defaults to None.
Returns
-------
X : np.ndarray, shape (n_epochs, n_selected_chans, n_times)
To-be-fitted data.
y : np.ndarray, shape (n_epochs,)
To-be-fitted model.
picks : np.ndarray, shape (n_selected_chans,)
The channels to be used.
"""
if y is None:
y = epochs.events[:, 2]
elif isinstance(y, list):
y = np.array(y)
# Convert MNE data into trials x features x time matrix
X = epochs.get_data()
# Pick channels
if picks is None: # just use good data channels
picks = pick_types(epochs.info, meg=True, eeg=True, seeg=True,
eog=False, ecg=False, misc=False, stim=False,
ref_meg=False, exclude='bads')
if isinstance(picks, (list, np.ndarray)):
picks = np.array(picks, dtype=np.int)
else:
raise ValueError('picks must be a list or a numpy.ndarray of int')
X = X[:, picks, :]
# Check data sets
assert X.shape[0] == y.shape[0]
return X, y, picks
def _fit_slices(clf, x_chunk, y, slices, cv):
"""Aux function of GeneralizationAcrossTime
Fit each classifier.
Parameters
----------
clf : scikit-learn classifier
The classifier object.
x_chunk : np.ndarray, shape (n_epochs, n_features, n_times)
To-be-fitted data.
y : list | array, shape (n_epochs,)
To-be-fitted model.
slices : list | array, shape (n_training_slice,)
List of training slices, indicating time sample relative to X
cv : scikit-learn cross-validation generator
A cross-validation generator to use.
Returns
-------
estimators : list of lists of estimators
List of fitted scikit-learn classifiers corresponding to each training
slice.
"""
from sklearn.base import clone
# Initialize
n_epochs = len(x_chunk)
estimators = list()
    # Identify the time samples of x_chunk corresponding to X
values = np.unique(np.concatenate(slices))
indices = range(len(values))
# Loop across time slices
for t_slice in slices:
# Translate absolute time samples into time sample relative to x_chunk
for ii in indices:
t_slice[t_slice == values[ii]] = indices[ii]
# Select slice
X = x_chunk[..., t_slice]
# Reshape data matrix to flatten features in case of multiple time
# samples.
X = X.reshape(n_epochs, np.prod(X.shape[1:]))
# Loop across folds
estimators_ = list()
for fold, (train, test) in enumerate(cv):
# Fit classifier
clf_ = clone(clf)
clf_.fit(X[train, :], y[train])
estimators_.append(clf_)
# Store classifier
estimators.append(estimators_)
return estimators
def _sliding_window(times, window_params):
"""Aux function of GeneralizationAcrossTime
Define the slices on which to train each classifier.
Parameters
----------
times : np.ndarray, shape (n_times,)
Array of times from MNE epochs.
window_params : dict keys: ('start', 'stop', 'step', 'length')
Either train or test times. See GAT documentation.
Returns
-------
    window_params : dict
        The input window parameters, completed with 'slices' (the time sample
        indices each classifier is fitted on) and 'times' (the corresponding
        training times in seconds).
"""
window_params = _DecodingTime(window_params)
    # Approximate duration of one time sample (in seconds)
freq = (times[-1] - times[0]) / len(times)
# Default values
if ('slices' in window_params and
all(k in window_params for k in
('start', 'stop', 'step', 'length'))):
time_pick = window_params['slices']
else:
if 'start' not in window_params:
window_params['start'] = times[0]
if 'stop' not in window_params:
window_params['stop'] = times[-1]
if 'step' not in window_params:
window_params['step'] = freq
if 'length' not in window_params:
window_params['length'] = freq
# Convert seconds to index
def find_time_idx(t): # find closest time point
return np.argmin(np.abs(np.asarray(times) - t))
start = find_time_idx(window_params['start'])
stop = find_time_idx(window_params['stop'])
step = int(round(window_params['step'] / freq))
length = int(round(window_params['length'] / freq))
# For each training slice, give time samples to be included
time_pick = [range(start, start + length)]
while (time_pick[-1][0] + step) <= (stop - length + 1):
start = time_pick[-1][0] + step
time_pick.append(range(start, start + length))
window_params['slices'] = time_pick
    # Keep the time (in seconds) of the last sample of each training window
t_inds_ = [t[-1] for t in window_params['slices']]
window_params['times'] = times[t_inds_]
return window_params
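# Worked sketch (illustrative): with times = [0, 0.01, 0.02, 0.03, 0.04, 0.05]
# and window_params = dict(start=0, stop=0.05, step=0.02, length=0.02), the
# slices computed above cover samples [0, 1], [2, 3] and [4, 5], i.e.
# two-sample windows hopping by two samples.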
def _predict(X, estimators):
"""Aux function of GeneralizationAcrossTime
Predict each classifier. If multiple classifiers are passed, average
prediction across all classifiers to result in a single prediction per
classifier.
Parameters
----------
estimators : np.ndarray, shape (n_folds,) | shape (1,)
Array of scikit-learn classifiers to predict data.
X : np.ndarray, shape (n_epochs, n_features, n_times)
To-be-predicted data
Returns
-------
y_pred : np.ndarray, shape (n_epochs, m_prediction_dimensions)
Classifier's prediction for each trial.
"""
from scipy import stats
from sklearn.base import is_classifier
# Initialize results:
n_epochs = X.shape[0]
n_clf = len(estimators)
# Compute prediction for each sub-estimator (i.e. per fold)
# if independent, estimators = all folds
for fold, clf in enumerate(estimators):
_y_pred = clf.predict(X)
# initialize predict_results array
if fold == 0:
predict_size = _y_pred.shape[1] if _y_pred.ndim > 1 else 1
y_pred = np.ones((n_epochs, predict_size, n_clf))
if predict_size == 1:
y_pred[:, 0, fold] = _y_pred
else:
y_pred[:, :, fold] = _y_pred
# Collapse y_pred across folds if necessary (i.e. if independent)
if fold > 0:
# XXX need API to identify how multiple predictions can be combined?
if is_classifier(clf):
y_pred, _ = stats.mode(y_pred, axis=2)
else:
y_pred = np.mean(y_pred, axis=2)
# Format shape
y_pred = y_pred.reshape((n_epochs, predict_size))
return y_pred
def _time_gen_one_fold(clf, X, y, train, test, scoring):
"""Aux function of time_generalization"""
from sklearn.metrics import SCORERS
n_times = X.shape[2]
scores = np.zeros((n_times, n_times))
scorer = SCORERS[scoring]
for t_train in range(n_times):
X_train = X[train, :, t_train]
clf.fit(X_train, y[train])
for t_test in range(n_times):
X_test = X[test, :, t_test]
scores[t_test, t_train] += scorer(clf, X_test, y[test])
return scores
| bsd-3-clause |
zfrenchee/pandas | pandas/tests/sparse/test_array.py | 4 | 33223 | from pandas.compat import range
import re
import operator
import pytest
import warnings
from numpy import nan
import numpy as np
from pandas.core.sparse.api import SparseArray, SparseSeries
from pandas._libs.sparse import IntIndex
from pandas.util.testing import assert_almost_equal
import pandas.util.testing as tm
class TestSparseArray(object):
def setup_method(self, method):
self.arr_data = np.array([nan, nan, 1, 2, 3, nan, 4, 5, nan, 6])
self.arr = SparseArray(self.arr_data)
self.zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
def test_constructor_dtype(self):
arr = SparseArray([np.nan, 1, 2, np.nan])
assert arr.dtype == np.float64
assert np.isnan(arr.fill_value)
arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0)
assert arr.dtype == np.float64
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=np.float64)
assert arr.dtype == np.float64
assert np.isnan(arr.fill_value)
arr = SparseArray([0, 1, 2, 4], dtype=np.int64)
assert arr.dtype == np.int64
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=np.int64)
assert arr.dtype == np.int64
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=None)
assert arr.dtype == np.int64
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=None)
assert arr.dtype == np.int64
assert arr.fill_value == 0
def test_constructor_object_dtype(self):
# GH 11856
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object)
assert arr.dtype == np.object
assert np.isnan(arr.fill_value)
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object,
fill_value='A')
assert arr.dtype == np.object
assert arr.fill_value == 'A'
# GH 17574
data = [False, 0, 100.0, 0.0]
arr = SparseArray(data, dtype=np.object, fill_value=False)
assert arr.dtype == np.object
assert arr.fill_value is False
arr_expected = np.array(data, dtype=np.object)
it = (type(x) == type(y) and x == y for x, y in zip(arr, arr_expected))
assert np.fromiter(it, dtype=np.bool).all()
def test_constructor_spindex_dtype(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]))
tm.assert_sp_array_equal(arr, SparseArray([np.nan, 1, 2, np.nan]))
assert arr.dtype == np.float64
assert np.isnan(arr.fill_value)
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=np.int64, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=np.int64, fill_value=0)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == np.int64
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=np.int64)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=np.int64)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == np.int64
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=None, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == np.int64
assert arr.fill_value == 0
# scalar input
arr = SparseArray(data=1, sparse_index=IntIndex(1, [0]), dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == np.int64
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=None)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == np.int64
assert arr.fill_value == 0
def test_sparseseries_roundtrip(self):
# GH 13999
for kind in ['integer', 'block']:
for fill in [1, np.nan, 0]:
arr = SparseArray([np.nan, 1, np.nan, 2, 3], kind=kind,
fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
arr = SparseArray([0, 0, 0, 1, 1, 2], dtype=np.int64,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr), dtype=np.int64)
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
for fill in [True, False, np.nan]:
arr = SparseArray([True, False, True, True], dtype=np.bool,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
def test_get_item(self):
assert np.isnan(self.arr[1])
assert self.arr[2] == 1
assert self.arr[7] == 5
assert self.zarr[0] == 0
assert self.zarr[2] == 1
assert self.zarr[7] == 5
errmsg = re.compile("bounds")
tm.assert_raises_regex(IndexError, errmsg, lambda: self.arr[11])
tm.assert_raises_regex(IndexError, errmsg, lambda: self.arr[-11])
assert self.arr[-1] == self.arr[len(self.arr) - 1]
def test_take(self):
assert np.isnan(self.arr.take(0))
assert np.isscalar(self.arr.take(2))
assert self.arr.take(2) == np.take(self.arr_data, 2)
assert self.arr.take(6) == np.take(self.arr_data, 6)
exp = SparseArray(np.take(self.arr_data, [2, 3]))
tm.assert_sp_array_equal(self.arr.take([2, 3]), exp)
exp = SparseArray(np.take(self.arr_data, [0, 1, 2]))
tm.assert_sp_array_equal(self.arr.take([0, 1, 2]), exp)
def test_take_fill_value(self):
data = np.array([1, np.nan, 0, 3, 0])
sparse = SparseArray(data, fill_value=0)
exp = SparseArray(np.take(data, [0]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([0]), exp)
exp = SparseArray(np.take(data, [1, 3, 4]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([1, 3, 4]), exp)
def test_take_negative(self):
exp = SparseArray(np.take(self.arr_data, [-1]))
tm.assert_sp_array_equal(self.arr.take([-1]), exp)
exp = SparseArray(np.take(self.arr_data, [-4, -3, -2]))
tm.assert_sp_array_equal(self.arr.take([-4, -3, -2]), exp)
def test_bad_take(self):
tm.assert_raises_regex(
IndexError, "bounds", lambda: self.arr.take(11))
pytest.raises(IndexError, lambda: self.arr.take(-11))
def test_take_invalid_kwargs(self):
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, self.arr.take,
[2, 3], foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, self.arr.take,
[2, 3], out=self.arr)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, self.arr.take,
[2, 3], mode='clip')
def test_take_filling(self):
# similar tests as GH 12631
sparse = SparseArray([np.nan, np.nan, 1, np.nan, 4])
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
# fill_value
result = sparse.take(np.array([1, 0, -1]), fill_value=True)
expected = SparseArray([np.nan, np.nan, np.nan])
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
sparse.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
sparse.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_take_filling_fill_value(self):
# same tests as GH 12631
sparse = SparseArray([np.nan, 0, 1, 0, 4], fill_value=0)
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# fill_value
result = sparse.take(np.array([1, 0, -1]), fill_value=True)
expected = SparseArray([0, np.nan, 0], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
sparse.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
sparse.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_take_filling_all_nan(self):
sparse = SparseArray([np.nan, np.nan, np.nan, np.nan, np.nan])
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, np.nan])
tm.assert_sp_array_equal(result, expected)
result = sparse.take(np.array([1, 0, -1]), fill_value=True)
expected = SparseArray([np.nan, np.nan, np.nan])
tm.assert_sp_array_equal(result, expected)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_set_item(self):
def setitem():
self.arr[5] = 3
def setslice():
self.arr[1:5] = 2
tm.assert_raises_regex(TypeError, "item assignment", setitem)
tm.assert_raises_regex(TypeError, "item assignment", setslice)
def test_constructor_from_too_large_array(self):
tm.assert_raises_regex(TypeError, "expected dimension <= 1 data",
SparseArray, np.arange(10).reshape((2, 5)))
def test_constructor_from_sparse(self):
res = SparseArray(self.zarr)
assert res.fill_value == 0
assert_almost_equal(res.sp_values, self.zarr.sp_values)
def test_constructor_copy(self):
cp = SparseArray(self.arr, copy=True)
cp.sp_values[:3] = 0
assert not (self.arr.sp_values[:3] == 0).any()
not_copy = SparseArray(self.arr)
not_copy.sp_values[:3] = 0
assert (self.arr.sp_values[:3] == 0).all()
def test_constructor_bool(self):
# GH 10648
data = np.array([False, False, True, True, False, False])
arr = SparseArray(data, fill_value=False, dtype=bool)
assert arr.dtype == bool
tm.assert_numpy_array_equal(arr.sp_values, np.array([True, True]))
tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices,
np.array([2, 3], np.int32))
for dense in [arr.to_dense(), arr.values]:
assert dense.dtype == bool
tm.assert_numpy_array_equal(dense, data)
def test_constructor_bool_fill_value(self):
arr = SparseArray([True, False, True], dtype=None)
assert arr.dtype == np.bool
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool)
assert arr.dtype == np.bool
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool, fill_value=True)
assert arr.dtype == np.bool
assert arr.fill_value
def test_constructor_float32(self):
# GH 10648
data = np.array([1., np.nan, 3], dtype=np.float32)
arr = SparseArray(data, dtype=np.float32)
assert arr.dtype == np.float32
tm.assert_numpy_array_equal(arr.sp_values,
np.array([1, 3], dtype=np.float32))
tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices,
np.array([0, 2], dtype=np.int32))
for dense in [arr.to_dense(), arr.values]:
assert dense.dtype == np.float32
tm.assert_numpy_array_equal(dense, data)
def test_astype(self):
res = self.arr.astype('f8')
res.sp_values[:3] = 27
assert not (self.arr.sp_values[:3] == 27).any()
msg = "unable to coerce current fill_value nan to int64 dtype"
with tm.assert_raises_regex(ValueError, msg):
self.arr.astype('i8')
arr = SparseArray([0, np.nan, 0, 1])
with tm.assert_raises_regex(ValueError, msg):
arr.astype('i8')
arr = SparseArray([0, np.nan, 0, 1], fill_value=0)
msg = 'Cannot convert non-finite values \\(NA or inf\\) to integer'
with tm.assert_raises_regex(ValueError, msg):
arr.astype('i8')
def test_astype_all(self):
vals = np.array([1, 2, 3])
arr = SparseArray(vals, fill_value=1)
types = [np.float64, np.float32, np.int64,
np.int32, np.int16, np.int8]
for typ in types:
res = arr.astype(typ)
assert res.dtype == typ
assert res.sp_values.dtype == typ
tm.assert_numpy_array_equal(res.values, vals.astype(typ))
def test_set_fill_value(self):
arr = SparseArray([1., np.nan, 2.], fill_value=np.nan)
arr.fill_value = 2
assert arr.fill_value == 2
arr = SparseArray([1, 0, 2], fill_value=0, dtype=np.int64)
arr.fill_value = 2
assert arr.fill_value == 2
# coerces to int
msg = "unable to set fill_value 3\\.1 to int64 dtype"
with tm.assert_raises_regex(ValueError, msg):
arr.fill_value = 3.1
msg = "unable to set fill_value nan to int64 dtype"
with tm.assert_raises_regex(ValueError, msg):
arr.fill_value = np.nan
arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool)
arr.fill_value = True
assert arr.fill_value
# coerces to bool
msg = "unable to set fill_value 0 to bool dtype"
with tm.assert_raises_regex(ValueError, msg):
arr.fill_value = 0
msg = "unable to set fill_value nan to bool dtype"
with tm.assert_raises_regex(ValueError, msg):
arr.fill_value = np.nan
# invalid
msg = "fill_value must be a scalar"
for val in [[1, 2, 3], np.array([1, 2]), (1, 2, 3)]:
with tm.assert_raises_regex(ValueError, msg):
arr.fill_value = val
def test_copy_shallow(self):
arr2 = self.arr.copy(deep=False)
def _get_base(values):
base = values.base
while base.base is not None:
base = base.base
return base
assert (_get_base(arr2) is _get_base(self.arr))
def test_values_asarray(self):
assert_almost_equal(self.arr.values, self.arr_data)
assert_almost_equal(self.arr.to_dense(), self.arr_data)
assert_almost_equal(self.arr.sp_values, np.asarray(self.arr))
def test_to_dense(self):
vals = np.array([1, np.nan, np.nan, 3, np.nan])
res = SparseArray(vals).to_dense()
tm.assert_numpy_array_equal(res, vals)
res = SparseArray(vals, fill_value=0).to_dense()
tm.assert_numpy_array_equal(res, vals)
vals = np.array([1, np.nan, 0, 3, 0])
res = SparseArray(vals).to_dense()
tm.assert_numpy_array_equal(res, vals)
res = SparseArray(vals, fill_value=0).to_dense()
tm.assert_numpy_array_equal(res, vals)
vals = np.array([np.nan, np.nan, np.nan, np.nan, np.nan])
res = SparseArray(vals).to_dense()
tm.assert_numpy_array_equal(res, vals)
res = SparseArray(vals, fill_value=0).to_dense()
tm.assert_numpy_array_equal(res, vals)
# see gh-14647
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
SparseArray(vals).to_dense(fill=2)
def test_getitem(self):
def _checkit(i):
assert_almost_equal(self.arr[i], self.arr.values[i])
for i in range(len(self.arr)):
_checkit(i)
_checkit(-i)
def test_getslice(self):
result = self.arr[:-3]
exp = SparseArray(self.arr.values[:-3])
tm.assert_sp_array_equal(result, exp)
result = self.arr[-4:]
exp = SparseArray(self.arr.values[-4:])
tm.assert_sp_array_equal(result, exp)
# two corner cases from Series
result = self.arr[-12:]
exp = SparseArray(self.arr)
tm.assert_sp_array_equal(result, exp)
result = self.arr[:-12]
exp = SparseArray(self.arr.values[:0])
tm.assert_sp_array_equal(result, exp)
def test_getslice_tuple(self):
dense = np.array([np.nan, 0, 3, 4, 0, 5, np.nan, np.nan, 0])
sparse = SparseArray(dense)
res = sparse[4:, ]
exp = SparseArray(dense[4:, ])
tm.assert_sp_array_equal(res, exp)
sparse = SparseArray(dense, fill_value=0)
res = sparse[4:, ]
exp = SparseArray(dense[4:, ], fill_value=0)
tm.assert_sp_array_equal(res, exp)
with pytest.raises(IndexError):
sparse[4:, :]
with pytest.raises(IndexError):
# check numpy compat
dense[4:, :]
def test_binary_operators(self):
data1 = np.random.randn(20)
data2 = np.random.randn(20)
data1[::2] = np.nan
data2[::3] = np.nan
arr1 = SparseArray(data1)
arr2 = SparseArray(data2)
data1[::2] = 3
data2[::3] = 3
farr1 = SparseArray(data1, fill_value=3)
farr2 = SparseArray(data2, fill_value=3)
def _check_op(op, first, second):
res = op(first, second)
exp = SparseArray(op(first.values, second.values),
fill_value=first.fill_value)
assert isinstance(res, SparseArray)
assert_almost_equal(res.values, exp.values)
res2 = op(first, second.values)
assert isinstance(res2, SparseArray)
tm.assert_sp_array_equal(res, res2)
res3 = op(first.values, second)
assert isinstance(res3, SparseArray)
tm.assert_sp_array_equal(res, res3)
res4 = op(first, 4)
assert isinstance(res4, SparseArray)
# ignore this if the actual op raises (e.g. pow)
try:
exp = op(first.values, 4)
exp_fv = op(first.fill_value, 4)
assert_almost_equal(res4.fill_value, exp_fv)
assert_almost_equal(res4.values, exp)
except ValueError:
pass
def _check_inplace_op(op):
tmp = arr1.copy()
pytest.raises(NotImplementedError, op, tmp, arr2)
with np.errstate(all='ignore'):
bin_ops = [operator.add, operator.sub, operator.mul,
operator.truediv, operator.floordiv, operator.pow]
for op in bin_ops:
_check_op(op, arr1, arr2)
_check_op(op, farr1, farr2)
inplace_ops = ['iadd', 'isub', 'imul', 'itruediv', 'ifloordiv',
'ipow']
for op in inplace_ops:
_check_inplace_op(getattr(operator, op))
def test_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
tm.assert_sp_array_equal(unpickled, obj)
_check_roundtrip(self.arr)
_check_roundtrip(self.zarr)
def test_generator_warnings(self):
sp_arr = SparseArray([1, 2, 3])
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings(action='always',
category=DeprecationWarning)
warnings.filterwarnings(action='always',
category=PendingDeprecationWarning)
for _ in sp_arr:
pass
assert len(w) == 0
def test_fillna(self):
s = SparseArray([1, np.nan, np.nan, 3, np.nan])
res = s.fillna(-1)
exp = SparseArray([1, -1, -1, 3, -1], fill_value=-1, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([1, -1, -1, 3, -1], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, 0, 3, 0])
res = s.fillna(-1)
exp = SparseArray([1, -1, 0, 3, 0], fill_value=-1, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, 0, 3, 0], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([1, -1, 0, 3, 0], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([np.nan, np.nan, np.nan, np.nan])
res = s.fillna(-1)
exp = SparseArray([-1, -1, -1, -1], fill_value=-1, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([np.nan, np.nan, np.nan, np.nan], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([-1, -1, -1, -1], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
# float dtype's fill_value is np.nan, replaced by -1
s = SparseArray([0., 0., 0., 0.])
res = s.fillna(-1)
exp = SparseArray([0., 0., 0., 0.], fill_value=-1)
tm.assert_sp_array_equal(res, exp)
# int dtype shouldn't have missing. No changes.
s = SparseArray([0, 0, 0, 0])
assert s.dtype == np.int64
assert s.fill_value == 0
res = s.fillna(-1)
tm.assert_sp_array_equal(res, s)
s = SparseArray([0, 0, 0, 0], fill_value=0)
assert s.dtype == np.int64
assert s.fill_value == 0
res = s.fillna(-1)
exp = SparseArray([0, 0, 0, 0], fill_value=0)
tm.assert_sp_array_equal(res, exp)
# fill_value can be nan if there is no missing hole.
# only fill_value will be changed
s = SparseArray([0, 0, 0, 0], fill_value=np.nan)
assert s.dtype == np.int64
assert np.isnan(s.fill_value)
res = s.fillna(-1)
exp = SparseArray([0, 0, 0, 0], fill_value=-1)
tm.assert_sp_array_equal(res, exp)
def test_fillna_overlap(self):
s = SparseArray([1, np.nan, np.nan, 3, np.nan])
# filling with existing value doesn't replace existing value with
# fill_value, i.e. existing 3 remains in sp_values
res = s.fillna(3)
exp = np.array([1, 3, 3, 3, 3], dtype=np.float64)
tm.assert_numpy_array_equal(res.to_dense(), exp)
s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0)
res = s.fillna(3)
exp = SparseArray([1, 3, 3, 3, 3], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
class TestSparseArrayAnalytics(object):
@pytest.mark.parametrize('data,pos,neg', [
([True, True, True], True, False),
([1, 2, 1], 1, 0),
([1.0, 2.0, 1.0], 1.0, 0.0)
])
def test_all(self, data, pos, neg):
# GH 17570
out = SparseArray(data).all()
assert out
out = SparseArray(data, fill_value=pos).all()
assert out
data[1] = neg
out = SparseArray(data).all()
assert not out
out = SparseArray(data, fill_value=pos).all()
assert not out
@pytest.mark.parametrize('data,pos,neg', [
([True, True, True], True, False),
([1, 2, 1], 1, 0),
([1.0, 2.0, 1.0], 1.0, 0.0)
])
def test_numpy_all(self, data, pos, neg):
# GH 17570
out = np.all(SparseArray(data))
assert out
out = np.all(SparseArray(data, fill_value=pos))
assert out
data[1] = neg
out = np.all(SparseArray(data))
assert not out
out = np.all(SparseArray(data, fill_value=pos))
assert not out
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.all,
SparseArray(data), out=out)
@pytest.mark.parametrize('data,pos,neg', [
([False, True, False], True, False),
([0, 2, 0], 2, 0),
([0.0, 2.0, 0.0], 2.0, 0.0)
])
def test_any(self, data, pos, neg):
# GH 17570
out = SparseArray(data).any()
assert out
out = SparseArray(data, fill_value=pos).any()
assert out
data[1] = neg
out = SparseArray(data).any()
assert not out
out = SparseArray(data, fill_value=pos).any()
assert not out
@pytest.mark.parametrize('data,pos,neg', [
([False, True, False], True, False),
([0, 2, 0], 2, 0),
([0.0, 2.0, 0.0], 2.0, 0.0)
])
def test_numpy_any(self, data, pos, neg):
# GH 17570
out = np.any(SparseArray(data))
assert out
out = np.any(SparseArray(data, fill_value=pos))
assert out
data[1] = neg
out = np.any(SparseArray(data))
assert not out
out = np.any(SparseArray(data, fill_value=pos))
assert not out
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.any,
SparseArray(data), out=out)
def test_sum(self):
data = np.arange(10).astype(float)
out = SparseArray(data).sum()
assert out == 45.0
data[5] = np.nan
out = SparseArray(data, fill_value=2).sum()
assert out == 40.0
out = SparseArray(data, fill_value=np.nan).sum()
assert out == 40.0
def test_numpy_sum(self):
data = np.arange(10).astype(float)
out = np.sum(SparseArray(data))
assert out == 45.0
data[5] = np.nan
out = np.sum(SparseArray(data, fill_value=2))
assert out == 40.0
out = np.sum(SparseArray(data, fill_value=np.nan))
assert out == 40.0
msg = "the 'dtype' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.sum,
SparseArray(data), dtype=np.int64)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.sum,
SparseArray(data), out=out)
def test_cumsum(self):
non_null_data = np.array([1, 2, 3, 4, 5], dtype=float)
non_null_expected = SparseArray(non_null_data.cumsum())
null_data = np.array([1, 2, np.nan, 4, 5], dtype=float)
null_expected = SparseArray(np.array([1.0, 3.0, np.nan, 7.0, 12.0]))
for data, expected in [
(null_data, null_expected),
(non_null_data, non_null_expected)
]:
out = SparseArray(data).cumsum()
tm.assert_sp_array_equal(out, expected)
out = SparseArray(data, fill_value=np.nan).cumsum()
tm.assert_sp_array_equal(out, expected)
out = SparseArray(data, fill_value=2).cumsum()
tm.assert_sp_array_equal(out, expected)
axis = 1 # SparseArray currently 1-D, so only axis = 0 is valid.
msg = "axis\\(={axis}\\) out of bounds".format(axis=axis)
with tm.assert_raises_regex(ValueError, msg):
SparseArray(data).cumsum(axis=axis)
def test_numpy_cumsum(self):
non_null_data = np.array([1, 2, 3, 4, 5], dtype=float)
non_null_expected = SparseArray(non_null_data.cumsum())
null_data = np.array([1, 2, np.nan, 4, 5], dtype=float)
null_expected = SparseArray(np.array([1.0, 3.0, np.nan, 7.0, 12.0]))
for data, expected in [
(null_data, null_expected),
(non_null_data, non_null_expected)
]:
out = np.cumsum(SparseArray(data))
tm.assert_sp_array_equal(out, expected)
out = np.cumsum(SparseArray(data, fill_value=np.nan))
tm.assert_sp_array_equal(out, expected)
out = np.cumsum(SparseArray(data, fill_value=2))
tm.assert_sp_array_equal(out, expected)
msg = "the 'dtype' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.cumsum,
SparseArray(data), dtype=np.int64)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.cumsum,
SparseArray(data), out=out)
def test_mean(self):
data = np.arange(10).astype(float)
out = SparseArray(data).mean()
assert out == 4.5
data[5] = np.nan
out = SparseArray(data).mean()
assert out == 40.0 / 9
def test_numpy_mean(self):
data = np.arange(10).astype(float)
out = np.mean(SparseArray(data))
assert out == 4.5
data[5] = np.nan
out = np.mean(SparseArray(data))
assert out == 40.0 / 9
msg = "the 'dtype' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.mean,
SparseArray(data), dtype=np.int64)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.mean,
SparseArray(data), out=out)
def test_ufunc(self):
# GH 13853 make sure ufunc is applied to fill_value
sparse = SparseArray([1, np.nan, 2, np.nan, -2])
result = SparseArray([1, np.nan, 2, np.nan, 2])
tm.assert_sp_array_equal(abs(sparse), result)
tm.assert_sp_array_equal(np.abs(sparse), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=1)
result = SparseArray([1, 2, 2], sparse_index=sparse.sp_index,
fill_value=1)
tm.assert_sp_array_equal(abs(sparse), result)
tm.assert_sp_array_equal(np.abs(sparse), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=-1)
result = SparseArray([1, 2, 2], sparse_index=sparse.sp_index,
fill_value=1)
tm.assert_sp_array_equal(abs(sparse), result)
tm.assert_sp_array_equal(np.abs(sparse), result)
sparse = SparseArray([1, np.nan, 2, np.nan, -2])
result = SparseArray(np.sin([1, np.nan, 2, np.nan, -2]))
tm.assert_sp_array_equal(np.sin(sparse), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=1)
result = SparseArray(np.sin([1, -1, 2, -2]), fill_value=np.sin(1))
tm.assert_sp_array_equal(np.sin(sparse), result)
sparse = SparseArray([1, -1, 0, -2], fill_value=0)
result = SparseArray(np.sin([1, -1, 0, -2]), fill_value=np.sin(0))
tm.assert_sp_array_equal(np.sin(sparse), result)
def test_ufunc_args(self):
# GH 13853 make sure ufunc is applied to fill_value, including its arg
sparse = SparseArray([1, np.nan, 2, np.nan, -2])
result = SparseArray([2, np.nan, 3, np.nan, -1])
tm.assert_sp_array_equal(np.add(sparse, 1), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=1)
result = SparseArray([2, 0, 3, -1], fill_value=2)
tm.assert_sp_array_equal(np.add(sparse, 1), result)
sparse = SparseArray([1, -1, 0, -2], fill_value=0)
result = SparseArray([2, 0, 1, -1], fill_value=1)
tm.assert_sp_array_equal(np.add(sparse, 1), result)
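def _sparse_layout_sketch():
    # Hedged illustration, not an original pandas test: a SparseArray stores
    # only the values that differ from ``fill_value`` (sp_values) plus the
    # integer positions they occupy in the dense array (sp_index), which is
    # exactly what the assertions above inspect. Relies on the module-level
    # ``SparseArray`` import used by the tests.
    arr = SparseArray([0, 0, 1, 2, 0], fill_value=0)
    assert list(arr.sp_values) == [1, 2]
    assert list(arr.sp_index.indices) == [2, 3]
    assert arr.fill_value == 0
    return arr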
| bsd-3-clause |
zakandrewking/cobrapy | cobra/core/solution.py | 1 | 10636 | # -*- coding: utf-8 -*-
"""Provide unified interfaces to optimization solutions."""
from __future__ import absolute_import
import logging
from builtins import object, super
from warnings import warn
from numpy import empty, nan
from optlang.interface import OPTIMAL
from pandas import Series, DataFrame, option_context
from cobra.util.solver import check_solver_status
__all__ = ("Solution", "LegacySolution", "get_solution")
LOGGER = logging.getLogger(__name__)
class Solution(object):
"""
A unified interface to a `cobra.Model` optimization solution.
Notes
-----
    Solution is meant to be constructed by `get_solution`; please look at that
    function to fully understand the `Solution` class.
Attributes
----------
objective_value : float
The (optimal) value for the objective function.
status : str
The solver status related to the solution.
fluxes : pandas.Series
Contains the reaction fluxes (primal values of variables).
reduced_costs : pandas.Series
Contains reaction reduced costs (dual values of variables).
shadow_prices : pandas.Series
Contains metabolite shadow prices (dual values of constraints).
Deprecated Attributes
---------------------
f : float
Use `objective_value` instead.
x : list
Use `fluxes.values` instead.
x_dict : pandas.Series
Use `fluxes` instead.
y : list
Use `reduced_costs.values` instead.
y_dict : pandas.Series
Use `reduced_costs` instead.
"""
def __init__(self, objective_value, status, fluxes, reduced_costs=None,
shadow_prices=None, **kwargs):
"""
Initialize a `Solution` from its components.
Parameters
----------
objective_value : float
The (optimal) value for the objective function.
status : str
The solver status related to the solution.
fluxes : pandas.Series
Contains the reaction fluxes (primal values of variables).
reduced_costs : pandas.Series
Contains reaction reduced costs (dual values of variables).
shadow_prices : pandas.Series
Contains metabolite shadow prices (dual values of constraints).
"""
super(Solution, self).__init__(**kwargs)
self.objective_value = objective_value
self.status = status
self.fluxes = fluxes
self.reduced_costs = reduced_costs
self.shadow_prices = shadow_prices
def __repr__(self):
"""String representation of the solution instance."""
if self.status != OPTIMAL:
return "<Solution {0:s} at 0x{1:x}>".format(self.status, id(self))
return "<Solution {0:.3f} at 0x{1:x}>".format(self.objective_value,
id(self))
def _repr_html_(self):
if self.status == OPTIMAL:
with option_context('display.max_rows', 10):
html = ('<strong><em>Optimal</em> solution with objective '
'value {:.3f}</strong><br>{}'
.format(self.objective_value,
self.to_frame()._repr_html_()))
else:
html = '<strong><em>{}</em> solution</strong>'.format(self.status)
return html
def __dir__(self):
"""Hide deprecated attributes and methods from the public interface."""
fields = sorted(dir(type(self)) + list(self.__dict__))
fields.remove('f')
fields.remove('x')
fields.remove('y')
fields.remove('x_dict')
fields.remove('y_dict')
return fields
def __getitem__(self, reaction_id):
"""
Return the flux of a reaction.
Parameters
----------
        reaction_id : str
A model reaction ID.
"""
return self.fluxes[reaction_id]
get_primal_by_id = __getitem__
@property
def f(self):
"""Deprecated property for getting the objective value."""
warn("use solution.objective_value instead", DeprecationWarning)
return self.objective_value
@property
def x_dict(self):
"""Deprecated property for getting fluxes."""
warn("use solution.fluxes instead", DeprecationWarning)
return self.fluxes
@x_dict.setter
def x_dict(self, fluxes):
"""Deprecated property for setting fluxes."""
warn("let Model.optimize create a solution instance,"
" don't update yourself", DeprecationWarning)
self.fluxes = fluxes
@property
def x(self):
"""Deprecated property for getting flux values."""
warn("use solution.fluxes.values() instead", DeprecationWarning)
return self.fluxes.values
@property
def y_dict(self):
"""Deprecated property for getting reduced costs."""
warn("use solution.reduced_costs instead", DeprecationWarning)
return self.reduced_costs
@y_dict.setter
def y_dict(self, costs):
"""Deprecated property for setting reduced costs."""
warn("let Model create a solution instance, don't update yourself",
DeprecationWarning)
self.reduced_costs = costs
@property
def y(self):
"""Deprecated property for getting reduced cost values."""
warn("use solution.reduced_costs.values() instead", DeprecationWarning)
return self.reduced_costs.values
def to_frame(self):
"""Return the fluxes and reduced costs as a data frame"""
return DataFrame({'fluxes': self.fluxes,
'reduced_costs': self.reduced_costs})
class LegacySolution(object):
"""
Legacy support for an interface to a `cobra.Model` optimization solution.
Attributes
----------
f : float
The objective value
solver : str
A string indicating which solver package was used.
x : iterable
List or Array of the fluxes (primal values).
x_dict : dict
A dictionary of reaction IDs that maps to the respective primal values.
y : iterable
List or Array of the dual values.
y_dict : dict
A dictionary of reaction IDs that maps to the respective dual values.
Warning
-------
The LegacySolution class and its interface is deprecated.
"""
def __init__(self, f, x=None, x_dict=None, y=None, y_dict=None,
solver=None, the_time=0, status='NA', **kwargs):
"""
Initialize a `LegacySolution` from an objective value.
Parameters
----------
f : float
Objective value.
solver : str, optional
A string indicating which solver package was used.
x : iterable, optional
List or Array of the fluxes (primal values).
x_dict : dict, optional
A dictionary of reaction IDs that maps to the respective primal
values.
y : iterable, optional
List or Array of the dual values.
y_dict : dict, optional
A dictionary of reaction IDs that maps to the respective dual
values.
the_time : int, optional
status : str, optional
.. warning :: deprecated
"""
super(LegacySolution, self).__init__(**kwargs)
self.solver = solver
self.f = f
self.x = x
self.x_dict = x_dict
self.status = status
self.y = y
self.y_dict = y_dict
def __repr__(self):
"""String representation of the solution instance."""
if self.status != "optimal":
return "<LegacySolution {0:s} at 0x{1:x}>".format(
self.status, id(self))
return "<LegacySolution {0:.3f} at 0x{1:x}>".format(
self.f, id(self))
def __getitem__(self, reaction_id):
"""
Return the flux of a reaction.
Parameters
----------
reaction_id : str
A reaction ID.
"""
return self.x_dict[reaction_id]
def dress_results(self, model):
"""
Method could be intended as a decorator.
.. warning :: deprecated
"""
warn("unnecessary to call this deprecated function",
DeprecationWarning)
def get_solution(model, reactions=None, metabolites=None, raise_error=False):
"""
Generate a solution representation of the current solver state.
Parameters
    ----------
model : cobra.Model
The model whose reactions to retrieve values for.
reactions : list, optional
An iterable of `cobra.Reaction` objects. Uses `model.reactions` by
default.
metabolites : list, optional
An iterable of `cobra.Metabolite` objects. Uses `model.metabolites` by
default.
raise_error : bool
If true, raise an OptimizationError if solver status is not optimal.
Returns
-------
cobra.Solution
Note
----
This is only intended for the `optlang` solver interfaces and not the
legacy solvers.
"""
check_solver_status(model.solver.status, raise_error=raise_error)
if reactions is None:
reactions = model.reactions
if metabolites is None:
metabolites = model.metabolites
rxn_index = list()
fluxes = empty(len(reactions))
reduced = empty(len(reactions))
var_primals = model.solver.primal_values
shadow = empty(len(metabolites))
if model.solver.is_integer:
reduced.fill(nan)
shadow.fill(nan)
for (i, rxn) in enumerate(reactions):
rxn_index.append(rxn.id)
fluxes[i] = var_primals[rxn.id] - var_primals[rxn.reverse_id]
met_index = [met.id for met in metabolites]
else:
var_duals = model.solver.reduced_costs
for (i, rxn) in enumerate(reactions):
forward = rxn.id
reverse = rxn.reverse_id
rxn_index.append(forward)
fluxes[i] = var_primals[forward] - var_primals[reverse]
reduced[i] = var_duals[forward] - var_duals[reverse]
met_index = list()
constr_duals = model.solver.shadow_prices
for (i, met) in enumerate(metabolites):
met_index.append(met.id)
shadow[i] = constr_duals[met.id]
return Solution(model.solver.objective.value, model.solver.status,
Series(index=rxn_index, data=fluxes, name="fluxes"),
Series(index=rxn_index, data=reduced,
name="reduced_costs"),
Series(index=met_index, data=shadow, name="shadow_prices"))
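def _solution_layout_sketch():
    # Hedged illustration, not part of the original module: a ``Solution``
    # simply wraps pandas Series keyed by reaction ids, so ``to_frame`` lines
    # fluxes and reduced costs up column-wise. The reaction ids and values
    # below are made up for illustration only.
    fluxes = Series({"EX_glc__D_e": -10.0, "BIOMASS": 0.874}, name="fluxes")
    costs = Series({"EX_glc__D_e": 0.0, "BIOMASS": 0.0}, name="reduced_costs")
    sol = Solution(objective_value=0.874, status=OPTIMAL,
                   fluxes=fluxes, reduced_costs=costs)
    return sol.to_frame()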
| lgpl-2.1 |
nelango/ViralityAnalysis | model/lib/sklearn/linear_model/bayes.py | 220 | 15248 | """
Various Bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
n_samples, n_features = X.shape
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
### Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
### Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_)
/ (lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1)
/ (np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
/ (rmse_ + 2 * alpha_2))
### Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_)
+ n_samples * log(alpha_)
- alpha_ * rmse_
- (lambda_ * np.sum(coef_ ** 2))
- logdet_sigma_
- n_samples * log(2 * np.pi))
self.scores_.append(s)
### Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
self._set_intercept(X_mean, y_mean, X_std)
return self
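def _bayesian_ridge_coef_sketch(X, y, alpha_, lambda_):
    # Hedged sketch, not part of scikit-learn: for fixed precisions alpha_
    # (noise) and lambda_ (weights), the posterior mean that the SVD-based
    # update above computes is plain ridge regression with penalty
    # lambda_ / alpha_, i.e. coef = (X'X + (lambda_ / alpha_) * I)^-1 X'y.
    n_features = X.shape[1]
    A = np.dot(X.T, X) + (lambda_ / alpha_) * np.eye(n_features)
    return linalg.solve(A, np.dot(X.T, y))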
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to be in Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
    The estimation is done by an iterative procedure (Evidence Maximization).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
    -----
See examples/linear_model/plot_ard.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
            Target values (real numbers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
### Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
### Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
### Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda]
* np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
* X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
/ ((coef_[keep_lambda]) ** 2
+ 2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
/ (rmse_ + 2. * alpha_2))
### Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
### Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
+ np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
### Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_mean, y_mean, X_std)
return self
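def _woodbury_sketch():
    # Hedged numeric check, not part of scikit-learn, of the Woodbury matrix
    # identity used above: (D + a * X'X) with D = diag(lam) can be inverted
    # through the smaller n_samples x n_samples system,
    # (D + a X'X)^-1 = D^-1 - D^-1 X' (I/a + X D^-1 X')^-1 X D^-1.
    # The sizes and values below are arbitrary.
    rng = np.random.RandomState(0)
    X = rng.randn(5, 8)
    lam = rng.rand(8) + 0.5
    a = 2.0
    direct = linalg.inv(np.diag(lam) + a * np.dot(X.T, X))
    d_inv = np.diag(1.0 / lam)
    inner = linalg.inv(np.eye(5) / a + np.dot(X, np.dot(d_inv, X.T)))
    woodbury = d_inv - np.dot(d_inv, np.dot(X.T, np.dot(inner, np.dot(X, d_inv))))
    assert np.allclose(direct, woodbury)
    return woodbury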
| mit |
vigilv/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 247 | 2432 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Andreas Mueller <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plot_outer_labeled, = plt.plot(X[labels == outer, 0],
X[labels == outer, 1], 'rs')
plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = plt.plot(X[labels == inner, 0],
X[labels == inner, 1], 'bs')
plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
numpoints=1, shadow=False)
plt.title("Raw data (2 classes=red and blue)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
'upper left', numpoints=1, shadow=False)
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
cbmoore/statsmodels | statsmodels/sandbox/tsa/garch.py | 25 | 52178 | '''general non-linear MLE for time series analysis
idea for general version
------------------------
subclass defines geterrors(parameters) besides loglike,...
and covariance matrix of parameter estimates (e.g. from hessian
or outerproduct of jacobian)
update: I don't really need geterrors directly, but get_h the conditional
variance process
new version Garch0 looks ok, time to clean up and test
no constraints yet
in some cases: "Warning: Maximum number of function evaluations has been exceeded."
Notes
-----
idea: cache intermediate design matrix for geterrors so it doesn't need
to be built at each function call
superclass or result class calculates result statistic based
on errors, loglike, jacobian and cov/hessian
-> aic, bic, ...
-> test statistics, tvalue, fvalue, ...
-> new to add: distribution (mean, cov) of non-linear transformation
-> parameter restrictions or transformation with corrected covparams (?)
-> sse, rss, rsquared ??? are they defined from this in general
-> robust parameter cov ???
-> additional residual based tests, NW, ... likelihood ratio, lagrange
multiplier tests ???
how much can be reused from linear model result classes where
`errorsest = y - X*beta` ?
for tsa: what's the division of labor between model, result instance
and process
examples:
* arma: ls and mle look good
* arimax: add exog, especially mean, trend, prefilter, e.g. (1-L)
* arma_t: arma with t distributed errors (just a change in loglike)
* garch: need loglike and (recursive) errorest
* regime switching model without unobserved state, e.g. threshold
roadmap for garch:
* simple case
* starting values: garch11 explicit formulas
* arma-garch, assumed separable, blockdiagonal Hessian
* empirical example: DJI, S&P500, MSFT, ???
* other standard garch: egarch, pgarch,
* non-normal distributions
* other methods: forecast, news impact curves (impulse response)
* analytical gradient, Hessian for basic garch
* cleaner simulation of garch
* result statistics, AIC, ...
* parameter constraints
* try penalization for higher lags
* other garch: regime-switching
for pgarch (power garch) need transformation of etax given
the parameters, but then misofilter should work
general class aparch (see garch glossary)
References
----------
see notes_references.txt
Created on Feb 6, 2010
@author: "josef pktd"
'''
from __future__ import print_function
from statsmodels.compat.python import zip
import numpy as np
from numpy.testing import assert_almost_equal
from scipy import optimize, signal
import matplotlib.pyplot as plt
import numdifftools as ndt
from statsmodels.base.model import Model, LikelihoodModelResults
from statsmodels.sandbox import tsa
from statsmodels.tsa.filters.filtertools import miso_lfilter  # assumed import path; miso_lfilter is used by the Garch classes below
def sumofsq(x, axis=0):
"""Helper function to calculate sum of squares along first axis"""
return np.sum(x**2, axis=0)
def normloglike(x, mu=0, sigma2=1, returnlls=False, axis=0):
x = np.asarray(x)
x = np.atleast_1d(x)
if axis is None:
x = x.ravel()
#T,K = x.shape
if x.ndim > 1:
nobs = x.shape[axis]
else:
nobs = len(x)
x = x - mu # assume can be broadcasted
if returnlls:
#Compute the individual log likelihoods if needed
lls = -0.5*(np.log(2*np.pi) + np.log(sigma2) + x**2/sigma2)
        # Use these to compute the LL
LL = np.sum(lls,axis)
return LL, lls
else:
#Compute the log likelihood
#print(np.sum(np.log(sigma2),axis))
LL = -0.5 * (np.sum(np.log(sigma2),axis) + np.sum((x**2)/sigma2, axis) + nobs*np.log(2*np.pi))
return LL
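def _normloglike_check():
    # Hedged sanity check, not part of the original module: with one variance
    # per observation (as in the GARCH loglike below, where sigma2 = h), the
    # summed result should match scipy.stats.norm.logpdf. Values are arbitrary.
    from scipy import stats
    x = np.array([0.5, -1.0, 2.0])
    sigma2 = np.array([4.0, 2.0, 1.0])
    ll = normloglike(x, mu=0, sigma2=sigma2)
    ll_ref = stats.norm.logpdf(x, loc=0, scale=np.sqrt(sigma2)).sum()
    assert_almost_equal(ll, ll_ref)
    return ll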
# copied from model.py
class LikelihoodModel(Model):
"""
Likelihood model is a subclass of Model.
"""
def __init__(self, endog, exog=None):
super(LikelihoodModel, self).__init__(endog, exog)
self.initialize()
def initialize(self):
"""
Initialize (possibly re-initialize) a Model instance. For
instance, the design matrix of a linear model may change
and some things must be recomputed.
"""
pass
#TODO: if the intent is to re-initialize the model with new data then
# this method needs to take inputs...
def loglike(self, params):
"""
Log-likelihood of model.
"""
raise NotImplementedError
def score(self, params):
"""
Score vector of model.
The gradient of logL with respect to each parameter.
"""
raise NotImplementedError
def information(self, params):
"""
Fisher information matrix of model
Returns -Hessian of loglike evaluated at params.
"""
raise NotImplementedError
def hessian(self, params):
"""
The Hessian matrix of the model
"""
raise NotImplementedError
def fit(self, start_params=None, method='newton', maxiter=35, tol=1e-08):
"""
Fit method for likelihood based models
Parameters
----------
start_params : array-like, optional
An optional
method : str
Method can be 'newton', 'bfgs', 'powell', 'cg', or 'ncg'.
The default is newton. See scipy.optimze for more information.
"""
methods = ['newton', 'bfgs', 'powell', 'cg', 'ncg', 'fmin']
if start_params is None:
start_params = [0]*self.exog.shape[1] # will fail for shape (K,)
        if method not in methods:
raise ValueError("Unknown fit method %s" % method)
f = lambda params: -self.loglike(params)
score = lambda params: -self.score(params)
# hess = lambda params: -self.hessian(params)
hess = None
#TODO: can we have a unified framework so that we can just do func = method
# and write one call for each solver?
if method.lower() == 'newton':
iteration = 0
start = np.array(start_params)
history = [np.inf, start]
while (iteration < maxiter and np.all(np.abs(history[-1] - \
history[-2])>tol)):
H = self.hessian(history[-1])
newparams = history[-1] - np.dot(np.linalg.inv(H),
self.score(history[-1]))
history.append(newparams)
iteration += 1
mlefit = LikelihoodModelResults(self, newparams)
mlefit.iteration = iteration
elif method == 'bfgs':
score=None
xopt, fopt, gopt, Hopt, func_calls, grad_calls, warnflag = \
optimize.fmin_bfgs(f, start_params, score, full_output=1,
maxiter=maxiter, gtol=tol)
converge = not warnflag
mlefit = LikelihoodModelResults(self, xopt)
optres = 'xopt, fopt, gopt, Hopt, func_calls, grad_calls, warnflag'
self.optimresults = dict(zip(optres.split(', '),[
xopt, fopt, gopt, Hopt, func_calls, grad_calls, warnflag]))
elif method == 'ncg':
xopt, fopt, fcalls, gcalls, hcalls, warnflag = \
optimize.fmin_ncg(f, start_params, score, fhess=hess,
full_output=1, maxiter=maxiter, avextol=tol)
mlefit = LikelihoodModelResults(self, xopt)
converge = not warnflag
elif method == 'fmin':
#fmin(func, x0, args=(), xtol=0.0001, ftol=0.0001, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0, callback=None)
xopt, fopt, niter, funcalls, warnflag = \
optimize.fmin(f, start_params,
full_output=1, maxiter=maxiter, xtol=tol)
mlefit = LikelihoodModelResults(self, xopt)
converge = not warnflag
self._results = mlefit
return mlefit
#TODO: I take it this is only a stub and should be included in another
# model class?
class TSMLEModel(LikelihoodModel):
"""
univariate time series model for estimation with maximum likelihood
Note: This is not working yet
"""
def __init__(self, endog, exog=None):
#need to override p,q (nar,nma) correctly
super(TSMLEModel, self).__init__(endog, exog)
#set default arma(1,1)
self.nar = 1
self.nma = 1
#self.initialize()
def geterrors(self, params):
raise NotImplementedError
def loglike(self, params):
"""
Loglikelihood for timeseries model
Notes
-----
needs to be overwritten by subclass
"""
raise NotImplementedError
def score(self, params):
"""
Score vector for Arma model
"""
#return None
#print(params
jac = ndt.Jacobian(self.loglike, stepMax=1e-4)
return jac(params)[-1]
def hessian(self, params):
"""
Hessian of arma model. Currently uses numdifftools
"""
#return None
Hfun = ndt.Jacobian(self.score, stepMax=1e-4)
return Hfun(params)[-1]
def fit(self, start_params=None, maxiter=5000, method='fmin', tol=1e-08):
'''estimate model by minimizing negative loglikelihood
does this need to be overwritten ?
'''
if start_params is None and hasattr(self, '_start_params'):
start_params = self._start_params
#start_params = np.concatenate((0.05*np.ones(self.nar + self.nma), [1]))
mlefit = super(TSMLEModel, self).fit(start_params=start_params,
maxiter=maxiter, method=method, tol=tol)
return mlefit
class Garch0(TSMLEModel):
'''Garch model,
still experimentation stage:
simplified structure, plain garch, no constraints
still looking for the design of the base class
serious bug:
ar estimate looks ok, ma estimate awful
-> check parameterization of lagpolys and constant
looks ok after adding missing constant
but still difference to garch11 function
corrected initial condition
-> only small differences left between the 3 versions
ar estimate is close to true/DGP model
note constant has different parameterization
but design looks better
'''
def __init__(self, endog, exog=None):
#need to override p,q (nar,nma) correctly
super(Garch0, self).__init__(endog, exog)
#set default arma(1,1)
self.nar = 1
self.nma = 1
#self.initialize()
# put this in fit (?) or in initialize instead
self._etax = endog**2
self._icetax = np.atleast_1d(self._etax.mean())
def initialize(self):
pass
def geth(self, params):
'''
Parameters
----------
params : tuple, (ar, ma)
try to keep the params conversion in loglike
copied from generate_gjrgarch
needs to be extracted to separate function
'''
#mu, ar, ma = params
ar, ma, mu = params
#etax = self.endog #this would be enough for basic garch version
etax = self._etax + mu
icetax = self._icetax #read ic-eta-x, initial condition
        #TODO: where does mu go with lfilter?
# shouldn't matter except for interpretation
nobs = etax.shape[0]
#check arguments of lfilter
zi = signal.lfiltic(ma,ar, icetax)
#h = signal.lfilter(ar, ma, etax, zi=zi) #np.atleast_1d(etax[:,1].mean()))
#just guessing: b/c ValueError: BUG: filter coefficient a[0] == 0 not supported yet
h = signal.lfilter(ma, ar, etax, zi=zi)[0]
return h
def loglike(self, params):
"""
Loglikelihood for timeseries model
Notes
-----
needs to be overwritten by subclass
make more generic with using function _convertparams
which could also include parameter transformation
_convertparams_in, _convertparams_out
allow for different distributions t, ged,...
"""
p, q = self.nar, self.nma
ar = np.concatenate(([1], params[:p]))
# check where constant goes
#ma = np.zeros((q+1,3))
#ma[0,0] = params[-1]
#lag coefficients for ma innovation
ma = np.concatenate(([0], params[p:p+q]))
mu = params[-1]
params = (ar, ma, mu) #(ar, ma)
h = self.geth(params)
#temporary safe for debugging:
self.params_converted = params
self.h = h #for testing
sigma2 = np.maximum(h, 1e-6)
axis = 0
nobs = len(h)
#this doesn't help for exploding paths
#errorsest[np.isnan(errorsest)] = 100
axis=0 #no choice of axis
# same as with y = self.endog, ht = sigma2
# np.log(stats.norm.pdf(y,scale=np.sqrt(ht))).sum()
llike = -0.5 * (np.sum(np.log(sigma2),axis)
+ np.sum(((self.endog)**2)/sigma2, axis)
+ nobs*np.log(2*np.pi))
return llike
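def _garch11_recursion_sketch():
    # Hedged illustration, not part of the original module, of the lfilter
    # trick used in ``Garch0.geth`` above: with b = [0, alpha] and
    # a = [1, -beta], signal.lfilter(b, a, x) runs the recursion
    # h[t] = alpha * x[t-1] + beta * h[t-1] (zero initial state), which is the
    # GARCH(1,1) variance recursion when x[t] = eta[t]**2 + mu. The numbers
    # below are arbitrary.
    alpha, beta = 0.1, 0.8
    x = np.array([1.0, 0.5, 2.0, 0.2])
    h_filter = signal.lfilter([0.0, alpha], [1.0, -beta], x)
    h_loop = np.zeros_like(x)
    for t in range(1, len(x)):
        h_loop[t] = alpha * x[t - 1] + beta * h_loop[t - 1]
    assert_almost_equal(h_filter, h_loop)
    return h_filter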
class GarchX(TSMLEModel):
'''Garch model,
still experimentation stage:
another version, this time with exog and miso_filter
still looking for the design of the base class
not done yet, just a design idea
* use misofilter as in garch (gjr)
* but take etax = exog
    this can include constant, asymmetric effect (gjr) and
other explanatory variables (e.g. high-low spread)
todo: renames
eta -> varprocess
etax -> varprocessx
icetax -> varprocessic (is actually ic of eta/sigma^2)
'''
def __init__(self, endog, exog=None):
#need to override p,q (nar,nma) correctly
        super(GarchX, self).__init__(endog, exog)
#set default arma(1,1)
self.nar = 1
self.nma = 1
#self.initialize()
# put this in fit (?) or in initialize instead
#nobs defined in super - verify
        self.nobs = nobs = endog.shape[0]
        #add nexog to super
        #self.nexog = nexog = exog.shape[1]
        self._etax = np.column_stack((np.ones((nobs, 1)), endog**2, exog))
self._icetax = np.atleast_1d(self._etax.mean())
def initialize(self):
pass
    def convert_mod2params(self, ar, ma, mu):
pass
def geth(self, params):
'''
Parameters
----------
params : tuple, (ar, ma)
try to keep the params conversion in loglike
copied from generate_gjrgarch
needs to be extracted to separate function
'''
#mu, ar, ma = params
ar, ma, mu = params
#etax = self.endog #this would be enough for basic garch version
etax = self._etax + mu
icetax = self._icetax #read ic-eta-x, initial condition
        #TODO: where does mu go with lfilter?
# shouldn't matter except for interpretation
nobs = self.nobs
## #check arguments of lfilter
## zi = signal.lfiltic(ma,ar, icetax)
## #h = signal.lfilter(ar, ma, etax, zi=zi) #np.atleast_1d(etax[:,1].mean()))
## #just guessing: b/c ValueError: BUG: filter coefficient a[0] == 0 not supported yet
## h = signal.lfilter(ma, ar, etax, zi=zi)[0]
##
h = miso_lfilter(ar, ma, etax, useic=self._icetax)[0]
#print('h.shape', h.shape
hneg = h<0
if hneg.any():
#h[hneg] = 1e-6
h = np.abs(h)
#todo: raise warning, maybe not during optimization calls
return h
def loglike(self, params):
"""
Loglikelihood for timeseries model
Notes
-----
needs to be overwritten by subclass
make more generic with using function _convertparams
which could also include parameter transformation
_convertparams_in, _convertparams_out
allow for different distributions t, ged,...
"""
p, q = self.nar, self.nma
ar = np.concatenate(([1], params[:p]))
# check where constant goes
#ma = np.zeros((q+1,3))
#ma[0,0] = params[-1]
#lag coefficients for ma innovation
ma = np.concatenate(([0], params[p:p+q]))
mu = params[-1]
params = (ar, ma, mu) #(ar, ma)
h = self.geth(params)
#temporary safe for debugging:
self.params_converted = params
self.h = h #for testing
sigma2 = np.maximum(h, 1e-6)
axis = 0
nobs = len(h)
#this doesn't help for exploding paths
#errorsest[np.isnan(errorsest)] = 100
axis=0 #no choice of axis
# same as with y = self.endog, ht = sigma2
# np.log(stats.norm.pdf(y,scale=np.sqrt(ht))).sum()
llike = -0.5 * (np.sum(np.log(sigma2),axis)
+ np.sum(((self.endog)**2)/sigma2, axis)
+ nobs*np.log(2*np.pi))
return llike
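def _gjr_design_sketch(eta):
    # Hedged illustration, not part of the original module: the design matrix
    # built in ``Garch.geterrors`` below has a constant, the squared
    # innovation, and the squared innovation kept only where eta < 0, so the
    # third column carries the asymmetric (threshold / leverage) term.
    nobs = eta.shape[0]
    etax = np.empty((nobs, 3))
    etax[:, 0] = 1.0
    etax[:, 1:] = (eta ** 2)[:, None]
    etax[eta > 0, 2] = 0.0
    return etax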
class Garch(TSMLEModel):
'''Garch model gjrgarch (t-garch)
still experimentation stage, try with
'''
def __init__(self, endog, exog=None):
#need to override p,q (nar,nma) correctly
super(Garch, self).__init__(endog, exog)
#set default arma(1,1)
self.nar = 1
self.nma = 1
#self.initialize()
def initialize(self):
pass
def geterrors(self, params):
'''
Parameters
----------
params : tuple, (mu, ar, ma)
try to keep the params conversion in loglike
copied from generate_gjrgarch
needs to be extracted to separate function
'''
#mu, ar, ma = params
ar, ma = params
eta = self.endog
nobs = eta.shape[0]
etax = np.empty((nobs,3))
etax[:,0] = 1
etax[:,1:] = (eta**2)[:,None]
etax[eta>0,2] = 0
#print('etax.shape', etax.shape
h = miso_lfilter(ar, ma, etax, useic=np.atleast_1d(etax[:,1].mean()))[0]
#print('h.shape', h.shape
hneg = h<0
if hneg.any():
#h[hneg] = 1e-6
h = np.abs(h)
#print('Warning negative variance found'
#check timing, starting time for h and eta, do they match
#err = np.sqrt(h[:len(eta)])*eta #np.random.standard_t(8, size=len(h))
# let it break if there is a len/shape mismatch
err = np.sqrt(h)*eta
return err, h, etax
def loglike(self, params):
"""
Loglikelihood for timeseries model
Notes
-----
needs to be overwritten by subclass
"""
p, q = self.nar, self.nma
ar = np.concatenate(([1], params[:p]))
#ar = np.concatenate(([1], -np.abs(params[:p]))) #???
#better safe than fast and sorry
#
ma = np.zeros((q+1,3))
ma[0,0] = params[-1]
#lag coefficients for ma innovation
ma[:,1] = np.concatenate(([0], params[p:p+q]))
#delta lag coefficients for negative ma innovation
ma[:,2] = np.concatenate(([0], params[p+q:p+2*q]))
mu = params[-1]
params = (ar, ma) #(mu, ar, ma)
errorsest, h, etax = self.geterrors(params)
#temporary safe for debugging
self.params_converted = params
self.errorsest, self.h, self.etax = errorsest, h, etax
#h = h[:-1] #correct this in geterrors
#print('shapes errorsest, h, etax', errorsest.shape, h.shape, etax.shape
sigma2 = np.maximum(h, 1e-6)
axis = 0
nobs = len(errorsest)
#this doesn't help for exploding paths
#errorsest[np.isnan(errorsest)] = 100
axis=0 #not used
# muy = errorsest.mean()
# # llike is verified, see below
# # same as with y = errorsest, ht = sigma2
# # np.log(stats.norm.pdf(y,scale=np.sqrt(ht))).sum()
# llike = -0.5 * (np.sum(np.log(sigma2),axis)
# + np.sum(((errorsest)**2)/sigma2, axis)
# + nobs*np.log(2*np.pi))
# return llike
muy = errorsest.mean()
# llike is verified, see below
# same as with y = errorsest, ht = sigma2
# np.log(stats.norm.pdf(y,scale=np.sqrt(ht))).sum()
llike = -0.5 * (np.sum(np.log(sigma2),axis)
+ np.sum(((self.endog)**2)/sigma2, axis)
+ nobs*np.log(2*np.pi))
return llike
def gjrconvertparams(self, params, nar, nma):
"""
flat to matrix
Notes
-----
needs to be overwritten by subclass
"""
p, q = nar, nma
ar = np.concatenate(([1], params[:p]))
#ar = np.concatenate(([1], -np.abs(params[:p]))) #???
#better safe than fast and sorry
#
ma = np.zeros((q+1,3))
ma[0,0] = params[-1]
#lag coefficients for ma innovation
ma[:,1] = np.concatenate(([0], params[p:p+q]))
#delta lag coefficients for negative ma innovation
ma[:,2] = np.concatenate(([0], params[p+q:p+2*q]))
mu = params[-1]
params2 = (ar, ma) #(mu, ar, ma)
        return params2
#TODO: this should be generalized to ARMA?
#can possibly also leverage TSME above
# also note that this is NOT yet general
# it was written for my homework, assumes constant is zero
# and that process is AR(1)
# examples at the end of run as main below
class AR(LikelihoodModel):
"""
Notes
-----
This is not general, only written for the AR(1) case.
Fit methods that use super and broyden do not yet work.
"""
def __init__(self, endog, exog=None, nlags=1):
if exog is None: # extend to handle ADL(p,q) model? or subclass?
exog = endog[:-nlags]
endog = endog[nlags:]
super(AR, self).__init__(endog, exog)
self.nobs += nlags # add lags back to nobs for real T
#TODO: need to fix underscore in Model class.
#Done?
def initialize(self):
pass
def loglike(self, params):
"""
The unconditional loglikelihood of an AR(p) process
Notes
-----
Contains constant term.
"""
nobs = self.nobs
y = self.endog
ylag = self.exog
penalty = self.penalty
if isinstance(params,tuple):
# broyden (all optimize.nonlin return a tuple until rewrite commit)
params = np.asarray(params)
usepenalty=False
if not np.all(np.abs(params)<1) and penalty:
oldparams = params
params = np.array([.9999]) # make it the edge
usepenalty=True
diffsumsq = sumofsq(y-np.dot(ylag,params))
# concentrating the likelihood means that sigma2 is given by
sigma2 = 1/nobs*(diffsumsq-ylag[0]**2*(1-params**2))
loglike = -nobs/2 * np.log(2*np.pi) - nobs/2*np.log(sigma2) + \
.5 * np.log(1-params**2) - .5*diffsumsq/sigma2 -\
ylag[0]**2 * (1-params**2)/(2*sigma2)
if usepenalty:
# subtract a quadratic penalty since we min the negative of loglike
loglike -= 1000 *(oldparams-.9999)**2
return loglike
def score(self, params):
"""
Notes
-----
Need to generalize for AR(p) and for a constant.
Not correct yet. Returns numerical gradient. Depends on package
numdifftools.
"""
y = self.endog
ylag = self.exog
nobs = self.nobs
diffsumsq = sumofsq(y-np.dot(ylag,params))
dsdr = 1/nobs * -2 *np.sum(ylag*(y-np.dot(ylag,params))[:,None])+\
2*params*ylag[0]**2
sigma2 = 1/nobs*(diffsumsq-ylag[0]**2*(1-params**2))
gradient = -nobs/(2*sigma2)*dsdr + params/(1-params**2) + \
1/sigma2*np.sum(ylag*(y-np.dot(ylag, params))[:,None])+\
.5*sigma2**-2*diffsumsq*dsdr+\
ylag[0]**2*params/sigma2 +\
ylag[0]**2*(1-params**2)/(2*sigma2**2)*dsdr
if self.penalty:
pass
        j = ndt.Jacobian(self.loglike)
return j(params)
# return gradient
def information(self, params):
"""
Not Implemented Yet
"""
return
def hessian(self, params):
"""
Returns numerical hessian for now. Depends on numdifftools.
"""
        h = ndt.Hessian(self.loglike)
return h(params)
def fit(self, start_params=None, method='bfgs', maxiter=35, tol=1e-08,
penalty=False):
"""
Fit the unconditional maximum likelihood of an AR(p) process.
Parameters
----------
start_params : array-like, optional
A first guess on the parameters. Defaults is a vector of zeros.
method : str, optional
Unconstrained solvers:
Default is 'bfgs', 'newton' (newton-raphson), 'ncg'
(Note that previous 3 are not recommended at the moment.)
and 'powell'
Constrained solvers:
'bfgs-b', 'tnc'
See notes.
maxiter : int, optional
The maximum number of function evaluations. Default is 35.
tol : float
The convergence tolerance. Default is 1e-08.
penalty : bool
Whether or not to use a penalty function. Default is False,
though this is ignored at the moment and the penalty is always
used if appropriate. See notes.
Notes
-----
The unconstrained solvers use a quadratic penalty (regardless if
penalty kwd is True or False) in order to ensure that the solution
stays within (-1,1). The constrained solvers default to using a bound
of (-.999,.999).
"""
self.penalty = penalty
method = method.lower()
#TODO: allow user-specified penalty function
# if penalty and method not in ['bfgs_b','tnc','cobyla','slsqp']:
# minfunc = lambda params : -self.loglike(params) - \
# self.penfunc(params)
# else:
minfunc = lambda params: -self.loglike(params)
if method in ['newton', 'bfgs', 'ncg']:
super(AR, self).fit(start_params=start_params, method=method,
maxiter=maxiter, tol=tol)
else:
bounds = [(-.999,.999)] # assume stationarity
if start_params is None:
start_params = np.array([0]) #TODO: assumes AR(1)
if method == 'bfgs-b':
retval = optimize.fmin_l_bfgs_b(minfunc, start_params,
approx_grad=True, bounds=bounds)
self.params, self.llf = retval[0:2]
if method == 'tnc':
retval = optimize.fmin_tnc(minfunc, start_params,
approx_grad=True, bounds = bounds)
self.params = retval[0]
if method == 'powell':
retval = optimize.fmin_powell(minfunc,start_params)
self.params = retval[None]
#TODO: write regression tests for Pauli's branch so that
# new line_search and optimize.nonlin can get put in.
#http://projects.scipy.org/scipy/ticket/791
# if method == 'broyden':
# retval = optimize.broyden2(minfunc, [.5], verbose=True)
# self.results = retval
class Arma(LikelihoodModel):
"""
univariate Autoregressive Moving Average model
Note: This is not working yet (or maybe it already does)
this can subclass TSMLEModel
"""
def __init__(self, endog, exog=None):
#need to override p,q (nar,nma) correctly
super(Arma, self).__init__(endog, exog)
#set default arma(1,1)
self.nar = 1
self.nma = 1
#self.initialize()
def initialize(self):
pass
def geterrors(self, params):
#copied from sandbox.tsa.arima.ARIMA
p, q = self.nar, self.nma
rhoy = np.concatenate(([1], params[:p]))
rhoe = np.concatenate(([1], params[p:p+q]))
errorsest = signal.lfilter(rhoy, rhoe, self.endog)
return errorsest
def loglike(self, params):
"""
Loglikelihood for arma model
Notes
-----
The ancillary parameter is assumed to be the last element of
the params vector
"""
# #copied from sandbox.tsa.arima.ARIMA
# p = self.nar
# rhoy = np.concatenate(([1], params[:p]))
# rhoe = np.concatenate(([1], params[p:-1]))
# errorsest = signal.lfilter(rhoy, rhoe, self.endog)
errorsest = self.geterrors(params)
sigma2 = np.maximum(params[-1]**2, 1e-6)
axis = 0
nobs = len(errorsest)
#this doesn't help for exploding paths
#errorsest[np.isnan(errorsest)] = 100
# llike = -0.5 * (np.sum(np.log(sigma2),axis)
# + np.sum((errorsest**2)/sigma2, axis)
# + nobs*np.log(2*np.pi))
llike = -0.5 * (nobs*np.log(sigma2)
+ np.sum((errorsest**2)/sigma2, axis)
+ nobs*np.log(2*np.pi))
return llike
def score(self, params):
"""
Score vector for Arma model
"""
#return None
#print(params)
jac = ndt.Jacobian(self.loglike, stepMax=1e-4)
return jac(params)[-1]
def hessian(self, params):
"""
Hessian of arma model. Currently uses numdifftools
"""
#return None
Hfun = ndt.Jacobian(self.score, stepMax=1e-4)
return Hfun(params)[-1]
def fit(self, start_params=None, maxiter=5000, method='fmin', tol=1e-08):
if start_params is None:
start_params = np.concatenate((0.05*np.ones(self.nar + self.nma), [1]))
mlefit = super(Arma, self).fit(start_params=start_params,
maxiter=maxiter, method=method, tol=tol)
return mlefit
def generate_kindofgarch(nobs, ar, ma, mu=1.):
'''simulate a garch-like process, but without squared errors in the arma part
used for an initial trial; it produces a nice graph
'''
#garm1, gmam1 = [0.4], [0.2]
#pqmax = 1
# res = np.zeros(nobs+pqmax)
# rvs = np.random.randn(nobs+pqmax,2)
# for t in range(pqmax,nobs+pqmax):
# res[i] =
#ar = [1.0, -0.99]
#ma = [1.0, 0.5]
#this has the wrong distribution, should be eps**2
#TODO: use new version tsa.arima.??? instead, has distr option
#arest = tsa.arima.ARIMA()
#arest = tsa.arima.ARIMA #try class method, ARIMA needs data in constructor
from statsmodels.tsa.arima_process import arma_generate_sample
h = arma_generate_sample(ar,ma,nobs,0.1)
#h = np.abs(h)
h = (mu+h)**2
h = np.exp(h)
err = np.sqrt(h)*np.random.randn(nobs)
return err, h
def generate_garch(nobs, ar, ma, mu=1., scale=0.1):
'''simulate standard garch
scale : float
scale/standard deviation of innovation process in GARCH process
'''
eta = scale*np.random.randn(nobs)
# copied from armageneratesample
h = signal.lfilter(ma, ar, eta**2)
#
#h = (mu+h)**2
#h = np.abs(h)
#h = np.exp(h)
#err = np.sqrt(h)*np.random.randn(nobs)
err = np.sqrt(h)*eta #np.random.standard_t(8, size=nobs)
return err, h
def generate_gjrgarch(nobs, ar, ma, mu=1., scale=0.1, varinnovation=None):
'''simulate gjr garch process
Parameters
----------
ar : array_like, 1d
autoregressive term for variance
ma : array_like, 2d
moving average term for variance, with coefficients for negative
shocks in second column
mu : float
constant in variance law of motion
scale : float
scale/standard deviation of innovation process in GARCH process
Returns
-------
err : array 1d, (nobs+?,)
simulated gjr-garch process,
h : array 1d, (nobs+?,)
simulated variance
etax : array 1d, (nobs+?,)
data matrix for constant and ma terms in variance equation
Notes
-----
References
----------
'''
if varinnovation is None: # rename ?
eta = scale*np.random.randn(nobs)
else:
eta = varinnovation
# copied from armageneratesample
etax = np.empty((nobs,3))
etax[:,0] = mu
etax[:,1:] = (eta**2)[:,None]
etax[eta>0,2] = 0
h = miso_lfilter(ar, ma, etax)[0]
#
#h = (mu+h)**2
#h = np.abs(h)
#h = np.exp(h)
#err = np.sqrt(h)*np.random.randn(nobs)
#print('h.shape', h.shape)
err = np.sqrt(h[:len(eta)])*eta #np.random.standard_t(8, size=len(h))
return err, h, etax
def loglike_GARCH11(params, y):
# Computes the likelihood vector of a GARCH11
# assumes y is centered
w = params[0] # constant (1);
alpha = params[1] # coefficient of lagged squared error
beta = params[2] # coefficient of lagged variance
y2 = y**2;
nobs = y2.shape[0]
ht = np.zeros(nobs);
ht[0] = y2.mean() #sum(y2)/T;
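# GARCH(1,1) variance recursion: h_t = w + alpha * y_{t-1}**2 + beta * h_{t-1}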
for i in range(1,nobs):
ht[i] = w + alpha*y2[i-1] + beta * ht[i-1]
sqrtht = np.sqrt(ht)
x = y/sqrtht
llvalues = -0.5*np.log(2*np.pi) - np.log(sqrtht) - 0.5*(x**2);
return llvalues.sum(), llvalues, ht
from statsmodels.tsa.filters.filtertools import miso_lfilter
#copied to statsmodels.tsa.filters.filtertools
def miso_lfilter_old(ar, ma, x, useic=False): #[0.1,0.1]):
'''
use nd convolution to merge inputs,
then use lfilter to produce output
arguments for column variables
return currently 1d
Parameters
----------
ar : array_like, 1d, float
autoregressive lag polynomial including lag zero, ar(L)y_t
ma : array_like, same ndim as x, currently 2d
moving average lag polynomial ma(L)x_t
x : array_like, 2d
input data series, time in rows, variables in columns
Returns
-------
y : array, 1d
filtered output series
inp : array, 1d
combined input series
Notes
-----
currently for 2d inputs only, no choice of axis
Use of signal.lfilter requires that ar lag polynomial contains
floating point numbers
does not cut off invalid starting and final values
miso_lfilter finds the array y such that::
ar(L)y_t = ma(L)x_t
with shapes y (nobs,), x (nobs,nvars), ar (narlags,), ma (narlags,nvars)
'''
ma = np.asarray(ma)
ar = np.asarray(ar)
#inp = signal.convolve(x, ma, mode='valid')
#inp = signal.convolve(x, ma)[:, (x.shape[1]+1)//2]
#Note: convolve mixes up the variable left-right flip
#I only want the flip in time direction
#this might also be a mistake or problem in other code where I
#switched from correlate to convolve
# correct convolve version, for use with fftconvolve in other cases
inp2 = signal.convolve(x, ma[:,::-1])[:, (x.shape[1]+1)//2]
inp = signal.correlate(x, ma[::-1,:])[:, (x.shape[1]+1)//2]
assert_almost_equal(inp2, inp)
nobs = x.shape[0]
# cut off extra values at the end
#todo initialize also x for correlate
if useic:
return signal.lfilter([1], ar, inp,
#zi=signal.lfilter_ic(np.array([1.,0.]),ar, ic))[0][:nobs], inp[:nobs]
zi=signal.lfiltic(np.array([1.,0.]),ar, useic))[0][:nobs], inp[:nobs]
else:
return signal.lfilter([1], ar, inp)[:nobs], inp[:nobs]
#return signal.lfilter([1], ar, inp), inp
def test_misofilter():
x = np.arange(20).reshape(10,2)
y, inp = miso_lfilter([1., -1],[[1,1],[0,0]], x)
assert_almost_equal(y[:-1], x.sum(1).cumsum(), decimal=15)
inp2 = signal.convolve(np.arange(20),np.ones(2))[1::2]
assert_almost_equal(inp[:-1], inp2, decimal=15)
inp2 = signal.convolve(np.arange(20),np.ones(4))[1::2]
y, inp = miso_lfilter([1., -1],[[1,1],[1,1]], x)
assert_almost_equal(y, inp2.cumsum(), decimal=15)
assert_almost_equal(inp, inp2, decimal=15)
y, inp = miso_lfilter([1., 0],[[1,1],[1,1]], x)
assert_almost_equal(y, inp2, decimal=15)
assert_almost_equal(inp, inp2, decimal=15)
x3 = np.column_stack((np.ones((x.shape[0],1)),x))
y, inp = miso_lfilter([1., 0],np.array([[-2.0,3,1],[0.0,0.0,0]]),x3)
y3 = (x3*np.array([-2,3,1])).sum(1)
assert_almost_equal(y[:-1], y3, decimal=15)
assert_almost_equal(y, inp, decimal=15)
y4 = y3.copy()
y4[1:] += x3[:-1,1]
y, inp = miso_lfilter([1., 0],np.array([[-2.0,3,1],[0.0,1.0,0]]),x3)
assert_almost_equal(y[:-1], y4, decimal=15)
assert_almost_equal(y, inp, decimal=15)
y4 = y3.copy()
y4[1:] += x3[:-1,0]
y, inp = miso_lfilter([1., 0],np.array([[-2.0,3,1],[1.0,0.0,0]]),x3)
assert_almost_equal(y[:-1], y4, decimal=15)
assert_almost_equal(y, inp, decimal=15)
y, inp = miso_lfilter([1., -1],np.array([[-2.0,3,1],[1.0,0.0,0]]),x3)
assert_almost_equal(y[:-1], y4.cumsum(), decimal=15)
y4 = y3.copy()
y4[1:] += x3[:-1,2]
y, inp = miso_lfilter([1., 0],np.array([[-2.0,3,1],[0.0,0.0,1.0]]),x3)
assert_almost_equal(y[:-1], y4, decimal=15)
assert_almost_equal(y, inp, decimal=15)
y, inp = miso_lfilter([1., -1],np.array([[-2.0,3,1],[0.0,0.0,1.0]]),x3)
assert_almost_equal(y[:-1], y4.cumsum(), decimal=15)
y, inp = miso_lfilter([1., 0],[[1,0],[1,0],[1,0]], x)
yt = np.convolve(x[:,0], [1,1,1])
assert_almost_equal(y, yt, decimal=15)
assert_almost_equal(inp, yt, decimal=15)
y, inp = miso_lfilter([1., 0],[[0,1],[0,1],[0,1]], x)
yt = np.convolve(x[:,1], [1,1,1])
assert_almost_equal(y, yt, decimal=15)
assert_almost_equal(inp, yt, decimal=15)
y, inp = miso_lfilter([1., 0],[[0,1],[0,1],[1,1]], x)
yt = np.convolve(x[:,1], [1,1,1])
yt[2:] += x[:,0]
assert_almost_equal(y, yt, decimal=15)
assert_almost_equal(inp, yt, decimal=15)
def test_gjrgarch():
# test impulse response of gjr simulator
varinno = np.zeros(100)
varinno[0] = 1.
errgjr5,hgjr5, etax5 = generate_gjrgarch(100, [1.0, 0],
[[1., 1,0],[0, 0.1,0.8],[0, 0.05,0.7],[0, 0.01,0.6]],
mu=0.0,scale=0.1, varinnovation=varinno)
ht = np.array([ 1., 0.1, 0.05, 0.01, 0., 0. ])
assert_almost_equal(hgjr5[:6], ht, decimal=15)
errgjr5,hgjr5, etax5 = generate_gjrgarch(100, [1.0, -1.0],
[[1., 1,0],[0, 0.1,0.8],[0, 0.05,0.7],[0, 0.01,0.6]],
mu=0.0,scale=0.1, varinnovation=varinno)
assert_almost_equal(hgjr5[:6], ht.cumsum(), decimal=15)
errgjr5,hgjr5, etax5 = generate_gjrgarch(100, [1.0, 1.0],
[[1., 1,0],[0, 0.1,0.8],[0, 0.05,0.7],[0, 0.01,0.6]],
mu=0.0,scale=0.1, varinnovation=varinno)
ht1 = [0]
for h in ht: ht1.append(h-ht1[-1])
assert_almost_equal(hgjr5[:6], ht1[1:], decimal=15)
# negative shock
varinno = np.zeros(100)
varinno[0] = -1.
errgjr5,hgjr5, etax5 = generate_gjrgarch(100, [1.0, 0],
[[1., 1,0],[0, 0.1,0.8],[0, 0.05,0.7],[0, 0.01,0.6]],
mu=0.0,scale=0.1, varinnovation=varinno)
ht = np.array([ 1. , 0.9 , 0.75, 0.61, 0. , 0. ])
assert_almost_equal(hgjr5[:6], ht, decimal=15)
errgjr5,hgjr5, etax5 = generate_gjrgarch(100, [1.0, -1.0],
[[1., 1,0],[0, 0.1,0.8],[0, 0.05,0.7],[0, 0.01,0.6]],
mu=0.0,scale=0.1, varinnovation=varinno)
assert_almost_equal(hgjr5[:6], ht.cumsum(), decimal=15)
errgjr5,hgjr5, etax5 = generate_gjrgarch(100, [1.0, 1.0],
[[1., 1,0],[0, 0.1,0.8],[0, 0.05,0.7],[0, 0.01,0.6]],
mu=0.0,scale=0.1, varinnovation=varinno)
ht1 = [0]
for h in ht: ht1.append(h-ht1[-1])
assert_almost_equal(hgjr5[:6], ht1[1:], decimal=15)
'''
>>> print(signal.correlate(x3, np.array([[-2.0,3,1],[0.0,0.0,0]])[::-1,:],mode='full')[:-1, (x3.shape[1]+1)//2])
[ -1. 7. 15. 23. 31. 39. 47. 55. 63. 71.]
>>> (x3*np.array([-2,3,1])).sum(1)
array([ -1., 7., 15., 23., 31., 39., 47., 55., 63., 71.])
'''
def garchplot(err, h, title='Garch simulation'):
plt.figure()
plt.subplot(311)
plt.plot(err)
plt.title(title)
plt.ylabel('y')
plt.subplot(312)
plt.plot(err**2)
plt.ylabel('$y^2$')
plt.subplot(313)
plt.plot(h)
plt.ylabel('conditional variance')
if __name__ == '__main__':
#test_misofilter()
#test_gjrgarch()
examples = ['garch']
if 'arma' in examples:
arest = tsa.arima.ARIMA()
print("\nExample 1")
ar = [1.0, -0.8]
ma = [1.0, 0.5]
y1 = arest.generate_sample(ar,ma,1000,0.1)
y1 -= y1.mean() #no mean correction/constant in estimation so far
arma1 = Arma(y1)
arma1.nar = 1
arma1.nma = 1
arma1res = arma1.fit(method='fmin')
print(arma1res.params)
#Warning need new instance otherwise results carry over
arma2 = Arma(y1)
res2 = arma2.fit(method='bfgs')
print(res2.params)
print(res2.model.hessian(res2.params))
print(ndt.Hessian(arma1.loglike, stepMax=1e-2)(res2.params))
resls = arest.fit(y1,1,1)
print(resls[0])
print(resls[1])
print('\nparameter estimate')
print('parameter of DGP ar(1), ma(1), sigma_error')
print([-0.8, 0.5, 0.1])
print('mle with fmin')
print(arma1res.params)
print('mle with bfgs')
print(res2.params)
print('cond. least squares uses optim.leastsq ?')
errls = arest.error_estimate
print(resls[0], np.sqrt(np.dot(errls,errls)/errls.shape[0]))
err = arma1.geterrors(res2.params)
print('cond least squares parameter cov')
#print(np.dot(err,err)/err.shape[0] * resls[1])
#errls = arest.error_estimate
print(np.dot(errls,errls)/errls.shape[0] * resls[1])
# print('fmin hessian')
# print(arma1res.model.optimresults['Hopt'][:2,:2])
print('bfgs hessian')
print(res2.model.optimresults['Hopt'][:2,:2])
print('numdifftools inverse hessian')
print(-np.linalg.inv(ndt.Hessian(arma1.loglike, stepMax=1e-2)(res2.params))[:2,:2])
arma3 = Arma(y1**2)
res3 = arma3.fit(method='bfgs')
print(res3.params)
nobs = 1000
if 'garch' in examples:
err,h = generate_kindofgarch(nobs, [1.0, -0.95], [1.0, 0.1], mu=0.5)
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(211)
plt.plot(err)
plt.subplot(212)
plt.plot(h)
#plt.show()
seed = 3842774 #91234 #8837708
seed = np.random.randint(9999999)
print('seed', seed)
np.random.seed(seed)
ar1 = -0.9
err,h = generate_garch(nobs, [1.0, ar1], [1.0, 0.50], mu=0.0,scale=0.1)
# plt.figure()
# plt.subplot(211)
# plt.plot(err)
# plt.subplot(212)
# plt.plot(h)
# plt.figure()
# plt.subplot(211)
# plt.plot(err[-400:])
# plt.subplot(212)
# plt.plot(h[-400:])
#plt.show()
garchplot(err, h)
garchplot(err[-400:], h[-400:])
np.random.seed(seed)
errgjr,hgjr, etax = generate_gjrgarch(nobs, [1.0, ar1],
[[1,0],[0.5,0]], mu=0.0,scale=0.1)
garchplot(errgjr[:nobs], hgjr[:nobs], 'GJR-GARCH(1,1) Simulation - symmetric')
garchplot(errgjr[-400:nobs], hgjr[-400:nobs], 'GJR-GARCH(1,1) Simulation - symmetric')
np.random.seed(seed)
errgjr2,hgjr2, etax = generate_gjrgarch(nobs, [1.0, ar1],
[[1,0],[0.1,0.9]], mu=0.0,scale=0.1)
garchplot(errgjr2[:nobs], hgjr2[:nobs], 'GJR-GARCH(1,1) Simulation')
garchplot(errgjr2[-400:nobs], hgjr2[-400:nobs], 'GJR-GARCH(1,1) Simulation')
np.random.seed(seed)
errgjr3,hgjr3, etax3 = generate_gjrgarch(nobs, [1.0, ar1],
[[1,0],[0.1,0.9],[0.1,0.9],[0.1,0.9]], mu=0.0,scale=0.1)
garchplot(errgjr3[:nobs], hgjr3[:nobs], 'GJR-GARCH(1,3) Simulation')
garchplot(errgjr3[-400:nobs], hgjr3[-400:nobs], 'GJR-GARCH(1,3) Simulation')
np.random.seed(seed)
errgjr4,hgjr4, etax4 = generate_gjrgarch(nobs, [1.0, ar1],
[[1., 1,0],[0, 0.1,0.9],[0, 0.1,0.9],[0, 0.1,0.9]],
mu=0.0,scale=0.1)
garchplot(errgjr4[:nobs], hgjr4[:nobs], 'GJR-GARCH(1,3) Simulation')
garchplot(errgjr4[-400:nobs], hgjr4[-400:nobs], 'GJR-GARCH(1,3) Simulation')
varinno = np.zeros(100)
varinno[0] = 1.
errgjr5,hgjr5, etax5 = generate_gjrgarch(100, [1.0, -0.],
[[1., 1,0],[0, 0.1,0.8],[0, 0.05,0.7],[0, 0.01,0.6]],
mu=0.0,scale=0.1, varinnovation=varinno)
garchplot(errgjr5[:20], hgjr5[:20], 'GJR-GARCH(1,3) Simulation')
#garchplot(errgjr4[-400:nobs], hgjr4[-400:nobs], 'GJR-GARCH(1,3) Simulation')
#plt.show()
seed = np.random.randint(9999999) # 9188410
print('seed', seed)
x = np.arange(20).reshape(10,2)
x3 = np.column_stack((np.ones((x.shape[0],1)),x))
y, inp = miso_lfilter([1., 0],np.array([[-2.0,3,1],[0.0,0.0,0]]),x3)
nobs = 1000
warmup = 1000
np.random.seed(seed)
ar = [1.0, -0.7]#7, -0.16, -0.1]
#ma = [[1., 1, 0],[0, 0.6,0.1],[0, 0.1,0.1],[0, 0.1,0.1]]
ma = [[1., 0, 0],[0, 0.4,0.0]] #,[0, 0.9,0.0]]
# errgjr4,hgjr4, etax4 = generate_gjrgarch(warmup+nobs, [1.0, -0.99],
# [[1., 1, 0],[0, 0.6,0.1],[0, 0.1,0.1],[0, 0.1,0.1]],
# mu=0.2, scale=0.25)
errgjr4,hgjr4, etax4 = generate_gjrgarch(warmup+nobs, ar, ma,
mu=0.4, scale=1.01)
errgjr4,hgjr4, etax4 = errgjr4[warmup:], hgjr4[warmup:], etax4[warmup:]
garchplot(errgjr4[:nobs], hgjr4[:nobs], 'GJR-GARCH(1,3) Simulation')
ggmod = Garch(errgjr4-errgjr4.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod.nar = 1
ggmod.nma = 1
ggmod._start_params = np.array([-0.6, 0.1, 0.2, 0.0])
ggres = ggmod.fit(start_params=np.array([-0.6, 0.1, 0.2, 0.0]), maxiter=1000)
print('ggres.params', ggres.params)
garchplot(ggmod.errorsest, ggmod.h)
#plt.show()
print('Garch11')
print(optimize.fmin(lambda params: -loglike_GARCH11(params, errgjr4-errgjr4.mean())[0], [0.93, 0.9, 0.2]))
ggmod0 = Garch0(errgjr4-errgjr4.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod0.nar = 1
ggmod0.nma = 1
start_params = np.array([-0.6, 0.2, 0.1])
ggmod0._start_params = start_params #np.array([-0.6, 0.1, 0.2, 0.0])
ggres0 = ggmod0.fit(start_params=start_params, maxiter=2000)
print('ggres0.params', ggres0.params)
ggmod0 = Garch0(errgjr4-errgjr4.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod0.nar = 1
ggmod0.nma = 1
start_params = np.array([-0.6, 0.2, 0.1])
ggmod0._start_params = start_params #np.array([-0.6, 0.1, 0.2, 0.0])
ggres0 = ggmod0.fit(start_params=start_params, method='bfgs', maxiter=2000)
print('ggres0.params', ggres0.params)
if 'rpy' in examples:
from rpy import r
f = r.formula('~garch(1, 1)')
#fit = r.garchFit(f, data = errgjr4)
x = r.garchSim( n = 500)
print('R acf', tsa.acf(np.power(x,2))[:15])
arma3 = Arma(np.power(x,2))
arma3res = arma3.fit(start_params=[-0.2,0.1,0.5],maxiter=5000)
print(arma3res.params)
arma3b = Arma(np.power(x,2))
arma3bres = arma3b.fit(start_params=[-0.2,0.1,0.5],maxiter=5000, method='bfgs')
print(arma3bres.params)
llf = loglike_GARCH11([0.93, 0.9, 0.2], errgjr4)
print(llf[0])
erro,ho, etaxo = generate_gjrgarch(20, ar, ma, mu=0.04, scale=0.01,
varinnovation = np.ones(20))
''' this looks relatively good
>>> Arma.initialize = lambda x: x
>>> arma3 = Arma(errgjr4**2)
>>> arma3res = arma3.fit()
Warning: Maximum number of function evaluations has been exceeded.
>>> arma3res.params
array([-0.775, -0.583, -0.001])
>>> arma2.nar
1
>>> arma2.nma
1
unit root ?
>>> arma3 = Arma(hgjr4)
>>> arma3res = arma3.fit()
Optimization terminated successfully.
Current function value: -3641.529780
Iterations: 250
Function evaluations: 458
>>> arma3res.params
array([ -1.000e+00, -3.096e-04, 6.343e-03])
or maybe not great
>>> arma3res = arma3.fit(start_params=[-0.8,0.1,0.5],maxiter=5000)
Warning: Maximum number of function evaluations has been exceeded.
>>> arma3res.params
array([-0.086, 0.186, -0.001])
>>> arma3res = arma3.fit(start_params=[-0.8,0.1,0.5],maxiter=5000,method='bfgs')
Divide-by-zero encountered: rhok assumed large
Optimization terminated successfully.
Current function value: -5988.332952
Iterations: 16
Function evaluations: 245
Gradient evaluations: 49
>>> arma3res.params
array([ -9.995e-01, -9.715e-01, 6.501e-04])
'''
'''
current problems
persistence in errgjr looks too low, small tsa.acf(errgjr4**2)[:15]
as a consequence the ML estimate has also very little persistence,
estimated ar term is much too small
-> need to compare with R or matlab
help.search("garch") : ccgarch, garchSim(fGarch), garch(tseries)
HestonNandiGarchFit(fOptions)
> library('fGarch')
> spec = garchSpec()
> x = garchSim(model = spec@model, n = 500)
> acf(x**2) # has low correlation
but fit has high parameters:
> fit = garchFit(~garch(1, 1), data = x)
with rpy:
from rpy import r
r.library('fGarch')
f = r.formula('~garch(1, 1)')
fit = r.garchFit(f, data = errgjr4)
Final Estimate:
LLH: -3198.2 norm LLH: -3.1982
mu omega alpha1 beta1
1.870485e-04 9.437557e-05 3.457349e-02 1.000000e-08
second run with ar = [1.0, -0.8] ma = [[1., 0, 0],[0, 1.0,0.0]]
Final Estimate:
LLH: -3979.555 norm LLH: -3.979555
mu omega alpha1 beta1
1.465050e-05 1.641482e-05 1.092600e-01 9.654438e-02
mine:
>>> ggres.params
array([ -2.000e-06, 3.283e-03, 3.769e-01, -1.000e-06])
another run, same ar, ma
Final Estimate:
LLH: -3956.197 norm LLH: -3.956197
mu omega alpha1 beta1
7.487278e-05 1.171238e-06 1.511080e-03 9.440843e-01
every step needs to be compared and tested
something looks wrong with the likelihood function, either a silly
mistake or still some conceptual problems
* found the silly mistake, I was normalizing the errors before
plugging them into the expression for the likelihood function
* now gjr garch estimation works and produces results that are very
close to the explicit garch11 estimation
initial conditions for miso_filter need to be cleaned up
lots of clean up to do after the bug hunting
'''
y = np.random.randn(20)
params = [0.93, 0.9, 0.2]
lls, llt, ht = loglike_GARCH11(params, y)
sigma2 = ht
axis=0
nobs = len(ht)
llike = -0.5 * (np.sum(np.log(sigma2),axis)
+ np.sum((y**2)/sigma2, axis)
+ nobs*np.log(2*np.pi))
print(lls, llike)
#print(np.log(stats.norm.pdf(y,scale=np.sqrt(ht))).sum())
'''
>>> optimize.fmin(lambda params: -loglike_GARCH11(params, errgjr4)[0], [0.93, 0.9, 0.2])
Optimization terminated successfully.
Current function value: 7312.393886
Iterations: 95
Function evaluations: 175
array([ 3.691, 0.072, 0.932])
>>> ar
[1.0, -0.93000000000000005]
>>> ma
[[1.0, 0, 0], [0, 0.90000000000000002, 0.0]]
'''
np.random.seed(1)
tseries = np.zeros(200) # set first observation
for i in range(1,200): # get 199 more observations based on the given AR(1) process
error = np.random.randn()
tseries[i] = .9 * tseries[i-1] + .01 * error
tseries = tseries[100:]
armodel = AR(tseries)
#armodel.fit(method='bfgs-b')
#armodel.fit(method='tnc')
#powell should be the most robust, see Hamilton 5.7
armodel.fit(method='powell', penalty=True)
# The below don't work yet
#armodel.fit(method='newton', penalty=True)
#armodel.fit(method='broyden', penalty=True)
print("Unconditional MLE for AR(1) y_t = .9*y_t-1 +.01 * err")
print(armodel.params)
| bsd-3-clause |
ap--/python-oceanoptics | examples/liveview.py | 1 | 4743 | #!/usr/bin/env python
""" File: example_liveview.py
Author: Andreas Poehlmann
Last change: 2013/02/27
Liveview example
"""
import oceanoptics
import time
import numpy as np
from gi.repository import Gtk, GLib
class mpl:
from matplotlib.figure import Figure
from matplotlib.backends.backend_gtk3agg import FigureCanvasGTK3Agg as FigureCanvas
class DynamicPlotter(Gtk.Window):
def __init__(self, sample_interval_sec=0.1, smoothing=1, oversampling=1, raw=False, size=(600,350), outfile=None,
lower=None, upper=None):
# Gtk stuff
Gtk.Window.__init__(self, title='Ocean Optics Spectrometer')
self.connect("destroy", lambda x : Gtk.main_quit())
self.set_default_size(*size)
# Data stuff
self.sample_interval_ms = int(sample_interval_sec*1000)
self.smoothing = int(smoothing)
self._sample_n = 0
self.raw = bool(raw)
self.spectrometer = oceanoptics.get_a_random_spectrometer()
self.spectrometer.integration_time(time_sec=(sample_interval_sec * 0.8))
self.wl = self.spectrometer.wavelengths()
self.sp = self.spectrometer.intensities()
self.sp = np.zeros((len(self.sp), int(oversampling)))
# MPL stuff
self.figure = mpl.Figure()
self.ax = self.figure.add_subplot(1, 1, 1)
self.ax.set_xlim(left=lower, right=upper, auto=True)
self.ax.grid(True)
self.canvas = mpl.FigureCanvas(self.figure)
self.line, = self.ax.plot(self.wl, self.sp[:,0])
# Logging
self.outfile = outfile
if self.outfile is not None:
self.outfile.write('# Spectrum File\n'
'# initial table of wavelengths\n'
'# column_number, wavelength\n')
self.outfile.write("\n".join('# %d, %f' % x for x in enumerate(self.wl)))
self.outfile.write("\n#--------------------------\n# time, ")
self.outfile.write(", ".join("%d" % i for i in range(len(self.wl))))
self.outfile.write("\n")
# Gtk stuff
self.add(self.canvas)
self.canvas.show()
self.show_all()
def update_plot(self):
# -> redraw on new spectrum
# -> average over self.sample_n spectra
# -> smooth if self.smoothing
# remark:
# > smoothing can be done after averaging
# get spectrum
sp = np.array(self.spectrometer.intensities(raw=self.raw))
self.sp[:,self._sample_n] = sp
self._sample_n += 1
self._sample_n %= self.sp.shape[1]
if self._sample_n != 0: # do not draw or average
return
# average!
sp = np.mean(self.sp, axis=1)
if self.smoothing > 1:
n = self.smoothing
kernel = np.ones((n,)) / n
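# simple moving-average smoothing; np.convolve in 'full' mode returns
# len(sp) + n - 1 samples, so the first n - 1 are dropped to keep the
# length equal to len(self.wl) for plotting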
sp = np.convolve(sp, kernel)[(n-1):]
self.line.set_ydata(sp)
if self.outfile is not None:
self.outfile.write("%f, " % time.time())
self.outfile.write(", ".join("%f" % s for s in sp) + "\n")
self.ax.relim()
self.ax.autoscale_view(False, False, True)
self.canvas.draw()
return True
def run(self):
GLib.timeout_add(self.sample_interval_ms, self.update_plot)
Gtk.main()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-r', '--raw', action='store_true', help='Show raw detector values')
parser.add_argument('-i', '--interval', type=float, default=0.1, metavar='SECONDS',
help='Update interval')
parser.add_argument('-s', '--smooth', type=int, default=1, metavar='N',
help='Number of spectrum points to average over')
parser.add_argument('-O', '--oversample', type=int, default=1, metavar='N',
help='Average together successive spectra')
parser.add_argument('-l', '--lower', type=float, default=None, metavar='WAVELENGTH',
help='Lower bound of plot')
parser.add_argument('-u', '--upper', type=float, default=None, metavar='WAVELENGTH',
help='Upper bound of plot')
parser.add_argument('--out', type=argparse.FileType('w'), default=None,
help='save to text-file, please implement your own way for saving... this is really just an example (and completely inefficient!!!)')
args = parser.parse_args()
m = DynamicPlotter(sample_interval_sec=args.interval, raw=args.raw, smoothing=args.smooth,
oversampling=args.oversample, outfile=args.out,
lower=args.lower, upper=args.upper)
m.run()
| mit |
dvklopfenstein/biocode | src/Tests/matplotlib/hombre_scatterplot.py | 1 | 1282 | #!/usr/bin/env python
"""Create scatter plot with hombre-style fade using MplColorHelper."""
# Copyright (C) 2014-2015 DV Klopfenstein. All rights reserved.
__author__ = 'DV Klopfenstein'
__copyright__ = "Copyright (C) 2014-2017 DV Klopfenstein. All rights reserved."
__license__ = "GPL"
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from pydvkbiology.matplotlib.ColorObj import MplColorHelper
import numpy as np
def main():
"""Create scatter plot with hombre-style fade using MplColorHelper."""
_, axis = plt.subplots(1, 1, figsize=(6, 6))
num_vals = 20
#pylint: disable=no-member
xval = np.random.uniform(0, num_vals, size=3*num_vals)
yval = np.random.uniform(0, num_vals, size=3*num_vals)
# define the color map between 5 and 15 using the 'autumn_r' colormap, so
# yval <= 5 is yellow
# yval >= 15 is red
# 5 < yval < 15 fades from yellow to red, according to its value
objcol = MplColorHelper('autumn_r', 5, 15)
axis.scatter(xval, yval, s=200, c=objcol.get_rgb(yval))
axis.set_title('Well defined discrete colors')
plt.show()
plt.savefig('hombre_scatterplot.png')
if __name__ == '__main__':
main()
# Copyright (C) 2014-2017 DV Klopfenstein. All rights reserved.
| mit |
pysb/pysb | setup.py | 5 | 2077 | from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup
import versioneer
import os
def main():
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, 'README.rst'), 'r') as f:
long_description = f.read()
cmdclass = versioneer.get_cmdclass()
setup(name='pysb',
version=versioneer.get_version(),
description='Python Systems Biology modeling framework',
long_description=long_description,
long_description_content_type='text/x-rst',
author='Jeremy Muhlich',
author_email='[email protected]',
url='http://pysb.org/',
packages=['pysb', 'pysb.generator', 'pysb.importers', 'pysb.tools',
'pysb.examples', 'pysb.export', 'pysb.simulator',
'pysb.testing', 'pysb.tests'],
scripts=['scripts/pysb_export'],
# We should really specify some minimum versions here.
python_requires='>=3.6',
install_requires=['numpy', 'scipy>=1.1', 'sympy>=1.6', 'networkx',
'futures; python_version == "2.7"'],
setup_requires=['nose'],
tests_require=['coverage', 'pygraphviz', 'matplotlib', 'pexpect',
'pandas', 'h5py', 'mock', 'cython',
'python-libsbml', 'libroadrunner'],
cmdclass=cmdclass,
keywords=['systems', 'biology', 'model', 'rules'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Scientific/Engineering :: Mathematics',
],
)
if __name__ == '__main__':
main()
| bsd-2-clause |
Eric89GXL/scipy | scipy/special/add_newdocs.py | 3 | 185466 | # Docstrings for generated ufuncs
#
# The syntax is designed to look like the function add_newdoc is being
# called from numpy.lib, but in this file add_newdoc puts the
# docstrings in a dictionary. This dictionary is used in
# _generate_pyx.py to generate the docstrings for the ufuncs in
# scipy.special at the C level when the ufuncs are created at compile
# time.
from __future__ import division, print_function, absolute_import
docdict = {}
def get(name):
return docdict.get(name)
def add_newdoc(place, name, doc):
docdict['.'.join((place, name))] = doc
add_newdoc("scipy.special", "_sf_error_test_function",
"""
Private function; do not use.
""")
add_newdoc("scipy.special", "sph_harm",
r"""
sph_harm(m, n, theta, phi)
Compute spherical harmonics.
The spherical harmonics are defined as
.. math::
Y^m_n(\theta,\phi) = \sqrt{\frac{2n+1}{4\pi} \frac{(n-m)!}{(n+m)!}}
e^{i m \theta} P^m_n(\cos(\phi))
where :math:`P_n^m` are the associated Legendre functions; see `lpmv`.
Parameters
----------
m : array_like
Order of the harmonic (int); must have ``|m| <= n``.
n : array_like
Degree of the harmonic (int); must have ``n >= 0``. This is
often denoted by ``l`` (lower case L) in descriptions of
spherical harmonics.
theta : array_like
Azimuthal (longitudinal) coordinate; must be in ``[0, 2*pi]``.
phi : array_like
Polar (colatitudinal) coordinate; must be in ``[0, pi]``.
Returns
-------
y_mn : complex float
The harmonic :math:`Y^m_n` sampled at ``theta`` and ``phi``.
Notes
-----
There are different conventions for the meanings of the input
arguments ``theta`` and ``phi``. In SciPy ``theta`` is the
azimuthal angle and ``phi`` is the polar angle. It is common to
see the opposite convention, that is, ``theta`` as the polar angle
and ``phi`` as the azimuthal angle.
Note that SciPy's spherical harmonics include the Condon-Shortley
phase [2]_ because it is part of `lpmv`.
With SciPy's conventions, the first several spherical harmonics
are
.. math::
Y_0^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{1}{\pi}} \\
Y_1^{-1}(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{2\pi}}
e^{-i\theta} \sin(\phi) \\
Y_1^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{\pi}}
\cos(\phi) \\
Y_1^1(\theta, \phi) &= -\frac{1}{2} \sqrt{\frac{3}{2\pi}}
e^{i\theta} \sin(\phi).
References
----------
.. [1] Digital Library of Mathematical Functions, 14.30.
https://dlmf.nist.gov/14.30
.. [2] https://en.wikipedia.org/wiki/Spherical_harmonics#Condon.E2.80.93Shortley_phase
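Examples
--------
An illustrative check that :math:`Y_0^0` is the constant
:math:`1/(2\sqrt{\pi})` (arbitrary angles):
>>> from scipy.special import sph_harm
>>> np.allclose(sph_harm(0, 0, 0.5, 1.2), 0.5 * np.sqrt(1 / np.pi))
True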
""")
add_newdoc("scipy.special", "_ellip_harm",
"""
Internal function, use `ellip_harm` instead.
""")
add_newdoc("scipy.special", "_ellip_norm",
"""
Internal function, use `ellip_norm` instead.
""")
add_newdoc("scipy.special", "_lambertw",
"""
Internal function, use `lambertw` instead.
""")
add_newdoc("scipy.special", "wrightomega",
r"""
wrightomega(z, out=None)
Wright Omega function.
Defined as the solution to
.. math::
\omega + \log(\omega) = z
where :math:`\log` is the principal branch of the complex logarithm.
Parameters
----------
z : array_like
Points at which to evaluate the Wright Omega function
Returns
-------
omega : ndarray
Values of the Wright Omega function
Notes
-----
.. versionadded:: 0.19.0
The function can also be defined as
.. math::
\omega(z) = W_{K(z)}(e^z)
where :math:`K(z) = \lceil (\Im(z) - \pi)/(2\pi) \rceil` is the
unwinding number and :math:`W` is the Lambert W function.
The implementation here is taken from [1]_.
See Also
--------
lambertw : The Lambert W function
References
----------
.. [1] Lawrence, Corless, and Jeffrey, "Algorithm 917: Complex
Double-Precision Evaluation of the Wright :math:`\omega`
Function." ACM Transactions on Mathematical Software,
2012. :doi:`10.1145/2168773.2168779`.
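Examples
--------
An illustrative check of the defining equation
:math:`\omega + \log(\omega) = z` at a real point:
>>> from scipy.special import wrightomega
>>> w = wrightomega(2.0)
>>> np.allclose(w + np.log(w), 2.0)
True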
""")
add_newdoc("scipy.special", "agm",
"""
agm(a, b)
Compute the arithmetic-geometric mean of `a` and `b`.
Start with a_0 = a and b_0 = b and iteratively compute::
a_{n+1} = (a_n + b_n)/2
b_{n+1} = sqrt(a_n*b_n)
a_n and b_n converge to the same limit as n increases; their common
limit is agm(a, b).
Parameters
----------
a, b : array_like
Real values only. If the values are both negative, the result
is negative. If one value is negative and the other is positive,
`nan` is returned.
Returns
-------
float
The arithmetic-geometric mean of `a` and `b`.
Examples
--------
>>> from scipy.special import agm
>>> a, b = 24.0, 6.0
>>> agm(a, b)
13.458171481725614
Compare that result to the iteration:
>>> while a != b:
... a, b = (a + b)/2, np.sqrt(a*b)
... print("a = %19.16f b=%19.16f" % (a, b))
...
a = 15.0000000000000000 b=12.0000000000000000
a = 13.5000000000000000 b=13.4164078649987388
a = 13.4582039324993694 b=13.4581390309909850
a = 13.4581714817451772 b=13.4581714817060547
a = 13.4581714817256159 b=13.4581714817256159
When array-like arguments are given, broadcasting applies:
>>> a = np.array([[1.5], [3], [6]]) # a has shape (3, 1).
>>> b = np.array([6, 12, 24, 48]) # b has shape (4,).
>>> agm(a, b)
array([[ 3.36454287, 5.42363427, 9.05798751, 15.53650756],
[ 4.37037309, 6.72908574, 10.84726853, 18.11597502],
[ 6. , 8.74074619, 13.45817148, 21.69453707]])
""")
add_newdoc("scipy.special", "airy",
r"""
airy(z)
Airy functions and their derivatives.
Parameters
----------
z : array_like
Real or complex argument.
Returns
-------
Ai, Aip, Bi, Bip : ndarrays
Airy functions Ai and Bi, and their derivatives Aip and Bip.
Notes
-----
The Airy functions Ai and Bi are two independent solutions of
.. math:: y''(x) = x y(x).
For real `z` in [-10, 10], the computation is carried out by calling
the Cephes [1]_ `airy` routine, which uses power series summation
for small `z` and rational minimax approximations for large `z`.
Outside this range, the AMOS [2]_ `zairy` and `zbiry` routines are
employed. They are computed using power series for :math:`|z| < 1` and
the following relations to modified Bessel functions for larger `z`
(where :math:`t \equiv 2 z^{3/2}/3`):
.. math::
Ai(z) = \frac{1}{\pi \sqrt{3}} K_{1/3}(t)
Ai'(z) = -\frac{z}{\pi \sqrt{3}} K_{2/3}(t)
Bi(z) = \sqrt{\frac{z}{3}} \left(I_{-1/3}(t) + I_{1/3}(t) \right)
Bi'(z) = \frac{z}{\sqrt{3}} \left(I_{-2/3}(t) + I_{2/3}(t)\right)
See also
--------
airye : exponentially scaled Airy functions.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
Examples
--------
Compute the Airy functions on the interval [-15, 5].
>>> from scipy import special
>>> x = np.linspace(-15, 5, 201)
>>> ai, aip, bi, bip = special.airy(x)
Plot Ai(x) and Bi(x).
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, ai, 'r', label='Ai(x)')
>>> plt.plot(x, bi, 'b--', label='Bi(x)')
>>> plt.ylim(-0.5, 1.0)
>>> plt.grid()
>>> plt.legend(loc='upper left')
>>> plt.show()
""")
add_newdoc("scipy.special", "airye",
"""
airye(z)
Exponentially scaled Airy functions and their derivatives.
Scaling::
eAi = Ai * exp(2.0/3.0*z*sqrt(z))
eAip = Aip * exp(2.0/3.0*z*sqrt(z))
eBi = Bi * exp(-abs(2.0/3.0*(z*sqrt(z)).real))
eBip = Bip * exp(-abs(2.0/3.0*(z*sqrt(z)).real))
Parameters
----------
z : array_like
Real or complex argument.
Returns
-------
eAi, eAip, eBi, eBip : array_like
Airy functions Ai and Bi, and their derivatives Aip and Bip
Notes
-----
Wrapper for the AMOS [1]_ routines `zairy` and `zbiry`.
See also
--------
airy
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "bdtr",
r"""
bdtr(k, n, p)
Binomial distribution cumulative distribution function.
Sum of the terms 0 through `k` of the Binomial probability density.
.. math::
\mathrm{bdtr}(k, n, p) = \sum_{j=0}^k {{n}\choose{j}} p^j (1-p)^{n-j}
Parameters
----------
k : array_like
Number of successes (int).
n : array_like
Number of events (int).
p : array_like
Probability of success in a single event (float).
Returns
-------
y : ndarray
Probability of `k` or fewer successes in `n` independent events with
success probabilities of `p`.
Notes
-----
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{bdtr}(k, n, p) = I_{1 - p}(n - k, k + 1).
Wrapper for the Cephes [1]_ routine `bdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
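Examples
--------
A quick check against the defining sum (illustrative values):
>>> from scipy.special import bdtr, comb
>>> k, n, p = 4, 10, 0.3
>>> direct = sum(comb(n, j) * p**j * (1 - p)**(n - j) for j in range(k + 1))
>>> np.allclose(bdtr(k, n, p), direct)
True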
""")
add_newdoc("scipy.special", "bdtrc",
r"""
bdtrc(k, n, p)
Binomial distribution survival function.
Sum of the terms `k + 1` through `n` of the binomial probability density,
.. math::
\mathrm{bdtrc}(k, n, p) = \sum_{j=k+1}^n {{n}\choose{j}} p^j (1-p)^{n-j}
Parameters
----------
k : array_like
Number of successes (int).
n : array_like
Number of events (int)
p : array_like
Probability of success in a single event.
Returns
-------
y : ndarray
Probability of `k + 1` or more successes in `n` independent events
with success probabilities of `p`.
See also
--------
bdtr
betainc
Notes
-----
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{bdtrc}(k, n, p) = I_{p}(k + 1, n - k).
Wrapper for the Cephes [1]_ routine `bdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
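Examples
--------
`bdtr` and `bdtrc` are complementary probabilities (illustrative values):
>>> from scipy.special import bdtr, bdtrc
>>> k, n, p = 4, 10, 0.3
>>> np.allclose(bdtr(k, n, p) + bdtrc(k, n, p), 1.0)
True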
""")
add_newdoc("scipy.special", "bdtri",
"""
bdtri(k, n, y)
Inverse function to `bdtr` with respect to `p`.
Finds the event probability `p` such that the sum of the terms 0 through
`k` of the binomial probability density is equal to the given cumulative
probability `y`.
Parameters
----------
k : array_like
Number of successes (float).
n : array_like
Number of events (float)
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
Returns
-------
p : ndarray
The event probability such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
betaincinv
Notes
-----
The computation is carried out using the inverse beta integral function
and the relation,::
1 - p = betaincinv(n - k, k + 1, y).
Wrapper for the Cephes [1]_ routine `bdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
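Examples
--------
A round-trip check with `bdtr` (illustrative values):
>>> from scipy.special import bdtr, bdtri
>>> k, n, y = 4, 10, 0.85
>>> p = bdtri(k, n, y)
>>> np.allclose(bdtr(k, n, p), y)
True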
""")
add_newdoc("scipy.special", "bdtrik",
"""
bdtrik(y, n, p)
Inverse function to `bdtr` with respect to `k`.
Finds the number of successes `k` such that the sum of the terms 0 through
`k` of the Binomial probability density for `n` events with probability
`p` is equal to the given cumulative probability `y`.
Parameters
----------
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
n : array_like
Number of events (float).
p : array_like
Success probability (float).
Returns
-------
k : ndarray
The number of successes `k` such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
Notes
-----
Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the
cumulative incomplete beta distribution.
Computation of `k` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `k`.
Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [2] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
""")
add_newdoc("scipy.special", "bdtrin",
"""
bdtrin(k, y, p)
Inverse function to `bdtr` with respect to `n`.
Finds the number of events `n` such that the sum of the terms 0 through
`k` of the Binomial probability density for events with probability `p` is
equal to the given cumulative probability `y`.
Parameters
----------
k : array_like
Number of successes (float).
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
p : array_like
Success probability (float).
Returns
-------
n : ndarray
The number of events `n` such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
Notes
-----
Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the
cumulative incomplete beta distribution.
Computation of `n` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `n`.
Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [2] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
""")
add_newdoc("scipy.special", "binom",
"""
binom(n, k)
Binomial coefficient
See Also
--------
comb : The number of combinations of N things taken k at a time.
""")
add_newdoc("scipy.special", "btdtria",
r"""
btdtria(p, b, x)
Inverse of `btdtr` with respect to `a`.
This is the inverse of the beta cumulative distribution function, `btdtr`,
considered as a function of `a`, returning the value of `a` for which
`btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
p : array_like
Cumulative probability, in [0, 1].
b : array_like
Shape parameter (`b` > 0).
x : array_like
The quantile, in [0, 1].
Returns
-------
a : ndarray
The value of the shape parameter `a` such that `btdtr(a, b, x) = p`.
See Also
--------
btdtr : Cumulative distribution function of the beta distribution.
btdtri : Inverse with respect to `x`.
btdtrib : Inverse with respect to `b`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `a` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `a`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Algorithm 708: Significant Digit Computation of the Incomplete Beta
Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
""")
add_newdoc("scipy.special", "btdtrib",
r"""
btdtrib(a, p, x)
Inverse of `btdtr` with respect to `b`.
This is the inverse of the beta cumulative distribution function, `btdtr`,
considered as a function of `b`, returning the value of `b` for which
`btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
a : array_like
Shape parameter (`a` > 0).
p : array_like
Cumulative probability, in [0, 1].
x : array_like
The quantile, in [0, 1].
Returns
-------
b : ndarray
The value of the shape parameter `b` such that `btdtr(a, b, x) = p`.
See Also
--------
btdtr : Cumulative distribution function of the beta distribution.
btdtri : Inverse with respect to `x`.
btdtria : Inverse with respect to `a`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `b` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `b`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Algorithm 708: Significant Digit Computation of the Incomplete Beta
Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
""")
add_newdoc("scipy.special", "bei",
"""
bei(x)
Kelvin function bei
""")
add_newdoc("scipy.special", "beip",
"""
beip(x)
Derivative of the Kelvin function `bei`
""")
add_newdoc("scipy.special", "ber",
"""
ber(x)
Kelvin function ber.
""")
add_newdoc("scipy.special", "berp",
"""
berp(x)
Derivative of the Kelvin function `ber`
""")
add_newdoc("scipy.special", "besselpoly",
r"""
besselpoly(a, lmb, nu)
Weighted integral of a Bessel function.
.. math::
\int_0^1 x^\lambda J_\nu(2 a x) \, dx
where :math:`J_\nu` is a Bessel function and :math:`\lambda=lmb`,
:math:`\nu=nu`.
""")
add_newdoc("scipy.special", "beta",
"""
beta(a, b)
Beta function.
::
beta(a, b) = gamma(a) * gamma(b) / gamma(a+b)
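Examples
--------
A quick numerical check of the definition above (illustrative values):
>>> from scipy.special import beta, gamma
>>> a, b = 2.5, 3.5
>>> np.allclose(beta(a, b), gamma(a) * gamma(b) / gamma(a + b))
True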
""")
add_newdoc("scipy.special", "betainc",
"""
betainc(a, b, x)
Incomplete beta integral.
Compute the incomplete beta integral of the arguments, evaluated
from zero to `x`::
gamma(a+b) / (gamma(a)*gamma(b)) * integral(t**(a-1) (1-t)**(b-1), t=0..x).
Notes
-----
The incomplete beta is also sometimes defined without the terms
in gamma, in which case the above definition is the so-called regularized
incomplete beta. Under this definition, you can get the incomplete beta by
multiplying the result of the scipy function by beta(a, b).
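Examples
--------
An illustrative check of the symmetry relation
``betainc(a, b, x) == 1 - betainc(b, a, 1 - x)``:
>>> from scipy.special import betainc
>>> a, b, x = 1.4, 3.1, 0.3
>>> np.allclose(betainc(a, b, x), 1.0 - betainc(b, a, 1.0 - x))
True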
""")
add_newdoc("scipy.special", "betaincinv",
"""
betaincinv(a, b, y)
Inverse function to beta integral.
Compute `x` such that betainc(a, b, x) = y.
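Examples
--------
A round-trip check with `betainc` (illustrative values):
>>> from scipy.special import betainc, betaincinv
>>> a, b, y = 1.4, 3.1, 0.7
>>> np.allclose(betainc(a, b, betaincinv(a, b, y)), y)
True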
""")
add_newdoc("scipy.special", "betaln",
"""
betaln(a, b)
Natural logarithm of absolute value of beta function.
Computes ``ln(abs(beta(a, b)))``.
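Examples
--------
For moderate arguments this agrees with taking the logarithm directly
(illustrative values):
>>> from scipy.special import beta, betaln
>>> a, b = 7.0, 9.0
>>> np.allclose(betaln(a, b), np.log(abs(beta(a, b))))
True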
""")
add_newdoc("scipy.special", "boxcox",
"""
boxcox(x, lmbda)
Compute the Box-Cox transformation.
The Box-Cox transformation is::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Returns `nan` if ``x < 0``.
Returns `-inf` if ``x == 0`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox
>>> boxcox([1, 4, 10], 2.5)
array([ 0. , 12.4 , 126.09110641])
>>> boxcox(2, [0, 1, 2])
array([ 0.69314718, 1. , 1.5 ])
""")
add_newdoc("scipy.special", "boxcox1p",
"""
boxcox1p(x, lmbda)
Compute the Box-Cox transformation of 1 + `x`.
The Box-Cox transformation computed by `boxcox1p` is::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Returns `nan` if ``x < -1``.
Returns `-inf` if ``x == -1`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox1p
>>> boxcox1p(1e-4, [0, 0.5, 1])
array([ 9.99950003e-05, 9.99975001e-05, 1.00000000e-04])
>>> boxcox1p([0.01, 0.1], 0.25)
array([ 0.00996272, 0.09645476])
""")
add_newdoc("scipy.special", "inv_boxcox",
"""
inv_boxcox(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox, inv_boxcox
>>> y = boxcox([1, 4, 10], 2.5)
>>> inv_boxcox(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "inv_boxcox1p",
"""
inv_boxcox1p(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox1p, inv_boxcox1p
>>> y = boxcox1p([1, 4, 10], 2.5)
>>> inv_boxcox1p(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "btdtr",
r"""
btdtr(a, b, x)
Cumulative distribution function of the beta distribution.
Returns the integral from zero to `x` of the beta probability density
function,
.. math::
I = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
Shape parameter (a > 0).
b : array_like
Shape parameter (b > 0).
x : array_like
Upper limit of integration, in [0, 1].
Returns
-------
I : ndarray
Cumulative distribution function of the beta distribution with
parameters `a` and `b` at `x`.
See Also
--------
betainc
Notes
-----
This function is identical to the incomplete beta integral function
`betainc`.
Wrapper for the Cephes [1]_ routine `btdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
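Examples
--------
As noted above, `btdtr` agrees with `betainc` (illustrative values):
>>> from scipy.special import btdtr, betainc
>>> a, b, x = 2.0, 3.0, 0.25
>>> np.allclose(btdtr(a, b, x), betainc(a, b, x))
True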
""")
add_newdoc("scipy.special", "btdtri",
r"""
btdtri(a, b, p)
The `p`-th quantile of the beta distribution.
This function is the inverse of the beta cumulative distribution function,
`btdtr`, returning the value of `x` for which `btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
a : array_like
Shape parameter (`a` > 0).
b : array_like
Shape parameter (`b` > 0).
p : array_like
Cumulative probability, in [0, 1].
Returns
-------
x : ndarray
The quantile corresponding to `p`.
See Also
--------
betaincinv
btdtr
Notes
-----
The value of `x` is found by interval halving or Newton iterations.
Wrapper for the Cephes [1]_ routine `incbi`, which solves the equivalent
problem of finding the inverse of the incomplete beta integral.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
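Examples
--------
A round-trip check with `btdtr` (illustrative values):
>>> from scipy.special import btdtr, btdtri
>>> a, b, p = 2.0, 3.0, 0.7
>>> np.allclose(btdtr(a, b, btdtri(a, b, p)), p)
True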
""")
add_newdoc("scipy.special", "cbrt",
"""
cbrt(x)
Element-wise cube root of `x`.
Parameters
----------
x : array_like
`x` must contain real numbers.
Returns
-------
float
The cube root of each value in `x`.
Examples
--------
>>> from scipy.special import cbrt
>>> cbrt(8)
2.0
>>> cbrt([-8, -3, 0.125, 1.331])
array([-2. , -1.44224957, 0.5 , 1.1 ])
""")
add_newdoc("scipy.special", "chdtr",
"""
chdtr(v, x)
Chi square cumulative distribution function
Returns the area under the left hand tail (from 0 to `x`) of the Chi
square probability density function with `v` degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=0..x)
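Examples
--------
An illustrative check against the regularized lower incomplete gamma
function, to which the chi square CDF reduces:
>>> from scipy.special import chdtr, gammainc
>>> v, x = 3.0, 2.5
>>> np.allclose(chdtr(v, x), gammainc(v / 2, x / 2))
True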
""")
add_newdoc("scipy.special", "chdtrc",
"""
chdtrc(v, x)
Chi square survival function
Returns the area under the right hand tail (from `x` to
infinity) of the Chi square probability density function with `v`
degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=x..inf)
""")
add_newdoc("scipy.special", "chdtri",
"""
chdtri(v, p)
Inverse to `chdtrc`
Returns the argument x such that ``chdtrc(v, x) == p``.
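Examples
--------
A round-trip check with `chdtrc` (illustrative values):
>>> from scipy.special import chdtrc, chdtri
>>> v, p = 4.0, 0.1
>>> np.allclose(chdtrc(v, chdtri(v, p)), p)
True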
""")
add_newdoc("scipy.special", "chdtriv",
"""
chdtriv(p, x)
Inverse to `chdtr` vs `v`
Returns the argument v such that ``chdtr(v, x) == p``.
""")
add_newdoc("scipy.special", "chndtr",
"""
chndtr(x, df, nc)
Non-central chi square cumulative distribution function
""")
add_newdoc("scipy.special", "chndtrix",
"""
chndtrix(p, df, nc)
Inverse to `chndtr` vs `x`
""")
add_newdoc("scipy.special", "chndtridf",
"""
chndtridf(x, p, nc)
Inverse to `chndtr` vs `df`
""")
add_newdoc("scipy.special", "chndtrinc",
"""
chndtrinc(x, df, p)
Inverse to `chndtr` vs `nc`
""")
add_newdoc("scipy.special", "cosdg",
"""
cosdg(x)
Cosine of the angle `x` given in degrees.
""")
add_newdoc("scipy.special", "cosm1",
"""
cosm1(x)
cos(x) - 1 for use when `x` is near zero.
""")
add_newdoc("scipy.special", "cotdg",
"""
cotdg(x)
Cotangent of the angle `x` given in degrees.
""")
add_newdoc("scipy.special", "dawsn",
"""
dawsn(x)
Dawson's integral.
Computes::
exp(-x**2) * integral(exp(t**2), t=0..x).
See Also
--------
wofz, erf, erfc, erfcx, erfi
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-15, 15, num=1000)
>>> plt.plot(x, special.dawsn(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$dawsn(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "ellipe",
r"""
ellipe(m)
Complete elliptic integral of the second kind
This function is defined as
.. math:: E(m) = \int_0^{\pi/2} [1 - m \sin(t)^2]^{1/2} dt
Parameters
----------
m : array_like
Defines the parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpe`.
For `m > 0` the computation uses the approximation,
.. math:: E(m) \approx P(1-m) - (1-m) \log(1-m) Q(1-m),
where :math:`P` and :math:`Q` are tenth-order polynomials. For
`m < 0`, the relation
.. math:: E(m) = E(m/(m - 1)) \sqrt{1-m}
is used.
The parameterization in terms of :math:`m` follows that of section
17.2 in [2]_. Other parameterizations in terms of the
complementary parameter :math:`1 - m`, modular angle
:math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
used, so be careful that you choose the correct parameter.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
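Examples
--------
Illustrative checks of the special values :math:`E(0) = \pi/2` and
:math:`E(1) = 1`:
>>> from scipy.special import ellipe
>>> np.allclose(ellipe(0.0), np.pi / 2)
True
>>> np.allclose(ellipe(1.0), 1.0)
True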
""")
add_newdoc("scipy.special", "ellipeinc",
r"""
ellipeinc(phi, m)
Incomplete elliptic integral of the second kind
This function is defined as
.. math:: E(\phi, m) = \int_0^{\phi} [1 - m \sin(t)^2]^{1/2} dt
Parameters
----------
phi : array_like
amplitude of the elliptic integral.
m : array_like
parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellie`.
Computation uses arithmetic-geometric means algorithm.
The parameterization in terms of :math:`m` follows that of section
17.2 in [2]_. Other parameterizations in terms of the
complementary parameter :math:`1 - m`, modular angle
:math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
used, so be careful that you choose the correct parameter.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("scipy.special", "ellipj",
"""
ellipj(u, m)
Jacobian elliptic functions
Calculates the Jacobian elliptic functions of parameter `m` between
0 and 1, and real argument `u`.
Parameters
----------
m : array_like
Parameter.
u : array_like
Argument.
Returns
-------
sn, cn, dn, ph : ndarrays
The returned functions::
sn(u|m), cn(u|m), dn(u|m)
The value `ph` is such that if `u = ellipkinc(ph, m)`,
then `sn(u|m) = sin(ph)` and `cn(u|m) = cos(ph)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpj`.
These functions are periodic, with quarter-period on the real axis
equal to the complete elliptic integral `ellipk(m)`.
Relation to incomplete elliptic integral: If `u = ellipkinc(phi, m)`, then
`sn(u|m) = sin(phi)`, and `cn(u|m) = cos(phi)`. The `phi` is called
the amplitude of `u`.
Computation is by means of the arithmetic-geometric mean algorithm,
except when `m` is within 1e-9 of 0 or 1. In the latter case with `m`
close to 1, the approximation applies only for `phi < pi/2`.
See also
--------
ellipk : Complete elliptic integral of the first kind.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
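Examples
--------
A minimal check of the identity ``sn**2 + cn**2 == 1``, which follows from
the amplitude relations noted above (the arguments are arbitrary):
>>> import numpy as np
>>> from scipy.special import ellipj
>>> sn, cn, dn, ph = ellipj(0.7, 0.3)
>>> np.allclose(sn**2 + cn**2, 1.0)
True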
""")
add_newdoc("scipy.special", "ellipkm1",
"""
ellipkm1(p)
Complete elliptic integral of the first kind around `m` = 1
This function is defined as
.. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
where `m = 1 - p`.
Parameters
----------
p : array_like
Defines the parameter of the elliptic integral as `m = 1 - p`.
Returns
-------
K : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpk`.
For `p <= 1`, computation uses the approximation,
.. math:: K(p) \\approx P(p) - \\log(p) Q(p),
where :math:`P` and :math:`Q` are tenth-order polynomials. The
argument `p` is used internally rather than `m` so that the logarithmic
singularity at `m = 1` will be shifted to the origin; this preserves
maximum accuracy. For `p > 1`, the identity
.. math:: K(p) = K(1/p)/\\sqrt{p}
is used.
See Also
--------
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
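Examples
--------
Since the argument is defined by ``m = 1 - p``, the function should agree with
``ellipk(1 - p)`` away from the singularity; a quick check with an arbitrary `p`:
>>> import numpy as np
>>> from scipy.special import ellipkm1, ellipk
>>> p = 0.4
>>> np.allclose(ellipkm1(p), ellipk(1 - p))
True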
""")
add_newdoc("scipy.special", "ellipkinc",
r"""
ellipkinc(phi, m)
Incomplete elliptic integral of the first kind
This function is defined as
.. math:: K(\phi, m) = \int_0^{\phi} [1 - m \sin(t)^2]^{-1/2} dt
This function is also called `F(phi, m)`.
Parameters
----------
phi : array_like
amplitude of the elliptic integral
m : array_like
parameter of the elliptic integral
Returns
-------
K : ndarray
Value of the elliptic integral
Notes
-----
Wrapper for the Cephes [1]_ routine `ellik`. The computation is
carried out using the arithmetic-geometric mean algorithm.
The parameterization in terms of :math:`m` follows that of section
17.2 in [2]_. Other parameterizations in terms of the
complementary parameter :math:`1 - m`, modular angle
:math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
used, so be careful that you choose the correct parameter.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
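Examples
--------
A small check that the incomplete integral at amplitude ``phi = pi/2`` reduces
to the complete integral `ellipk` (the parameter value is arbitrary):
>>> import numpy as np
>>> from scipy.special import ellipkinc, ellipk
>>> m = 0.25
>>> np.allclose(ellipkinc(np.pi/2, m), ellipk(m))
True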
""")
add_newdoc("scipy.special", "entr",
r"""
entr(x)
Elementwise function for computing entropy.
.. math:: \text{entr}(x) = \begin{cases} - x \log(x) & x > 0 \\ 0 & x = 0 \\ -\infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The value of the elementwise entropy function at the given points `x`.
See Also
--------
kl_div, rel_entr
Notes
-----
This function is concave.
.. versionadded:: 0.15.0
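Examples
--------
A few values that follow directly from the piecewise definition above:
``entr(1) = 0``, ``entr(0) = 0`` and ``entr(-1) = -inf``:
>>> import numpy as np
>>> from scipy.special import entr
>>> np.allclose(entr([1.0, 0.0, -1.0]), [0.0, 0.0, -np.inf])
True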
""")
add_newdoc("scipy.special", "erf",
"""
erf(z)
Returns the error function of complex argument.
It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The values of the error function at the given points `x`.
See Also
--------
erfc, erfinv, erfcinv, wofz, erfcx, erfi
Notes
-----
The cumulative distribution function of the unit normal distribution is
given by ``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``.
References
----------
.. [1] https://en.wikipedia.org/wiki/Error_function
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover,
1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm
.. [3] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erf(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erf(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "erfc",
"""
erfc(x)
Complementary error function, ``1 - erf(x)``.
See Also
--------
erf, erfi, erfcx, dawsn, wofz
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfc(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfc(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "erfi",
"""
erfi(z)
Imaginary error function, ``-i erf(i z)``.
See Also
--------
erf, erfc, erfcx, dawsn, wofz
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfi(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfi(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "erfcx",
"""
erfcx(x)
Scaled complementary error function, ``exp(x**2) * erfc(x)``.
See Also
--------
erf, erfc, erfi, dawsn, wofz
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfcx(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfcx(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "eval_jacobi",
r"""
eval_jacobi(n, alpha, beta, x, out=None)
Evaluate Jacobi polynomial at a point.
The Jacobi polynomials can be defined via the Gauss hypergeometric
function :math:`{}_2F_1` as
.. math::
P_n^{(\alpha, \beta)}(x) = \frac{(\alpha + 1)_n}{\Gamma(n + 1)}
{}_2F_1(-n, 1 + \alpha + \beta + n; \alpha + 1; (1 - x)/2)
where :math:`(\cdot)_n` is the Pochhammer symbol; see `poch`. When
:math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer the result is
determined via the relation to the Gauss hypergeometric
function.
alpha : array_like
Parameter
beta : array_like
Parameter
x : array_like
Points at which to evaluate the polynomial
Returns
-------
P : ndarray
Values of the Jacobi polynomial
See Also
--------
roots_jacobi : roots and quadrature weights of Jacobi polynomials
jacobi : Jacobi polynomial object
hyp2f1 : Gauss hypergeometric function
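Examples
--------
At ``x = 1`` the hypergeometric factor above equals 1, so the polynomial
reduces to ``binom(n + alpha, n)``; a quick check with arbitrary parameters:
>>> import numpy as np
>>> from scipy.special import eval_jacobi, binom
>>> n, alpha, beta = 4, 0.5, 1.5
>>> np.allclose(eval_jacobi(n, alpha, beta, 1.0), binom(n + alpha, n))
True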
""")
add_newdoc("scipy.special", "eval_sh_jacobi",
r"""
eval_sh_jacobi(n, p, q, x, out=None)
Evaluate shifted Jacobi polynomial at a point.
Defined by
.. math::
G_n^{(p, q)}(x)
= \binom{2n + p - 1}{n}^{-1} P_n^{(p - q, q - 1)}(2x - 1),
where :math:`P_n^{(\cdot, \cdot)}` is the n-th Jacobi polynomial.
Parameters
----------
n : int
Degree of the polynomial. If not an integer, the result is
determined via the relation to `binom` and `eval_jacobi`.
p : float
Parameter
q : float
Parameter
x : float
Points at which to evaluate the polynomial
Returns
-------
G : ndarray
Values of the shifted Jacobi polynomial.
See Also
--------
roots_sh_jacobi : roots and quadrature weights of shifted Jacobi
polynomials
sh_jacobi : shifted Jacobi polynomial object
eval_jacobi : evaluate Jacobi polynomials
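Examples
--------
A quick numerical check of the defining relation above in terms of `binom`
and `eval_jacobi` (the parameter values are arbitrary):
>>> import numpy as np
>>> from scipy.special import eval_sh_jacobi, eval_jacobi, binom
>>> n, p, q, x = 3, 2.5, 1.5, 0.3
>>> lhs = eval_sh_jacobi(n, p, q, x)
>>> rhs = eval_jacobi(n, p - q, q - 1, 2*x - 1) / binom(2*n + p - 1, n)
>>> np.allclose(lhs, rhs)
True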
""")
add_newdoc("scipy.special", "eval_gegenbauer",
r"""
eval_gegenbauer(n, alpha, x, out=None)
Evaluate Gegenbauer polynomial at a point.
The Gegenbauer polynomials can be defined via the Gauss
hypergeometric function :math:`{}_2F_1` as
.. math::
C_n^{(\alpha)}(x) = \frac{(2\alpha)_n}{\Gamma(n + 1)}
{}_2F_1(-n, 2\alpha + n; \alpha + 1/2; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
alpha : array_like
Parameter
x : array_like
Points at which to evaluate the Gegenbauer polynomial
Returns
-------
C : ndarray
Values of the Gegenbauer polynomial
See Also
--------
roots_gegenbauer : roots and quadrature weights of Gegenbauer
polynomials
gegenbauer : Gegenbauer polynomial object
hyp2f1 : Gauss hypergeometric function
""")
add_newdoc("scipy.special", "eval_chebyt",
r"""
eval_chebyt(n, x, out=None)
Evaluate Chebyshev polynomial of the first kind at a point.
The Chebyshev polynomials of the first kind can be defined via the
Gauss hypergeometric function :math:`{}_2F_1` as
.. math::
T_n(x) = {}_2F_1(n, -n; 1/2; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
T : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyt : roots and quadrature weights of Chebyshev
polynomials of the first kind
chebyu : Chebychev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
hyp2f1 : Gauss hypergeometric function
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
Notes
-----
This routine is numerically stable for `x` in ``[-1, 1]`` at least
up to order ``10000``.
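Examples
--------
A sanity check using the familiar trigonometric characterization
``T_n(cos(theta)) == cos(n*theta)``:
>>> import numpy as np
>>> from scipy.special import eval_chebyt
>>> n, theta = 5, 0.7
>>> np.allclose(eval_chebyt(n, np.cos(theta)), np.cos(n*theta))
True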
""")
add_newdoc("scipy.special", "eval_chebyu",
r"""
eval_chebyu(n, x, out=None)
Evaluate Chebyshev polynomial of the second kind at a point.
The Chebyshev polynomials of the second kind can be defined via
the Gauss hypergeometric function :math:`{}_2F_1` as
.. math::
U_n(x) = (n + 1) {}_2F_1(-n, n + 2; 3/2; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
U : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyu : roots and quadrature weights of Chebyshev
polynomials of the second kind
chebyu : Chebyshev polynomial object
eval_chebyt : evaluate Chebyshev polynomials of the first kind
hyp2f1 : Gauss hypergeometric function
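Examples
--------
A sanity check using the trigonometric characterization
``U_n(cos(theta)) == sin((n + 1)*theta)/sin(theta)``:
>>> import numpy as np
>>> from scipy.special import eval_chebyu
>>> n, theta = 5, 0.7
>>> np.allclose(eval_chebyu(n, np.cos(theta)),
...             np.sin((n + 1)*theta)/np.sin(theta))
True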
""")
add_newdoc("scipy.special", "eval_chebys",
r"""
eval_chebys(n, x, out=None)
Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a
point.
These polynomials are defined as
.. math::
S_n(x) = U_n(x/2)
where :math:`U_n` is a Chebyshev polynomial of the second kind.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyu`.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
S : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebys : roots and quadrature weights of Chebyshev
polynomials of the second kind on [-2, 2]
chebys : Chebyshev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
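Examples
--------
A direct numerical check of the relation ``S_n(x) = U_n(x/2)`` stated above
(the inputs are arbitrary points in [-2, 2]):
>>> import numpy as np
>>> from scipy.special import eval_chebys, eval_chebyu
>>> n, x = 4, np.linspace(-2, 2, 5)
>>> np.allclose(eval_chebys(n, x), eval_chebyu(n, x/2))
True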
""")
add_newdoc("scipy.special", "eval_chebyc",
r"""
eval_chebyc(n, x, out=None)
Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a
point.
These polynomials are defined as
.. math::
C_n(x) = 2 T_n(x/2)
where :math:`T_n` is a Chebyshev polynomial of the first kind.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyt`.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
C : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyc : roots and quadrature weights of Chebyshev
polynomials of the first kind on [-2, 2]
chebyc : Chebyshev polynomial object
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
eval_chebyt : evaluate Chebyshev polynomials of the first kind
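Examples
--------
A numerical cross-check of the relation to `eval_chebyt` given above
(the inputs are arbitrary points in [-2, 2]):
>>> import numpy as np
>>> from scipy.special import eval_chebyc, eval_chebyt
>>> n, x = 4, np.linspace(-2, 2, 5)
>>> np.allclose(eval_chebyc(n, x), 2*eval_chebyt(n, x/2))
True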
""")
add_newdoc("scipy.special", "eval_sh_chebyt",
r"""
eval_sh_chebyt(n, x, out=None)
Evaluate shifted Chebyshev polynomial of the first kind at a
point.
These polynomials are defined as
.. math::
T_n^*(x) = T_n(2x - 1)
where :math:`T_n` is a Chebyshev polynomial of the first kind.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyt`.
x : array_like
Points at which to evaluate the shifted Chebyshev polynomial
Returns
-------
T : ndarray
Values of the shifted Chebyshev polynomial
See Also
--------
roots_sh_chebyt : roots and quadrature weights of shifted
Chebyshev polynomials of the first kind
sh_chebyt : shifted Chebyshev polynomial object
eval_chebyt : evaluate Chebyshev polynomials of the first kind
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
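Examples
--------
A direct check of the shift relation ``T_n^*(x) = T_n(2x - 1)`` stated above
(the inputs are arbitrary points in [0, 1]):
>>> import numpy as np
>>> from scipy.special import eval_sh_chebyt, eval_chebyt
>>> n, x = 3, np.linspace(0, 1, 5)
>>> np.allclose(eval_sh_chebyt(n, x), eval_chebyt(n, 2*x - 1))
True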
""")
add_newdoc("scipy.special", "eval_sh_chebyu",
r"""
eval_sh_chebyu(n, x, out=None)
Evaluate shifted Chebyshev polynomial of the second kind at a
point.
These polynomials are defined as
.. math::
U_n^*(x) = U_n(2x - 1)
where :math:`U_n` is a Chebyshev polynomial of the second kind.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyu`.
x : array_like
Points at which to evaluate the shifted Chebyshev polynomial
Returns
-------
U : ndarray
Values of the shifted Chebyshev polynomial
See Also
--------
roots_sh_chebyu : roots and quadrature weights of shifted
Chebychev polynomials of the second kind
sh_chebyu : shifted Chebyshev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
""")
add_newdoc("scipy.special", "eval_legendre",
r"""
eval_legendre(n, x, out=None)
Evaluate Legendre polynomial at a point.
The Legendre polynomials can be defined via the Gauss
hypergeometric function :math:`{}_2F_1` as
.. math::
P_n(x) = {}_2F_1(-n, n + 1; 1; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Legendre polynomial
Returns
-------
P : ndarray
Values of the Legendre polynomial
See Also
--------
roots_legendre : roots and quadrature weights of Legendre
polynomials
legendre : Legendre polynomial object
hyp2f1 : Gauss hypergeometric function
numpy.polynomial.legendre.Legendre : Legendre series
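Examples
--------
At ``x = 1`` the hypergeometric factor above equals 1, so ``P_n(1) = 1`` for
every degree:
>>> import numpy as np
>>> from scipy.special import eval_legendre
>>> np.allclose(eval_legendre([0, 1, 2, 3, 4], 1.0), 1.0)
True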
""")
add_newdoc("scipy.special", "eval_sh_legendre",
r"""
eval_sh_legendre(n, x, out=None)
Evaluate shifted Legendre polynomial at a point.
These polynomials are defined as
.. math::
P_n^*(x) = P_n(2x - 1)
where :math:`P_n` is a Legendre polynomial.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the value is
determined via the relation to `eval_legendre`.
x : array_like
Points at which to evaluate the shifted Legendre polynomial
Returns
-------
P : ndarray
Values of the shifted Legendre polynomial
See Also
--------
roots_sh_legendre : roots and quadrature weights of shifted
Legendre polynomials
sh_legendre : shifted Legendre polynomial object
eval_legendre : evaluate Legendre polynomials
numpy.polynomial.legendre.Legendre : Legendre series
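Examples
--------
A direct check of the shift relation ``P_n^*(x) = P_n(2x - 1)`` stated above
(the inputs are arbitrary points in [0, 1]):
>>> import numpy as np
>>> from scipy.special import eval_sh_legendre, eval_legendre
>>> n, x = 3, np.linspace(0, 1, 5)
>>> np.allclose(eval_sh_legendre(n, x), eval_legendre(n, 2*x - 1))
True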
""")
add_newdoc("scipy.special", "eval_genlaguerre",
r"""
eval_genlaguerre(n, alpha, x, out=None)
Evaluate generalized Laguerre polynomial at a point.
The generalized Laguerre polynomials can be defined via the
confluent hypergeometric function :math:`{}_1F_1` as
.. math::
L_n^{(\alpha)}(x) = \binom{n + \alpha}{n}
{}_1F_1(-n, \alpha + 1, x).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`. The Laguerre polynomials are the special case where
:math:`\alpha = 0`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer the result is
determined via the relation to the confluent hypergeometric
function.
alpha : array_like
Parameter; must have ``alpha > -1``
x : array_like
Points at which to evaluate the generalized Laguerre
polynomial
Returns
-------
L : ndarray
Values of the generalized Laguerre polynomial
See Also
--------
roots_genlaguerre : roots and quadrature weights of generalized
Laguerre polynomials
genlaguerre : generalized Laguerre polynomial object
hyp1f1 : confluent hypergeometric function
eval_laguerre : evaluate Laguerre polynomials
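Examples
--------
As noted above, the Laguerre polynomials are the special case ``alpha = 0``;
a quick numerical check against `eval_laguerre`:
>>> import numpy as np
>>> from scipy.special import eval_genlaguerre, eval_laguerre
>>> n, x = 4, np.linspace(0, 5, 6)
>>> np.allclose(eval_genlaguerre(n, 0.0, x), eval_laguerre(n, x))
True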
""")
add_newdoc("scipy.special", "eval_laguerre",
r"""
eval_laguerre(n, x, out=None)
Evaluate Laguerre polynomial at a point.
The Laguerre polynomials can be defined via the confluent
hypergeometric function :math:`{}_1F_1` as
.. math::
L_n(x) = {}_1F_1(-n, 1, x).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer the result is
determined via the relation to the confluent hypergeometric
function.
x : array_like
Points at which to evaluate the Laguerre polynomial
Returns
-------
L : ndarray
Values of the Laguerre polynomial
See Also
--------
roots_laguerre : roots and quadrature weights of Laguerre
polynomials
laguerre : Laguerre polynomial object
numpy.polynomial.laguerre.Laguerre : Laguerre series
eval_genlaguerre : evaluate generalized Laguerre polynomials
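Examples
--------
Since ``1F1(-n, 1, 0) = 1`` in the definition above, ``L_n(0) = 1`` for every
degree:
>>> import numpy as np
>>> from scipy.special import eval_laguerre
>>> np.allclose(eval_laguerre([0, 1, 2, 3], 0.0), 1.0)
True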
""")
add_newdoc("scipy.special", "eval_hermite",
r"""
eval_hermite(n, x, out=None)
Evaluate physicist's Hermite polynomial at a point.
Defined by
.. math::
H_n(x) = (-1)^n e^{x^2} \frac{d^n}{dx^n} e^{-x^2};
:math:`H_n` is a polynomial of degree :math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial
x : array_like
Points at which to evaluate the Hermite polynomial
Returns
-------
H : ndarray
Values of the Hermite polynomial
See Also
--------
roots_hermite : roots and quadrature weights of physicist's
Hermite polynomials
hermite : physicist's Hermite polynomial object
numpy.polynomial.hermite.Hermite : Physicist's Hermite series
eval_hermitenorm : evaluate Probabilist's Hermite polynomials
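Examples
--------
A quick check against the explicit low-degree polynomial
``H_2(x) = 4*x**2 - 2`` obtained from the Rodrigues formula above:
>>> import numpy as np
>>> from scipy.special import eval_hermite
>>> x = np.linspace(-2, 2, 5)
>>> np.allclose(eval_hermite(2, x), 4*x**2 - 2)
True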
""")
add_newdoc("scipy.special", "eval_hermitenorm",
r"""
eval_hermitenorm(n, x, out=None)
Evaluate probabilist's (normalized) Hermite polynomial at a
point.
Defined by
.. math::
He_n(x) = (-1)^n e^{x^2/2} \frac{d^n}{dx^n} e^{-x^2/2};
:math:`He_n` is a polynomial of degree :math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial
x : array_like
Points at which to evaluate the Hermite polynomial
Returns
-------
He : ndarray
Values of the Hermite polynomial
See Also
--------
roots_hermitenorm : roots and quadrature weights of probabilist's
Hermite polynomials
hermitenorm : probabilist's Hermite polynomial object
numpy.polynomial.hermite_e.HermiteE : Probabilist's Hermite series
eval_hermite : evaluate physicist's Hermite polynomials
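Examples
--------
A quick check against the explicit low-degree polynomial
``He_2(x) = x**2 - 1`` obtained from the Rodrigues formula above:
>>> import numpy as np
>>> from scipy.special import eval_hermitenorm
>>> x = np.linspace(-2, 2, 5)
>>> np.allclose(eval_hermitenorm(2, x), x**2 - 1)
True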
""")
add_newdoc("scipy.special", "exp1",
"""
exp1(z)
Exponential integral E_1 of complex argument z
::
integral(exp(-z*t)/t, t=1..inf).
""")
add_newdoc("scipy.special", "exp10",
"""
exp10(x)
Compute ``10**x`` element-wise.
Parameters
----------
x : array_like
`x` must contain real numbers.
Returns
-------
float
``10**x``, computed element-wise.
Examples
--------
>>> import numpy as np
>>> from scipy.special import exp10
>>> exp10(3)
1000.0
>>> x = np.array([[-1, -0.5, 0], [0.5, 1, 1.5]])
>>> exp10(x)
array([[ 0.1 , 0.31622777, 1. ],
[ 3.16227766, 10. , 31.6227766 ]])
""")
add_newdoc("scipy.special", "exp2",
"""
exp2(x)
Compute ``2**x`` element-wise.
Parameters
----------
x : array_like
`x` must contain real numbers.
Returns
-------
float
``2**x``, computed element-wise.
Examples
--------
>>> import numpy as np
>>> from scipy.special import exp2
>>> exp2(3)
8.0
>>> x = np.array([[-1, -0.5, 0], [0.5, 1, 1.5]])
>>> exp2(x)
array([[ 0.5 , 0.70710678, 1. ],
[ 1.41421356, 2. , 2.82842712]])
""")
add_newdoc("scipy.special", "expi",
"""
expi(x)
Exponential integral Ei
Defined as::
integral(exp(t)/t, t=-inf..x)
See `expn` for a different exponential integral.
""")
add_newdoc('scipy.special', 'expit',
"""
expit(x)
Expit (a.k.a. logistic sigmoid) ufunc for ndarrays.
The expit function, also known as the logistic sigmoid function, is
defined as ``expit(x) = 1/(1+exp(-x))``. It is the inverse of the
logit function.
Parameters
----------
x : ndarray
The ndarray to apply expit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are `expit` of the corresponding entry of x.
See Also
--------
logit
Notes
-----
As a ufunc expit takes a number of optional
keyword arguments. For more information
see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
Examples
--------
>>> import numpy as np
>>> from scipy.special import expit, logit
>>> expit([-np.inf, -1.5, 0, 1.5, np.inf])
array([ 0. , 0.18242552, 0.5 , 0.81757448, 1. ])
`logit` is the inverse of `expit`:
>>> logit(expit([-2.5, 0, 3.1, 5.0]))
array([-2.5, 0. , 3.1, 5. ])
Plot expit(x) for x in [-6, 6]:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-6, 6, 121)
>>> y = expit(x)
>>> plt.plot(x, y)
>>> plt.grid()
>>> plt.xlim(-6, 6)
>>> plt.xlabel('x')
>>> plt.title('expit(x)')
>>> plt.show()
""")
add_newdoc("scipy.special", "expm1",
"""
expm1(x)
Compute ``exp(x) - 1``.
When `x` is near zero, ``exp(x)`` is near 1, so the numerical calculation
of ``exp(x) - 1`` can suffer from catastrophic loss of precision.
``expm1(x)`` is implemented to avoid the loss of precision that occurs when
`x` is near zero.
Parameters
----------
x : array_like
`x` must contain real numbers.
Returns
-------
float
``exp(x) - 1`` computed element-wise.
Examples
--------
>>> import numpy as np
>>> from scipy.special import expm1
>>> expm1(1.0)
1.7182818284590451
>>> expm1([-0.2, -0.1, 0, 0.1, 0.2])
array([-0.18126925, -0.09516258, 0. , 0.10517092, 0.22140276])
The exact value of ``exp(7.5e-13) - 1`` is::
7.5000000000028125000000007031250000001318...*10**-13.
Here is what ``expm1(7.5e-13)`` gives:
>>> expm1(7.5e-13)
7.5000000000028135e-13
Compare that to ``exp(7.5e-13) - 1``, where the subtraction results in
a "catastrophic" loss of precision:
>>> np.exp(7.5e-13) - 1
7.5006667543675576e-13
""")
add_newdoc("scipy.special", "expn",
"""
expn(n, x)
Exponential integral E_n
Returns the exponential integral for non-negative integer `n` and
non-negative real `x`::
integral(exp(-x*t) / t**n, t=1..inf).
""")
add_newdoc("scipy.special", "exprel",
r"""
exprel(x)
Relative error exponential, ``(exp(x) - 1)/x``.
When `x` is near zero, ``exp(x)`` is near 1, so the numerical calculation
of ``exp(x) - 1`` can suffer from catastrophic loss of precision.
``exprel(x)`` is implemented to avoid the loss of precision that occurs when
`x` is near zero.
Parameters
----------
x : ndarray
Input array. `x` must contain real numbers.
Returns
-------
float
``(exp(x) - 1)/x``, computed element-wise.
See Also
--------
expm1
Notes
-----
.. versionadded:: 0.17.0
Examples
--------
>>> import numpy as np
>>> from scipy.special import exprel
>>> exprel(0.01)
1.0050167084168056
>>> exprel([-0.25, -0.1, 0, 0.1, 0.25])
array([ 0.88479687, 0.95162582, 1. , 1.05170918, 1.13610167])
Compare ``exprel(5e-9)`` to the naive calculation. The exact value
is ``1.00000000250000000416...``.
>>> exprel(5e-9)
1.0000000025
>>> (np.exp(5e-9) - 1)/5e-9
0.99999999392252903
""")
add_newdoc("scipy.special", "fdtr",
r"""
fdtr(dfn, dfd, x)
F cumulative distribution function.
Returns the value of the cumulative distribution function of the
F-distribution, also known as Snedecor's F-distribution or the
Fisher-Snedecor distribution.
The F-distribution with parameters :math:`d_n` and :math:`d_d` is the
distribution of the random variable,
.. math::
X = \frac{U_n/d_n}{U_d/d_d},
where :math:`U_n` and :math:`U_d` are random variables distributed
:math:`\chi^2`, with :math:`d_n` and :math:`d_d` degrees of freedom,
respectively.
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
x : array_like
Argument (nonnegative float).
Returns
-------
y : ndarray
The CDF of the F-distribution with parameters `dfn` and `dfd` at `x`.
Notes
-----
The regularized incomplete beta function is used, according to the
formula,
.. math::
F(d_n, d_d; x) = I_{xd_n/(d_d + xd_n)}(d_n/2, d_d/2).
Wrapper for the Cephes [1]_ routine `fdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("scipy.special", "fdtrc",
r"""
fdtrc(dfn, dfd, x)
F survival function.
Returns the complemented F-distribution function (the integral of the
density from `x` to infinity).
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
x : array_like
Argument (nonnegative float).
Returns
-------
y : ndarray
The complemented F-distribution function with parameters `dfn` and
`dfd` at `x`.
See also
--------
fdtr
Notes
-----
The regularized incomplete beta function is used, according to the
formula,
.. math::
F(d_n, d_d; x) = I_{d_d/(d_d + xd_n)}(d_d/2, d_n/2).
Wrapper for the Cephes [1]_ routine `fdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
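Examples
--------
A quick check that `fdtr` and `fdtrc` are complementary (the parameter values
are arbitrary):
>>> import numpy as np
>>> from scipy.special import fdtr, fdtrc
>>> dfn, dfd, x = 3.0, 7.0, 2.1
>>> np.allclose(fdtr(dfn, dfd, x) + fdtrc(dfn, dfd, x), 1.0)
True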
""")
add_newdoc("scipy.special", "fdtri",
r"""
fdtri(dfn, dfd, p)
The `p`-th quantile of the F-distribution.
This function is the inverse of the F-distribution CDF, `fdtr`, returning
the `x` such that `fdtr(dfn, dfd, x) = p`.
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
p : array_like
Cumulative probability, in [0, 1].
Returns
-------
x : ndarray
The quantile corresponding to `p`.
Notes
-----
The computation is carried out using the relation to the inverse
regularized beta function, :math:`I^{-1}_x(a, b)`. Let
:math:`z = I^{-1}_p(d_d/2, d_n/2).` Then,
.. math::
x = \frac{d_d (1 - z)}{d_n z}.
If `p` is such that :math:`x < 0.5`, the following relation is used
instead for improved stability: let
:math:`z' = I^{-1}_{1 - p}(d_n/2, d_d/2).` Then,
.. math::
x = \frac{d_d z'}{d_n (1 - z')}.
Wrapper for the Cephes [1]_ routine `fdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
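Examples
--------
A round-trip check of the inverse relation to `fdtr` described above (the
parameter values are arbitrary):
>>> import numpy as np
>>> from scipy.special import fdtr, fdtri
>>> dfn, dfd, p = 3.0, 7.0, 0.6
>>> np.allclose(fdtr(dfn, dfd, fdtri(dfn, dfd, p)), p)
True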
""")
add_newdoc("scipy.special", "fdtridfd",
"""
fdtridfd(dfn, p, x)
Inverse to `fdtr` vs dfd
Finds the F density argument dfd such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("scipy.special", "fdtridfn",
"""
fdtridfn(p, dfd, x)
Inverse to `fdtr` vs dfn
Finds the F density argument dfn such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("scipy.special", "fresnel",
"""
fresnel(z)
Fresnel sin and cos integrals
Defined as::
ssa = integral(sin(pi/2 * t**2), t=0..z)
csa = integral(cos(pi/2 * t**2), t=0..z)
Parameters
----------
z : float or complex array_like
Argument
Returns
-------
ssa, csa
Fresnel sin and cos integral values
""")
add_newdoc("scipy.special", "gamma",
r"""
gamma(z)
Gamma function.
.. math::
\Gamma(z) = \int_0^\infty x^{z-1} e^{-x} dx = (z - 1)!
The gamma function is often referred to as the generalized
factorial since ``z*gamma(z) = gamma(z+1)`` and ``gamma(n+1) =
n!`` for natural number *n*.
Parameters
----------
z : float or complex array_like
Returns
-------
float or complex
The value(s) of gamma(z)
Examples
--------
>>> import numpy as np
>>> from scipy.special import gamma, factorial
>>> gamma([0, 0.5, 1, 5])
array([ inf, 1.77245385, 1. , 24. ])
>>> z = 2.5 + 1j
>>> gamma(z)
(0.77476210455108352+0.70763120437959293j)
>>> gamma(z+1), z*gamma(z) # Recurrence property
((1.2292740569981171+2.5438401155000685j),
(1.2292740569981158+2.5438401155000658j))
>>> gamma(0.5)**2 # gamma(0.5) = sqrt(pi)
3.1415926535897927
Plot gamma(x) for real x
>>> x = np.linspace(-3.5, 5.5, 2251)
>>> y = gamma(x)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'b', alpha=0.6, label='gamma(x)')
>>> k = np.arange(1, 7)
>>> plt.plot(k, factorial(k-1), 'k*', alpha=0.6,
... label='(x-1)!, x = 1, 2, ...')
>>> plt.xlim(-3.5, 5.5)
>>> plt.ylim(-10, 25)
>>> plt.grid()
>>> plt.xlabel('x')
>>> plt.legend(loc='lower right')
>>> plt.show()
""")
add_newdoc("scipy.special", "gammainc",
r"""
gammainc(a, x)
Regularized lower incomplete gamma function.
Defined as
.. math::
\frac{1}{\Gamma(a)} \int_0^x t^{a - 1}e^{-t} dt
for :math:`a > 0` and :math:`x \geq 0`. The function satisfies the
relation ``gammainc(a, x) + gammaincc(a, x) = 1`` where
`gammaincc` is the regularized upper incomplete gamma function.
Notes
-----
The implementation largely follows that of [1]_.
See also
--------
gammaincc : regularized upper incomplete gamma function
gammaincinv : inverse to ``gammainc`` versus ``x``
gammainccinv : inverse to ``gammaincc`` versus ``x``
References
----------
.. [1] Maddock et. al., "Incomplete Gamma Functions",
https://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html
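Examples
--------
A minimal check of the complementary relation stated above (the inputs are
arbitrary positive values):
>>> import numpy as np
>>> from scipy.special import gammainc, gammaincc
>>> a, x = 2.5, 1.3
>>> np.allclose(gammainc(a, x) + gammaincc(a, x), 1.0)
True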
""")
add_newdoc("scipy.special", "gammaincc",
r"""
gammaincc(a, x)
Regularized upper incomplete gamma function.
Defined as
.. math::
\frac{1}{\Gamma(a)} \int_x^\infty t^{a - 1}e^{-t} dt
for :math:`a > 0` and :math:`x \geq 0`. The function satisfies the
relation ``gammainc(a, x) + gammaincc(a, x) = 1`` where `gammainc`
is the regularized lower incomplete gamma function.
Notes
-----
The implementation largely follows that of [1]_.
See also
--------
gammainc : regularized lower incomplete gamma function
gammaincinv : inverse to ``gammainc`` versus ``x``
gammainccinv : inverse to ``gammaincc`` versus ``x``
References
----------
.. [1] Maddock et. al., "Incomplete Gamma Functions",
https://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html
""")
add_newdoc("scipy.special", "gammainccinv",
"""
gammainccinv(a, y)
Inverse to `gammaincc`
Returns `x` such that ``gammaincc(a, x) == y``.
""")
add_newdoc("scipy.special", "gammaincinv",
"""
gammaincinv(a, y)
Inverse to `gammainc`
Returns `x` such that ``gammainc(a, x) = y``.
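Examples
--------
A round-trip check of the inverse relation (the inputs are arbitrary):
>>> import numpy as np
>>> from scipy.special import gammainc, gammaincinv
>>> a, y = 3.0, 0.7
>>> np.allclose(gammainc(a, gammaincinv(a, y)), y)
True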
""")
add_newdoc("scipy.special", "gammaln",
"""
gammaln(x)
Logarithm of the absolute value of the Gamma function.
Parameters
----------
x : array_like
Values on the real line at which to compute ``gammaln``.
Returns
-------
gammaln : ndarray
Values of ``gammaln`` at x.
See Also
--------
gammasgn : sign of the gamma function
loggamma : principal branch of the logarithm of the gamma function
Notes
-----
When used in conjunction with `gammasgn`, this function is useful
for working in logspace on the real axis without having to deal with
complex numbers, via the relation ``exp(gammaln(x)) = gammasgn(x)*gamma(x)``.
For complex-valued log-gamma, use `loggamma` instead of `gammaln`.
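Examples
--------
A quick check of the relation noted above on a few real inputs, including a
negative one where the gamma function itself is negative:
>>> import numpy as np
>>> from scipy.special import gammaln, gammasgn, gamma
>>> x = np.array([0.5, 3.7, -0.5])
>>> np.allclose(np.exp(gammaln(x)) * gammasgn(x), gamma(x))
True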
""")
add_newdoc("scipy.special", "gammasgn",
"""
gammasgn(x)
Sign of the gamma function.
See Also
--------
gammaln
loggamma
""")
add_newdoc("scipy.special", "gdtr",
r"""
gdtr(a, b, x)
Gamma distribution cumulative distribution function.
Returns the integral from zero to `x` of the gamma probability density
function,
.. math::
F = \int_0^x \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt,
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
The rate parameter of the gamma distribution, sometimes denoted
:math:`\beta` (float). It is also the reciprocal of the scale
parameter :math:`\theta`.
b : array_like
The shape parameter of the gamma distribution, sometimes denoted
:math:`\alpha` (float).
x : array_like
The quantile (upper limit of integration; float).
See also
--------
gdtrc : 1 - CDF of the gamma distribution.
Returns
-------
F : ndarray
The CDF of the gamma distribution with parameters `a` and `b`
evaluated at `x`.
Notes
-----
The evaluation is carried out using the relation to the incomplete gamma
integral (regularized gamma function).
Wrapper for the Cephes [1]_ routine `gdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("scipy.special", "gdtrc",
r"""
gdtrc(a, b, x)
Gamma distribution survival function.
Integral from `x` to infinity of the gamma probability density function,
.. math::
F = \int_x^\infty \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt,
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
The rate parameter of the gamma distribution, sometimes denoted
:math:`\beta` (float). It is also the reciprocal of the scale
parameter :math:`\theta`.
b : array_like
The shape parameter of the gamma distribution, sometimes denoted
:math:`\alpha` (float).
x : array_like
The quantile (lower limit of integration; float).
Returns
-------
F : ndarray
The survival function of the gamma distribution with parameters `a`
and `b` evaluated at `x`.
See Also
--------
gdtr, gdtrix
Notes
-----
The evaluation is carried out using the relation to the incomplete gamma
integral (regularized gamma function).
Wrapper for the Cephes [1]_ routine `gdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("scipy.special", "gdtria",
"""
gdtria(p, b, x, out=None)
Inverse of `gdtr` vs a.
Returns the inverse with respect to the parameter `a` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
p : array_like
Probability values.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `p`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
a : ndarray
Values of the `a` parameter such that `p = gdtr(a, b, x)`. `1/a`
is the "scale" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `a` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `a`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtria
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtria(p, 3.4, 5.6)
1.2
""")
add_newdoc("scipy.special", "gdtrib",
"""
gdtrib(a, p, x, out=None)
Inverse of `gdtr` vs b.
Returns the inverse with respect to the parameter `b` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
p : array_like
Probability values.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `p` and `x`. `out` is then the
array returned by the function.
Returns
-------
b : ndarray
Values of the `b` parameter such that `p = gdtr(a, b, x)`. `b` is
the "shape" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `b` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `b`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrib
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrib(1.2, p, 5.6)
3.3999999999723882
""")
add_newdoc("scipy.special", "gdtrix",
"""
gdtrix(a, b, p, out=None)
Inverse of `gdtr` vs x.
Returns the inverse with respect to the parameter `x` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution. This is also known as the p'th quantile of the
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
p : array_like
Probability values.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `p`. `out` is then the
array returned by the function.
Returns
-------
x : ndarray
Values of the `x` parameter such that `p = gdtr(a, b, x)`.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `x` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `x`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrix
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrix(1.2, 3.4, p)
5.5999999999999996
""")
add_newdoc("scipy.special", "hankel1",
r"""
hankel1(v, z)
Hankel function of the first kind
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the Hankel function of the first kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
is used.
See also
--------
hankel1e : this function with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "hankel1e",
r"""
hankel1e(v, z)
Exponentially scaled Hankel function of the first kind
Defined as::
hankel1e(v, z) = hankel1(v, z) * exp(-1j * z)
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the exponentially scaled Hankel function.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
is used.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "hankel2",
r"""
hankel2(v, z)
Hankel function of the second kind
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the Hankel function of the second kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\imath \pi v/2) K_v(z \exp(\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
is used.
See also
--------
hankel2e : this function with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "hankel2e",
r"""
hankel2e(v, z)
Exponentially scaled Hankel function of the second kind
Defined as::
hankel2e(v, z) = hankel2(v, z) * exp(1j * z)
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the exponentially scaled Hankel function of the second kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\frac{\imath \pi v}{2}) K_v(z \exp(\frac{\imath\pi}{2}))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
is used.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "huber",
r"""
huber(delta, r)
Huber loss function.
.. math:: \text{huber}(\delta, r) = \begin{cases} \infty & \delta < 0 \\ \frac{1}{2}r^2 & 0 \le \delta, | r | \le \delta \\ \delta ( |r| - \frac{1}{2}\delta ) & \text{otherwise} \end{cases}
Parameters
----------
delta : ndarray
Input array, indicating the quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Huber loss function values.
Notes
-----
This function is convex in r.
.. versionadded:: 0.15.0
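Examples
--------
Values worked out directly from the piecewise definition above: the quadratic
branch gives ``0.5**2 / 2 = 0.125``, the linear branch gives
``1*(3 - 0.5) = 2.5``, and a negative `delta` yields infinity:
>>> import numpy as np
>>> from scipy.special import huber
>>> np.allclose(huber(1.0, [0.5, 3.0]), [0.125, 2.5])
True
>>> bool(np.isinf(huber(-1.0, 2.0)))
True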
""")
add_newdoc("scipy.special", "hyp0f1",
r"""
hyp0f1(v, x)
Confluent hypergeometric limit function 0F1.
Parameters
----------
v, z : array_like
Input values.
Returns
-------
hyp0f1 : ndarray
The confluent hypergeometric limit function.
Notes
-----
This function is defined as:
.. math:: _0F_1(v, z) = \sum_{k=0}^{\infty}\frac{z^k}{(v)_k k!}.
It's also the limit as :math:`q \to \infty` of :math:`_1F_1(q; v; z/q)`,
and satisfies the differential equation :math:`f''(z) + vf'(z) = f(z)`.
""")
add_newdoc("scipy.special", "hyp1f1",
"""
hyp1f1(a, b, x)
Confluent hypergeometric function 1F1(a, b; x)
""")
add_newdoc("scipy.special", "hyp1f2",
"""
hyp1f2(a, b, c, x)
Hypergeometric function 1F2 and error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f0",
"""
hyp2f0(a, b, x, type)
Hypergeometric function 2F0 and an error estimate
The parameter `type` determines a convergence factor and can be
either 1 or 2.
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f1",
r"""
hyp2f1(a, b, c, z)
Gauss hypergeometric function 2F1(a, b; c; z)
Parameters
----------
a, b, c : array_like
Arguments, should be real-valued.
z : array_like
Argument, real or complex.
Returns
-------
hyp2f1 : scalar or ndarray
The values of the gaussian hypergeometric function.
See also
--------
hyp0f1 : confluent hypergeometric limit function.
hyp1f1 : Kummer's (confluent hypergeometric) function.
Notes
-----
This function is defined for :math:`|z| < 1` as
.. math::
\mathrm{hyp2f1}(a, b, c, z) = \sum_{n=0}^\infty
\frac{(a)_n (b)_n}{(c)_n}\frac{z^n}{n!},
and defined on the rest of the complex z-plane by analytic continuation.
Here :math:`(\cdot)_n` is the Pochhammer symbol; see `poch`. When :math:`a`
or :math:`b` is a nonpositive integer the series terminates and the result
is a polynomial in :math:`z`.
The implementation for complex values of ``z`` is described in [1]_.
References
----------
.. [1] S. Zhang and J.M. Jin, "Computation of Special Functions", Wiley 1996
.. [2] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [3] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/
""")
add_newdoc("scipy.special", "hyp3f0",
"""
hyp3f0(a, b, c, x)
Hypergeometric function 3F0 and an error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyperu",
"""
hyperu(a, b, x)
Confluent hypergeometric function U(a, b, x) of the second kind
""")
add_newdoc("scipy.special", "i0",
r"""
i0(x)
Modified Bessel function of order 0.
Defined as,
.. math::
I_0(x) = \sum_{k=0}^\infty \frac{(x^2/4)^k}{(k!)^2} = J_0(\imath x),
where :math:`J_0` is the Bessel function of the first kind of order 0.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the modified Bessel function of order 0 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `i0`.
See also
--------
iv
i0e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("scipy.special", "i0e",
"""
i0e(x)
Exponentially scaled modified Bessel function of order 0.
Defined as::
i0e(x) = exp(-abs(x)) * i0(x).
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the exponentially scaled modified Bessel function of order 0
at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval. The
polynomial expansions used are the same as those in `i0`, but
they are not multiplied by the dominant exponential factor.
This function is a wrapper for the Cephes [1]_ routine `i0e`.
See also
--------
iv
i0
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("scipy.special", "i1",
r"""
i1(x)
Modified Bessel function of order 1.
Defined as,
.. math::
I_1(x) = \frac{1}{2}x \sum_{k=0}^\infty \frac{(x^2/4)^k}{k! (k + 1)!}
= -\imath J_1(\imath x),
where :math:`J_1` is the Bessel function of the first kind of order 1.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the modified Bessel function of order 1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `i1`.
See also
--------
iv
i1e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("scipy.special", "i1e",
"""
i1e(x)
Exponentially scaled modified Bessel function of order 1.
Defined as::
i1e(x) = exp(-abs(x)) * i1(x)
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the exponentially scaled modified Bessel function of order 1
at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval. The
polynomial expansions used are the same as those in `i1`, but
they are not multiplied by the dominant exponential factor.
This function is a wrapper for the Cephes [1]_ routine `i1e`.
See also
--------
iv
i1
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("scipy.special", "_igam_fac",
"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "it2i0k0",
"""
it2i0k0(x)
Integrals related to modified Bessel functions of order 0
Returns
-------
ii0
``integral((i0(t)-1)/t, t=0..x)``
ik0
``integral(k0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2j0y0",
"""
it2j0y0(x)
Integrals related to Bessel functions of order 0
Returns
-------
ij0
``integral((1-j0(t))/t, t=0..x)``
iy0
``integral(y0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2struve0",
r"""
it2struve0(x)
Integral related to the Struve function of order 0.
Returns the integral,
.. math::
\int_x^\infty \frac{H_0(t)}{t}\,dt
where :math:`H_0` is the Struve function of order 0.
Parameters
----------
x : array_like
Lower limit of integration.
Returns
-------
I : ndarray
The value of the integral.
See also
--------
struve
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
""")
add_newdoc("scipy.special", "itairy",
"""
itairy(x)
Integrals of Airy functions
Calculates the integrals of Airy functions from 0 to `x`.
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
Apt
Integral of Ai(t) from 0 to x.
Bpt
Integral of Bi(t) from 0 to x.
Ant
Integral of Ai(-t) from 0 to x.
Bnt
Integral of Bi(-t) from 0 to x.
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
""")
add_newdoc("scipy.special", "iti0k0",
"""
iti0k0(x)
Integrals of modified Bessel functions of order 0
Returns simple integrals from 0 to `x` of the zeroth order modified
Bessel functions `i0` and `k0`.
Returns
-------
ii0, ik0
""")
add_newdoc("scipy.special", "itj0y0",
"""
itj0y0(x)
Integrals of Bessel functions of order 0
Returns simple integrals from 0 to `x` of the zeroth order Bessel
functions `j0` and `y0`.
Returns
-------
ij0, iy0
""")
add_newdoc("scipy.special", "itmodstruve0",
r"""
itmodstruve0(x)
Integral of the modified Struve function of order 0.
.. math::
I = \int_0^x L_0(t)\,dt
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
I : ndarray
The integral of :math:`L_0` from 0 to `x`.
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
""")
add_newdoc("scipy.special", "itstruve0",
r"""
itstruve0(x)
Integral of the Struve function of order 0.
.. math::
I = \int_0^x H_0(t)\,dt
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
I : ndarray
The integral of :math:`H_0` from 0 to `x`.
See also
--------
struve
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
""")
add_newdoc("scipy.special", "iv",
r"""
iv(v, z)
Modified Bessel function of the first kind of real order.
Parameters
----------
v : array_like
Order. If `z` is of real type and negative, `v` must be integer
valued.
z : array_like of float or complex
Argument.
Returns
-------
out : ndarray
Values of the modified Bessel function.
Notes
-----
For real `z` and :math:`v \in [-50, 50]`, the evaluation is carried out
using Temme's method [1]_. For larger orders, uniform asymptotic
expansions are applied.
For complex `z` and positive `v`, the AMOS [2]_ `zbesi` routine is
called. It uses a power series for small `z`, the asymptotic expansion
for large `abs(z)`, the Miller algorithm normalized by the Wronskian
and a Neumann series for intermediate magnitudes, and the uniform
asymptotic expansions for :math:`I_v(z)` and :math:`J_v(z)` for large
orders. Backward recurrence is used to generate sequences or reduce
orders when necessary.
The calculations above are done in the right half plane and continued
into the left half plane by the formula,
.. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\pi v) I_v(z)
(valid when the real part of `z` is positive). For negative `v`, the
formula
.. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
is used, where :math:`K_v(z)` is the modified Bessel function of the
second kind, evaluated using the AMOS routine `zbesk`.
See also
--------
kve : This function with leading exponential behavior stripped off.
References
----------
.. [1] Temme, Journal of Computational Physics, vol 21, 343 (1976)
.. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "ive",
r"""
ive(v, z)
Exponentially scaled modified Bessel function of the first kind
Defined as::
ive(v, z) = iv(v, z) * exp(-abs(z.real))
Parameters
----------
v : array_like of float
Order.
z : array_like of float or complex
Argument.
Returns
-------
out : ndarray
Values of the exponentially scaled modified Bessel function.
Notes
-----
For positive `v`, the AMOS [1]_ `zbesi` routine is called. It uses a
power series for small `z`, the asymptotic expansion for large
`abs(z)`, the Miller algorithm normalized by the Wronskian and a
Neumann series for intermediate magnitudes, and the uniform asymptotic
expansions for :math:`I_v(z)` and :math:`J_v(z)` for large orders.
Backward recurrence is used to generate sequences or reduce orders when
necessary.
The calculations above are done in the right half plane and continued
into the left half plane by the formula,
.. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\pi v) I_v(z)
(valid when the real part of `z` is positive). For negative `v`, the
formula
.. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
is used, where :math:`K_v(z)` is the modified Bessel function of the
second kind, evaluated using the AMOS routine `zbesk`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "j0",
r"""
j0(x)
Bessel function of the first kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
J : ndarray
Value of the Bessel function of the first kind of order 0 at `x`.
Notes
-----
The domain is divided into the intervals [0, 5] and (5, infinity). In the
first interval the following rational approximation is used:
.. math::
J_0(x) \approx (w - r_1^2)(w - r_2^2) \frac{P_3(w)}{Q_8(w)},
where :math:`w = x^2` and :math:`r_1`, :math:`r_2` are the zeros of
:math:`J_0`, and :math:`P_3` and :math:`Q_8` are polynomials of degrees 3
and 8, respectively.
In the second interval, the Hankel asymptotic expansion is employed with
two rational functions of degree 6/6 and 7/7.
This function is a wrapper for the Cephes [1]_ routine `j0`.
It should not be confused with the spherical Bessel functions (see
`spherical_jn`).
See also
--------
jv : Bessel function of real order and complex argument.
spherical_jn : spherical Bessel functions.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("scipy.special", "j1",
"""
j1(x)
Bessel function of the first kind of order 1.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
J : ndarray
Value of the Bessel function of the first kind of order 1 at `x`.
Notes
-----
The domain is divided into the intervals [0, 8] and (8, infinity). In the
first interval a 24 term Chebyshev expansion is used. In the second, the
asymptotic trigonometric representation is employed using two rational
functions of degree 5/5.
This function is a wrapper for the Cephes [1]_ routine `j1`.
It should not be confused with the spherical Bessel functions (see
`spherical_jn`).
See also
--------
jv
spherical_jn : spherical Bessel functions.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("scipy.special", "jn",
"""
jn(n, x)
Bessel function of the first kind of integer order and real argument.
Notes
-----
`jn` is an alias of `jv`.
Not to be confused with the spherical Bessel functions (see `spherical_jn`).
See also
--------
jv
spherical_jn : spherical Bessel functions.
""")
add_newdoc("scipy.special", "jv",
r"""
jv(v, z)
Bessel function of the first kind of real order and complex argument.
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
J : ndarray
Value of the Bessel function, :math:`J_v(z)`.
Notes
-----
For positive `v` values, the computation is carried out using the AMOS
[1]_ `zbesj` routine, which exploits the connection to the modified
Bessel function :math:`I_v`,
.. math::
J_v(z) = \exp(v\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0)
J_v(z) = \exp(-v\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0)
For negative `v` values the formula,
.. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v)
is used, where :math:`Y_v(z)` is the Bessel function of the second
kind, computed using the AMOS routine `zbesy`. Note that the second
term is exactly zero for integer `v`; to improve accuracy the second
term is explicitly omitted for `v` values such that `v = floor(v)`.
Not to be confused with the spherical Bessel functions (see `spherical_jn`).
See also
--------
jve : :math:`J_v` with leading exponential behavior stripped off.
spherical_jn : spherical Bessel functions.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "jve",
r"""
jve(v, z)
Exponentially scaled Bessel function of order `v`.
Defined as::
jve(v, z) = jv(v, z) * exp(-abs(z.imag))
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
J : ndarray
Value of the exponentially scaled Bessel function.
Notes
-----
For positive `v` values, the computation is carried out using the AMOS
[1]_ `zbesj` routine, which exploits the connection to the modified
Bessel function :math:`I_v`,
.. math::
J_v(z) = \exp(v\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0)
J_v(z) = \exp(-v\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0)
For negative `v` values the formula,
.. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v)
is used, where :math:`Y_v(z)` is the Bessel function of the second
kind, computed using the AMOS routine `zbesy`. Note that the second
term is exactly zero for integer `v`; to improve accuracy the second
term is explicitly omitted for `v` values such that `v = floor(v)`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "k0",
r"""
k0(x)
Modified Bessel function of the second kind of order 0, :math:`K_0`.
This function is also sometimes referred to as the modified Bessel
function of the third kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
K : ndarray
Value of the modified Bessel function :math:`K_0` at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k0`.
See also
--------
kv
k0e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("scipy.special", "k0e",
"""
k0e(x)
Exponentially scaled modified Bessel function K of order 0
Defined as::
k0e(x) = exp(x) * k0(x).
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the exponentially scaled modified Bessel function K of order
0 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k0e`.
See also
--------
kv
k0
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("scipy.special", "k1",
"""
k1(x)
Modified Bessel function of the second kind of order 1, :math:`K_1(x)`.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the modified Bessel function K of order 1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k1`.
See also
--------
kv
k1e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("scipy.special", "k1e",
"""
k1e(x)
Exponentially scaled modified Bessel function K of order 1
Defined as::
k1e(x) = exp(x) * k1(x)
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the exponentially scaled modified Bessel function K of order
1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k1e`.
See also
--------
kv
k1
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("scipy.special", "kei",
"""
kei(x)
Kelvin function kei
""")
add_newdoc("scipy.special", "keip",
"""
keip(x)
Derivative of the Kelvin function kei
""")
add_newdoc("scipy.special", "kelvin",
"""
kelvin(x)
Kelvin functions as complex numbers
Returns
-------
Be, Ke, Bep, Kep
The tuple (Be, Ke, Bep, Kep) contains complex numbers
representing the real and imaginary Kelvin functions and their
derivatives evaluated at `x`. For example, kelvin(x)[0].real =
ber x and kelvin(x)[0].imag = bei x with similar relationships
for ker and kei.
""")
add_newdoc("scipy.special", "ker",
"""
ker(x)
Kelvin function ker
""")
add_newdoc("scipy.special", "kerp",
"""
kerp(x)
Derivative of the Kelvin function ker
""")
add_newdoc("scipy.special", "kl_div",
r"""
kl_div(x, y)
Elementwise function for computing Kullback-Leibler divergence.
.. math:: \mathrm{kl\_div}(x, y) = \begin{cases} x \log(x / y) - x + y & x > 0, y > 0 \\ y & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, rel_entr
Notes
-----
This function is non-negative and is jointly convex in `x` and `y`.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "kn",
r"""
kn(n, x)
Modified Bessel function of the second kind of integer order `n`
Returns the modified Bessel function of the second kind for integer order
`n` at real `x`.
These are also sometimes called functions of the third kind, Basset
functions, or Macdonald functions.
Parameters
----------
n : array_like of int
Order of Bessel functions (floats will truncate with a warning)
x : array_like of float
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The results
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
See Also
--------
kv : Same function, but accepts real order and complex argument
kvp : Derivative of this function
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
Examples
--------
Plot the function of several orders for real input:
>>> from scipy.special import kn
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 5, 1000)
>>> for N in range(6):
... plt.plot(x, kn(N, x), label='$K_{}(x)$'.format(N))
>>> plt.ylim(0, 10)
>>> plt.legend()
>>> plt.title(r'Modified Bessel function of the second kind $K_n(x)$')
>>> plt.show()
Calculate for a single value at multiple orders:
>>> kn([4, 5, 6], 1)
array([ 44.23241585, 360.9605896 , 3653.83831186])
""")
add_newdoc("scipy.special", "kolmogi",
"""
kolmogi(p)
Inverse Survival Function of Kolmogorov distribution
It is the inverse function to `kolmogorov`.
Returns y such that ``kolmogorov(y) == p``.
Parameters
----------
p : float array_like
Probability
Returns
-------
float
The value(s) of kolmogi(p)
Notes
-----
`kolmogorov` is used by `stats.kstest` in the application of the
Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this
function is exposed in `scipy.special`, but the recommended way to achieve
the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
`stats.kstwobign` distribution.
See Also
--------
kolmogorov : The Survival Function for the distribution
scipy.stats.kstwobign : Provides the functionality as a continuous distribution
smirnov, smirnovi : Functions for the one-sided distribution
Examples
--------
>>> from scipy.special import kolmogi
>>> kolmogi([0, 0.1, 0.25, 0.5, 0.75, 0.9, 1.0])
array([ inf, 1.22384787, 1.01918472, 0.82757356, 0.67644769,
0.57117327, 0. ])
""")
add_newdoc("scipy.special", "kolmogorov",
r"""
kolmogorov(y)
Complementary cumulative distribution function (Survival Function) of the
Kolmogorov distribution.
Returns the complementary cumulative distribution function of
Kolmogorov's limiting distribution (``D_n*\sqrt(n)`` as n goes to infinity)
of a two-sided test for equality between an empirical and a theoretical
distribution. It is equal to the (limit as n->infinity of the)
probability that ``sqrt(n) * max absolute deviation > y``.
Parameters
----------
y : float array_like
Absolute deviation between the Empirical CDF (ECDF) and the target CDF,
multiplied by sqrt(n).
Returns
-------
float
The value(s) of kolmogorov(y)
Notes
-----
`kolmogorov` is used by `stats.kstest` in the application of the
Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this
function is exposed in `scipy.special`, but the recommended way to achieve
the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
`stats.kstwobign` distribution.
See Also
--------
kolmogi : The Inverse Survival Function for the distribution
scipy.stats.kstwobign : Provides the functionality as a continuous distribution
smirnov, smirnovi : Functions for the one-sided distribution
Examples
--------
Show the probability of a gap at least as big as 0, 0.5 and 1.0.
>>> import numpy as np
>>> from scipy.special import kolmogorov
>>> from scipy.stats import kstwobign
>>> kolmogorov([0, 0.5, 1.0])
array([ 1. , 0.96394524, 0.26999967])
Compare a sample of size 1000 drawn from a Laplace(0, 1) distribution against
the target distribution, a Normal(0, 1) distribution.
>>> from scipy.stats import norm, laplace
>>> n = 1000
>>> np.random.seed(seed=233423)
>>> lap01 = laplace(0, 1)
>>> x = np.sort(lap01.rvs(n))
>>> np.mean(x), np.std(x)
(-0.083073685397609842, 1.3676426568399822)
Construct the Empirical CDF and the K-S statistic Dn.
>>> target = norm(0,1) # Normal mean 0, stddev 1
>>> cdfs = target.cdf(x)
>>> ecdfs = np.arange(n+1, dtype=float)/n
>>> gaps = np.column_stack([cdfs - ecdfs[:n], ecdfs[1:] - cdfs])
>>> Dn = np.max(gaps)
>>> Kn = np.sqrt(n) * Dn
>>> print('Dn=%f, sqrt(n)*Dn=%f' % (Dn, Kn))
Dn=0.058286, sqrt(n)*Dn=1.843153
>>> print(chr(10).join(['For a sample of size n drawn from a N(0, 1) distribution:',
... ' the approximate Kolmogorov probability that sqrt(n)*Dn>=%f is %f' % (Kn, kolmogorov(Kn)),
... ' the approximate Kolmogorov probability that sqrt(n)*Dn<=%f is %f' % (Kn, kstwobign.cdf(Kn))]))
For a sample of size n drawn from a N(0, 1) distribution:
the approximate Kolmogorov probability that sqrt(n)*Dn>=1.843153 is 0.002240
the approximate Kolmogorov probability that sqrt(n)*Dn<=1.843153 is 0.997760
Plot the Empirical CDF against the target N(0, 1) CDF.
>>> import matplotlib.pyplot as plt
>>> plt.step(np.concatenate([[-3], x]), ecdfs, where='post', label='Empirical CDF')
>>> x3 = np.linspace(-3, 3, 100)
>>> plt.plot(x3, target.cdf(x3), label='CDF for N(0, 1)')
>>> plt.ylim([0, 1]); plt.grid(True); plt.legend();
>>> # Add vertical lines marking Dn+ and Dn-
>>> iminus, iplus = np.argmax(gaps, axis=0)
>>> plt.vlines([x[iminus]], ecdfs[iminus], cdfs[iminus], color='r', linestyle='dashed', lw=4)
>>> plt.vlines([x[iplus]], cdfs[iplus], ecdfs[iplus+1], color='r', linestyle='dashed', lw=4)
>>> plt.show()
""")
add_newdoc("scipy.special", "_kolmogc",
r"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "_kolmogci",
r"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "_kolmogp",
r"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "kv",
r"""
kv(v, z)
Modified Bessel function of the second kind of real order `v`
Returns the modified Bessel function of the second kind for real order
`v` at complex `z`.
These are also sometimes called functions of the third kind, Basset
functions, or Macdonald functions. They are defined as those solutions
of the modified Bessel equation for which,
.. math::
K_v(x) \sim \sqrt{\pi/(2x)} \exp(-x)
as :math:`x \to \infty` [3]_.
Parameters
----------
v : array_like of float
Order of Bessel functions
z : array_like of complex
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The results. Note that input must be of complex type to get complex
output, e.g. ``kv(3, -2+0j)`` instead of ``kv(3, -2)``.
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
See Also
--------
kve : This function with leading exponential behavior stripped off.
kvp : Derivative of this function
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
.. [3] NIST Digital Library of Mathematical Functions,
Eq. 10.25.E3. https://dlmf.nist.gov/10.25.E3
Examples
--------
Plot the function of several orders for real input:
>>> from scipy.special import kv
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 5, 1000)
>>> for N in np.linspace(0, 6, 5):
... plt.plot(x, kv(N, x), label='$K_{{{}}}(x)$'.format(N))
>>> plt.ylim(0, 10)
>>> plt.legend()
>>> plt.title(r'Modified Bessel function of the second kind $K_\nu(x)$')
>>> plt.show()
Calculate for a single value at multiple orders:
>>> kv([4, 4.5, 5], 1+2j)
array([ 0.1992+2.3892j, 2.3493+3.6j , 7.2827+3.8104j])
""")
add_newdoc("scipy.special", "kve",
r"""
kve(v, z)
Exponentially scaled modified Bessel function of the second kind.
Returns the exponentially scaled, modified Bessel function of the
second kind (sometimes called the third kind) for real order `v` at
complex `z`::
kve(v, z) = kv(v, z) * exp(z)
Parameters
----------
v : array_like of float
Order of Bessel functions
z : array_like of complex
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The exponentially scaled modified Bessel function of the second kind.
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
""")
add_newdoc("scipy.special", "_lanczos_sum_expg_scaled",
"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "_lgam1p",
"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "log1p",
"""
log1p(x)
Calculates log(1+x) for use when `x` is near zero
""")
add_newdoc("scipy.special", "_log1pmx",
"""
Internal function, do not use.
""")
add_newdoc('scipy.special', 'logit',
"""
logit(x)
Logit ufunc for ndarrays.
The logit function is defined as logit(p) = log(p/(1-p)).
Note that logit(0) = -inf, logit(1) = inf, and logit(p)
for p<0 or p>1 yields nan.
Parameters
----------
x : ndarray
The ndarray to apply logit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are logit of the corresponding entry of x.
See Also
--------
expit
Notes
-----
As a ufunc logit takes a number of optional
keyword arguments. For more information
see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
Examples
--------
>>> from scipy.special import logit, expit
>>> logit([0, 0.25, 0.5, 0.75, 1])
array([ -inf, -1.09861229, 0. , 1.09861229, inf])
`expit` is the inverse of `logit`:
>>> expit(logit([0.1, 0.75, 0.999]))
array([ 0.1 , 0.75 , 0.999])
Plot logit(x) for x in [0, 1]:
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 1, 501)
>>> y = logit(x)
>>> plt.plot(x, y)
>>> plt.grid()
>>> plt.ylim(-6, 6)
>>> plt.xlabel('x')
>>> plt.title('logit(x)')
>>> plt.show()
""")
add_newdoc("scipy.special", "lpmv",
r"""
lpmv(m, v, x)
Associated Legendre function of integer order and real degree.
Defined as
.. math::
P_v^m = (-1)^m (1 - x^2)^{m/2} \frac{d^m}{dx^m} P_v(x)
where
.. math::
P_v = \sum_{k = 0}^\infty \frac{(-v)_k (v + 1)_k}{(k!)^2}
\left(\frac{1 - x}{2}\right)^k
is the Legendre function of the first kind. Here :math:`(\cdot)_k`
is the Pochhammer symbol; see `poch`.
Parameters
----------
m : array_like
Order (int or float). If passed a float not equal to an
integer the function returns NaN.
v : array_like
Degree (float).
x : array_like
Argument (float). Must have ``|x| <= 1``.
Returns
-------
pmv : ndarray
Value of the associated Legendre function.
See Also
--------
lpmn : Compute the associated Legendre function for all orders
``0, ..., m`` and degrees ``0, ..., n``.
clpmn : Compute the associated Legendre function at complex
arguments.
Notes
-----
Note that this implementation includes the Condon-Shortley phase.
References
----------
.. [1] Zhang, Jin, "Computation of Special Functions", John Wiley
and Sons, Inc, 1996.
""")
add_newdoc("scipy.special", "mathieu_a",
"""
mathieu_a(m, q)
Characteristic value of even Mathieu functions
Returns the characteristic value for the even solution,
``ce_m(z, q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_b",
"""
mathieu_b(m, q)
Characteristic value of odd Mathieu functions
Returns the characteristic value for the odd solution,
``se_m(z, q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_cem",
"""
mathieu_cem(m, q, x)
Even Mathieu function and its derivative
Returns the even Mathieu function, ``ce_m(x, q)``, of order `m` and
parameter `q` evaluated at `x` (given in degrees). Also returns the
derivative with respect to `x` of ce_m(x, q)
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem1",
"""
mathieu_modcem1(m, q, x)
Even modified Mathieu function of the first kind and its derivative
Evaluates the even modified Mathieu function of the first kind,
``Mc1m(x, q)``, and its derivative at `x` for order `m` and parameter
`q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem2",
"""
mathieu_modcem2(m, q, x)
Even modified Mathieu function of the second kind and its derivative
Evaluates the even modified Mathieu function of the second kind,
Mc2m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem1",
"""
mathieu_modsem1(m, q, x)
Odd modified Mathieu function of the first kind and its derivative
Evaluates the odd modified Mathieu function of the first kind,
Ms1m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem2",
"""
mathieu_modsem2(m, q, x)
Odd modified Mathieu function of the second kind and its derivative
Evaluates the odd modified Mathieu function of the second kind,
Ms2m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_sem",
"""
mathieu_sem(m, q, x)
Odd Mathieu function and its derivative
Returns the odd Mathieu function, se_m(x, q), of order `m` and
parameter `q` evaluated at `x` (given in degrees). Also returns the
derivative with respect to `x` of se_m(x, q).
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "modfresnelm",
"""
modfresnelm(x)
Modified Fresnel negative integrals
Returns
-------
fm
Integral ``F_-(x)``: ``integral(exp(-1j*t*t), t=x..inf)``
km
Integral ``K_-(x)``: ``1/sqrt(pi)*exp(1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modfresnelp",
"""
modfresnelp(x)
Modified Fresnel positive integrals
Returns
-------
fp
Integral ``F_+(x)``: ``integral(exp(1j*t*t), t=x..inf)``
kp
Integral ``K_+(x)``: ``1/sqrt(pi)*exp(-1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modstruve",
r"""
modstruve(v, x)
Modified Struve function.
Return the value of the modified Struve function of order `v` at `x`. The
modified Struve function is defined as,
.. math::
L_v(x) = -\imath \exp(-\pi\imath v/2) H_v(\imath x),
where :math:`H_v` is the Struve function.
Parameters
----------
v : array_like
Order of the modified Struve function (float).
x : array_like
Argument of the Struve function (float; must be positive unless `v` is
an integer).
Returns
-------
L : ndarray
Value of the modified Struve function of order `v` at `x`.
Notes
-----
Three methods discussed in [1]_ are used to evaluate the function:
- power series
- expansion in Bessel functions (if :math:`|z| < |v| + 20`)
- asymptotic large-z expansion (if :math:`z \geq 0.7v + 12`)
Rounding errors are estimated based on the largest terms in the sums, and
the result associated with the smallest error is returned.
See also
--------
struve
References
----------
.. [1] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/11
""")
add_newdoc("scipy.special", "nbdtr",
r"""
nbdtr(k, n, p)
Negative binomial cumulative distribution function.
Returns the sum of the terms 0 through `k` of the negative binomial
distribution probability mass function,
.. math::
F = \sum_{j=0}^k {{n + j - 1}\choose{j}} p^n (1 - p)^j.
In a sequence of Bernoulli trials with individual success probabilities
`p`, this is the probability that `k` or fewer failures precede the nth
success.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
F : ndarray
The probability of `k` or fewer failures before `n` successes in a
sequence of events with individual success probability `p`.
See also
--------
nbdtrc
Notes
-----
If floating point values are passed for `k` or `n`, they will be truncated
to integers.
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{nbdtr}(k, n, p) = I_{p}(n, k + 1).
Wrapper for the Cephes [1]_ routine `nbdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("scipy.special", "nbdtrc",
r"""
nbdtrc(k, n, p)
Negative binomial survival function.
Returns the sum of the terms `k + 1` to infinity of the negative binomial
distribution probability mass function,
.. math::
F = \sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j.
In a sequence of Bernoulli trials with individual success probabilities
`p`, this is the probability that more than `k` failures precede the nth
success.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
F : ndarray
The probability of `k + 1` or more failures before `n` successes in a
sequence of events with individual success probability `p`.
Notes
-----
If floating point values are passed for `k` or `n`, they will be truncated
to integers.
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{nbdtrc}(k, n, p) = I_{1 - p}(k + 1, n).
Wrapper for the Cephes [1]_ routine `nbdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("scipy.special", "nbdtri",
"""
nbdtri(k, n, y)
Inverse of `nbdtr` vs `p`.
Returns the inverse with respect to the parameter `p` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
Returns
-------
p : ndarray
Probability of success in a single event (float) such that
`nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.
nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `nbdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("scipy.special", "nbdtrik",
r"""
nbdtrik(y, n, p)
Inverse of `nbdtr` vs `k`.
Returns the inverse with respect to the parameter `k` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
k : ndarray
The maximum number of allowed failures such that `nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.
nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.
Formula 26.5.26 of [2]_,
.. math::
\sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),
is used to reduce calculation of the cumulative distribution function to
that of a regularized incomplete beta :math:`I`.
Computation of `k` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `k`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("scipy.special", "nbdtrin",
r"""
nbdtrin(k, y, p)
Inverse of `nbdtr` vs `n`.
Returns the inverse with respect to the parameter `n` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
p : array_like
Probability of success in a single event (float).
Returns
-------
n : ndarray
The number of successes `n` such that `nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.
nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.
Formula 26.5.26 of [2]_,
.. math::
\sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),
is used to reduce calculation of the cumulative distribution function to
that of a regularized incomplete beta :math:`I`.
Computation of `n` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `n`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("scipy.special", "ncfdtr",
r"""
ncfdtr(dfn, dfd, nc, f)
Cumulative distribution function of the non-central F distribution.
The non-central F describes the distribution of,
.. math::
Z = \frac{X/d_n}{Y/d_d}
where :math:`X` and :math:`Y` are independently distributed, with
:math:`X` distributed non-central :math:`\chi^2` with noncentrality
parameter `nc` and :math:`d_n` degrees of freedom, and :math:`Y`
distributed :math:`\chi^2` with :math:`d_d` degrees of freedom.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.
ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.
ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.
ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdffnc`.
The cumulative distribution function is computed using Formula 26.6.20 of
[2]_:
.. math::
F(d_n, d_d, n_c, f) = \sum_{j=0}^\infty e^{-n_c/2} \frac{(n_c/2)^j}{j!} I_{x}(\frac{d_n}{2} + j, \frac{d_d}{2}),
where :math:`I` is the regularized incomplete beta function, and
:math:`x = f d_n/(f d_n + d_d)`.
The computation time required for this routine is proportional to the
noncentrality parameter `nc`. Very large values of this parameter can
consume immense computer resources. This is why the search range is
bounded by 10,000.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central F distribution, for nc=0. Compare with the
F-distribution from scipy.stats:
>>> x = np.linspace(-1, 8, num=500)
>>> dfn = 3
>>> dfd = 2
>>> ncf_stats = stats.f.cdf(x, dfn, dfd)
>>> ncf_special = special.ncfdtr(dfn, dfd, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, ncf_stats, 'b-', lw=3)
>>> ax.plot(x, ncf_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "ncfdtri",
"""
ncfdtri(dfn, dfd, nc, p)
Inverse with respect to `f` of the CDF of the non-central F distribution.
See `ncfdtr` for more details.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
p : array_like
Value of the cumulative distribution function. Must be in the
range [0, 1].
Returns
-------
f : float
Quantiles, i.e. the upper limit of integration.
See Also
--------
ncfdtr : CDF of the non-central F distribution.
ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.
ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.
ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.
Examples
--------
>>> from scipy.special import ncfdtr, ncfdtri
Compute the CDF for several values of `f`:
>>> f = [0.5, 1, 1.5]
>>> p = ncfdtr(2, 3, 1.5, f)
>>> p
array([ 0.20782291, 0.36107392, 0.47345752])
Compute the inverse. We recover the values of `f`, as expected:
>>> ncfdtri(2, 3, 1.5, p)
array([ 0.5, 1. , 1.5])
""")
add_newdoc("scipy.special", "ncfdtridfd",
"""
ncfdtridfd(dfn, p, nc, f)
Calculate degrees of freedom (denominator) for the noncentral F-distribution.
This is the inverse with respect to `dfd` of `ncfdtr`.
See `ncfdtr` for more details.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
p : array_like
Value of the cumulative distribution function. Must be in the
range [0, 1].
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
dfd : float
Degrees of freedom of the denominator sum of squares.
See Also
--------
ncfdtr : CDF of the non-central F distribution.
ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.
ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.
ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.
Notes
-----
The value of the cumulative noncentral F distribution is not necessarily
monotone in either degrees of freedom. There thus may be two values that
provide a given CDF value. This routine assumes monotonicity and will
find an arbitrary one of the two values.
Examples
--------
>>> from scipy.special import ncfdtr, ncfdtridfd
Compute the CDF for several values of `dfd`:
>>> dfd = [1, 2, 3]
>>> p = ncfdtr(2, dfd, 0.25, 15)
>>> p
array([ 0.8097138 , 0.93020416, 0.96787852])
Compute the inverse. We recover the values of `dfd`, as expected:
>>> ncfdtridfd(2, p, 0.25, 15)
array([ 1., 2., 3.])
""")
add_newdoc("scipy.special", "ncfdtridfn",
"""
ncfdtridfn(p, dfd, nc, f)
Calculate degrees of freedom (numerator) for the noncentral F-distribution.
This is the inverse with respect to `dfn` of `ncfdtr`.
See `ncfdtr` for more details.
Parameters
----------
p : array_like
Value of the cumulative distribution function. Must be in the
range [0, 1].
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : float
Quantiles, i.e. the upper limit of integration.
Returns
-------
dfn : float
Degrees of freedom of the numerator sum of squares.
See Also
--------
ncfdtr : CDF of the non-central F distribution.
ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.
ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.
ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.
Notes
-----
The value of the cumulative noncentral F distribution is not necessarily
monotone in either degrees of freedom. There thus may be two values that
provide a given CDF value. This routine assumes monotonicity and will
find an arbitrary one of the two values.
Examples
--------
>>> from scipy.special import ncfdtr, ncfdtridfn
Compute the CDF for several values of `dfn`:
>>> dfn = [1, 2, 3]
>>> p = ncfdtr(dfn, 2, 0.25, 15)
>>> p
array([ 0.92562363, 0.93020416, 0.93188394])
Compute the inverse. We recover the values of `dfn`, as expected:
>>> ncfdtridfn(p, 2, 0.25, 15)
array([ 1., 2., 3.])
""")
add_newdoc("scipy.special", "ncfdtrinc",
"""
ncfdtrinc(dfn, dfd, p, f)
Calculate non-centrality parameter for non-central F distribution.
This is the inverse with respect to `nc` of `ncfdtr`.
See `ncfdtr` for more details.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
p : array_like
Value of the cumulative distribution function. Must be in the
range [0, 1].
f : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
nc : float
Noncentrality parameter.
See Also
--------
ncfdtr : CDF of the non-central F distribution.
ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.
ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.
ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.
Examples
--------
>>> from scipy.special import ncfdtr, ncfdtrinc
Compute the CDF for several values of `nc`:
>>> nc = [0.5, 1.5, 2.0]
>>> p = ncfdtr(2, 3, nc, 15)
>>> p
array([ 0.96309246, 0.94327955, 0.93304098])
Compute the inverse. We recover the values of `nc`, as expected:
>>> ncfdtrinc(2, 3, p, 15)
array([ 0.5, 1.5, 2. ])
""")
add_newdoc("scipy.special", "nctdtr",
"""
nctdtr(df, nc, t)
Cumulative distribution function of the non-central `t` distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.
nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.
nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central t distribution, for nc=0. Compare with the
t-distribution from scipy.stats:
>>> x = np.linspace(-5, 5, num=500)
>>> df = 3
>>> nct_stats = stats.t.cdf(x, df)
>>> nct_special = special.nctdtr(df, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, nct_stats, 'b-', lw=3)
>>> ax.plot(x, nct_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "nctdtridf",
"""
nctdtridf(p, nc, t)
Calculate degrees of freedom for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrinc",
"""
nctdtrinc(df, p, t)
Calculate non-centrality parameter for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
p : array_like
CDF values, in range (0, 1].
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrit",
"""
nctdtrit(df, nc, p)
Inverse cumulative distribution function of the non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
p : array_like
CDF values, in range (0, 1].
""")
add_newdoc("scipy.special", "ndtr",
r"""
ndtr(x)
Gaussian cumulative distribution function.
Returns the area under the standard Gaussian probability
density function, integrated from minus infinity to `x`
.. math::
\frac{1}{\sqrt{2\pi}} \int_{-\infty}^x \exp(-t^2/2) dt
Parameters
----------
x : array_like, real or complex
Argument
Returns
-------
ndarray
The value of the normal CDF evaluated at `x`
See Also
--------
erf
erfc
scipy.stats.norm
log_ndtr
""")
add_newdoc("scipy.special", "nrdtrimn",
"""
nrdtrimn(p, x, std)
Calculate mean of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
std : array_like
Standard deviation.
Returns
-------
mn : float or ndarray
The mean of the normal distribution.
See Also
--------
nrdtrisd, ndtr
""")
add_newdoc("scipy.special", "nrdtrisd",
"""
nrdtrisd(p, x, mn)
Calculate standard deviation of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
mn : float or ndarray
The mean of the normal distribution.
Returns
-------
std : array_like
Standard deviation.
See Also
--------
ndtr
""")
add_newdoc("scipy.special", "log_ndtr",
"""
log_ndtr(x)
Logarithm of Gaussian cumulative distribution function.
Returns the log of the area under the standard Gaussian probability
density function, integrated from minus infinity to `x`::
log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x))
Parameters
----------
x : array_like, real or complex
Argument
Returns
-------
ndarray
The value of the log of the normal CDF evaluated at `x`
See Also
--------
erf
erfc
scipy.stats.norm
ndtr
""")
add_newdoc("scipy.special", "ndtri",
"""
ndtri(y)
Inverse of `ndtr` vs x
Returns the argument x for which the area under the Gaussian
probability density function (integrated from minus infinity to `x`)
is equal to y.
""")
add_newdoc("scipy.special", "obl_ang1",
"""
obl_ang1(m, n, c, x)
Oblate spheroidal angular function of the first kind and its derivative
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_ang1_cv",
"""
obl_ang1_cv(m, n, c, cv, x)
Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_cv",
"""
obl_cv(m, n, c)
Characteristic value of oblate spheroidal function
Computes the characteristic value of oblate spheroidal wave
functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
""")
add_newdoc("scipy.special", "obl_rad1",
"""
obl_rad1(m, n, c, x)
Oblate spheroidal radial function of the first kind and its derivative
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad1_cv",
"""
obl_rad1_cv(m, n, c, cv, x)
Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2",
"""
obl_rad2(m, n, c, x)
Oblate spheroidal radial function of the second kind and its derivative.
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2_cv",
"""
obl_rad2_cv(m, n, c, cv, x)
Oblate spheroidal radial function obl_rad2 for precomputed characteristic value
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbdv",
"""
pbdv(v, x)
Parabolic cylinder function D
Returns (d, dp) the parabolic cylinder function Dv(x) in d and the
derivative, Dv'(x) in dp.
Returns
-------
d
Value of the function
dp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbvv",
"""
pbvv(v, x)
Parabolic cylinder function V
Returns the parabolic cylinder function Vv(x) in v and the
derivative, Vv'(x) in vp.
Returns
-------
v
Value of the function
vp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbwa",
r"""
pbwa(a, x)
Parabolic cylinder function W.
The function is a particular solution to the differential equation
.. math::
y'' + \left(\frac{1}{4}x^2 - a\right)y = 0,
for a full definition see section 12.14 in [1]_.
Parameters
----------
a : array_like
Real parameter
x : array_like
Real argument
Returns
-------
w : scalar or ndarray
Value of the function
wp : scalar or ndarray
Value of the derivative in x
Notes
-----
The function is a wrapper for a Fortran routine by Zhang and Jin
[2]_. The implementation is accurate only for ``|a|, |x| < 5`` and
returns NaN outside that range.
References
----------
.. [1] Digital Library of Mathematical Functions, 12.14.
https://dlmf.nist.gov/12.14
.. [2] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
""")
add_newdoc("scipy.special", "pdtr",
"""
pdtr(k, m)
Poisson cumulative distribution function
Returns the sum of the first `k` terms of the Poisson distribution:
sum(exp(-m) * m**j / j!, j=0..k) = gammaincc( k+1, m). Arguments
must both be positive and `k` an integer.
""")
add_newdoc("scipy.special", "pdtrc",
"""
pdtrc(k, m)
Poisson survival function
Returns the sum of the terms from k+1 to infinity of the Poisson
distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc(
k+1, m). Arguments must both be positive and `k` an integer.
""")
add_newdoc("scipy.special", "pdtri",
"""
pdtri(k, y)
Inverse to `pdtr` vs m
Returns the Poisson variable `m` such that the sum from 0 to `k` of
the Poisson density is equal to the given probability `y`:
calculated by gammaincinv(k+1, y). `k` must be a nonnegative
integer and `y` between 0 and 1.
""")
add_newdoc("scipy.special", "pdtrik",
"""
pdtrik(p, m)
Inverse to `pdtr` vs k
Returns the quantile k such that ``pdtr(k, m) = p``
""")
add_newdoc("scipy.special", "poch",
r"""
poch(z, m)
Rising factorial (z)_m
The Pochhammer symbol (rising factorial) is defined as
.. math::
(z)_m = \frac{\Gamma(z + m)}{\Gamma(z)}
For positive integer `m` it reads
.. math::
(z)_m = z (z + 1) ... (z + m - 1)
Parameters
----------
z : array_like
(int or float)
m : array_like
(int or float)
Returns
-------
poch : ndarray
The value of the function.
""")
add_newdoc("scipy.special", "pro_ang1",
"""
pro_ang1(m, n, c, x)
Prolate spheroidal angular function of the first kind and its derivative
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_ang1_cv",
"""
pro_ang1_cv(m, n, c, cv, x)
Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_cv",
"""
pro_cv(m, n, c)
Characteristic value of prolate spheroidal function
Computes the characteristic value of prolate spheroidal wave
functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
""")
add_newdoc("scipy.special", "pro_rad1",
"""
pro_rad1(m, n, c, x)
Prolate spheroidal radial function of the first kind and its derivative
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad1_cv",
"""
pro_rad1_cv(m, n, c, cv, x)
Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2",
"""
pro_rad2(m, n, c, x)
Prolate spheroidal radial function of the second kind and its derivative
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2_cv",
"""
pro_rad2_cv(m, n, c, cv, x)
Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pseudo_huber",
r"""
pseudo_huber(delta, r)
Pseudo-Huber loss function.
.. math:: \mathrm{pseudo\_huber}(\delta, r) = \delta^2 \left( \sqrt{ 1 + \left( \frac{r}{\delta} \right)^2 } - 1 \right)
Parameters
----------
delta : ndarray
Input array, indicating the soft quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Pseudo-Huber loss function values.
Notes
-----
This function is convex in :math:`r`.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "psi",
"""
psi(z, out=None)
The digamma function.
The logarithmic derivative of the gamma function evaluated at ``z``.
Parameters
----------
z : array_like
Real or complex argument.
out : ndarray, optional
Array for the computed values of ``psi``.
Returns
-------
digamma : ndarray
Computed values of ``psi``.
Notes
-----
For large values not close to the negative real axis ``psi`` is
computed using the asymptotic series (5.11.2) from [1]_. For small
arguments not close to the negative real axis the recurrence
relation (5.5.2) from [1]_ is used until the argument is large
enough to use the asymptotic series. For values close to the
negative real axis the reflection formula (5.5.4) from [1]_ is
used first. Note that ``psi`` has a family of zeros on the
negative real axis which occur between the poles at nonpositive
integers. Around the zeros the reflection formula suffers from
cancellation and the implementation loses precision. The sole
positive zero and the first negative zero, however, are handled
separately by precomputing series expansions using [2]_, so the
function should maintain full accuracy around the origin.
References
----------
.. [1] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/5
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
""")
add_newdoc("scipy.special", "radian",
"""
radian(d, m, s)
Convert from degrees to radians
Returns the angle given in (d)egrees, (m)inutes, and (s)econds in
radians.
""")
add_newdoc("scipy.special", "rel_entr",
r"""
rel_entr(x, y)
Elementwise function for computing relative entropy.
.. math:: \mathrm{rel\_entr}(x, y) = \begin{cases} x \log(x / y) & x > 0, y > 0 \\ 0 & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, kl_div
Notes
-----
This function is jointly convex in x and y.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "rgamma",
"""
rgamma(z)
Reciprocal of the gamma function
Returns ``1/gamma(z)``
""")
add_newdoc("scipy.special", "round",
"""
round(x)
Round to nearest integer
Returns the nearest integer to `x` as a double precision floating
point result. If `x` ends in 0.5 exactly, the nearest even integer
is chosen.
""")
add_newdoc("scipy.special", "shichi",
r"""
shichi(x, out=None)
Hyperbolic sine and cosine integrals.
The hyperbolic sine integral is
.. math::
\int_0^x \frac{\sinh{t}}{t}dt
and the hyperbolic cosine integral is
.. math::
\gamma + \log(x) + \int_0^x \frac{\cosh{t} - 1}{t} dt
where :math:`\gamma` is Euler's constant and :math:`\log` is the
principal branch of the logarithm.
Parameters
----------
x : array_like
Real or complex points at which to compute the hyperbolic sine
and cosine integrals.
Returns
-------
si : ndarray
Hyperbolic sine integral at ``x``
ci : ndarray
Hyperbolic cosine integral at ``x``
Notes
-----
For real arguments with ``x < 0``, ``chi`` is the real part of the
hyperbolic cosine integral. For such points ``chi(x)`` and ``chi(x
+ 0j)`` differ by a factor of ``1j*pi``.
For real arguments the function is computed by calling Cephes'
[1]_ *shichi* routine. For complex arguments the algorithm is based
on Mpmath's [2]_ *shi* and *chi* routines.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
""")
add_newdoc("scipy.special", "sici",
r"""
sici(x, out=None)
Sine and cosine integrals.
The sine integral is
.. math::
\int_0^x \frac{\sin{t}}{t}dt
and the cosine integral is
.. math::
\gamma + \log(x) + \int_0^x \frac{\cos{t} - 1}{t}dt
where :math:`\gamma` is Euler's constant and :math:`\log` is the
principal branch of the logarithm.
Parameters
----------
x : array_like
Real or complex points at which to compute the sine and cosine
integrals.
Returns
-------
si : ndarray
Sine integral at ``x``
ci : ndarray
Cosine integral at ``x``
Notes
-----
For real arguments with ``x < 0``, ``ci`` is the real part of the
cosine integral. For such points ``ci(x)`` and ``ci(x + 0j)``
differ by a factor of ``1j*pi``.
For real arguments the function is computed by calling Cephes'
[1]_ *sici* routine. For complex arguments the algorithm is based
on Mpmath's [2]_ *si* and *ci* routines.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
""")
add_newdoc("scipy.special", "sindg",
"""
sindg(x)
Sine of angle given in degrees
""")
add_newdoc("scipy.special", "smirnov",
r"""
smirnov(n, d)
Kolmogorov-Smirnov complementary cumulative distribution function
Returns the exact Kolmogorov-Smirnov complementary cumulative
distribution function (also known as the Survival Function) of Dn+ (or Dn-)
for a one-sided test of equality between an empirical and a
theoretical distribution. It is equal to the probability that the
maximum difference between a theoretical distribution and an empirical
one based on `n` samples is greater than d.
Parameters
----------
n : int
Number of samples
d : float array_like
Deviation between the Empirical CDF (ECDF) and the target CDF.
Returns
-------
float
The value(s) of smirnov(n, d), Prob(Dn+ >= d) (Also Prob(Dn- >= d))
Notes
-----
`smirnov` is used by `stats.kstest` in the application of the
Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this
function is exposed in `scipy.special`, but the recommended way to achieve
the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
`stats.ksone` distribution.
See Also
--------
smirnovi : The Inverse Survival Function for the distribution
scipy.stats.ksone : Provides the functionality as a continuous distribution
kolmogorov, kolmogi : Functions for the two-sided distribution
Examples
--------
>>> from scipy.special import smirnov
Show the probability of a gap at least as big as 0, 0.5 and 1.0 for a sample of size 5
>>> smirnov(5, [0, 0.5, 1.0])
array([ 1. , 0.056, 0. ])
Compare a sample of size 5 drawn from a source N(0.5, 1) distribution against
a target N(0, 1) CDF.
>>> from scipy.stats import norm
>>> n = 5
>>> gendist = norm(0.5, 1) # Normal distribution, mean 0.5, stddev 1
>>> np.random.seed(seed=233423) # Set the seed for reproducibility
>>> x = np.sort(gendist.rvs(size=n))
>>> x
array([-0.20946287, 0.71688765, 0.95164151, 1.44590852, 3.08880533])
>>> target = norm(0, 1)
>>> cdfs = target.cdf(x)
>>> cdfs
array([ 0.41704346, 0.76327829, 0.82936059, 0.92589857, 0.99899518])
    Construct the Empirical CDF and the K-S statistics (Dn+, Dn-, Dn)
>>> ecdfs = np.arange(n+1, dtype=float)/n
>>> cols = np.column_stack([x, ecdfs[1:], cdfs, cdfs - ecdfs[:n], ecdfs[1:] - cdfs])
>>> np.set_printoptions(precision=3)
>>> cols
array([[ -2.095e-01, 2.000e-01, 4.170e-01, 4.170e-01, -2.170e-01],
[ 7.169e-01, 4.000e-01, 7.633e-01, 5.633e-01, -3.633e-01],
[ 9.516e-01, 6.000e-01, 8.294e-01, 4.294e-01, -2.294e-01],
[ 1.446e+00, 8.000e-01, 9.259e-01, 3.259e-01, -1.259e-01],
[ 3.089e+00, 1.000e+00, 9.990e-01, 1.990e-01, 1.005e-03]])
>>> gaps = cols[:, -2:]
>>> Dnpm = np.max(gaps, axis=0)
>>> print('Dn-=%f, Dn+=%f' % (Dnpm[0], Dnpm[1]))
Dn-=0.563278, Dn+=0.001005
>>> probs = smirnov(n, Dnpm)
>>> print(chr(10).join(['For a sample of size %d drawn from a N(0, 1) distribution:' % n,
... ' Smirnov n=%d: Prob(Dn- >= %f) = %.4f' % (n, Dnpm[0], probs[0]),
... ' Smirnov n=%d: Prob(Dn+ >= %f) = %.4f' % (n, Dnpm[1], probs[1])]))
For a sample of size 5 drawn from a N(0, 1) distribution:
Smirnov n=5: Prob(Dn- >= 0.563278) = 0.0250
Smirnov n=5: Prob(Dn+ >= 0.001005) = 0.9990
Plot the Empirical CDF against the target N(0, 1) CDF
>>> import matplotlib.pyplot as plt
>>> plt.step(np.concatenate([[-3], x]), ecdfs, where='post', label='Empirical CDF')
>>> x3 = np.linspace(-3, 3, 100)
>>> plt.plot(x3, target.cdf(x3), label='CDF for N(0, 1)')
>>> plt.ylim([0, 1]); plt.grid(True); plt.legend();
    Add vertical lines marking Dn+ and Dn-
>>> iminus, iplus = np.argmax(gaps, axis=0)
>>> plt.vlines([x[iminus]], ecdfs[iminus], cdfs[iminus], color='r', linestyle='dashed', lw=4)
>>> plt.vlines([x[iplus]], cdfs[iplus], ecdfs[iplus+1], color='m', linestyle='dashed', lw=4)
>>> plt.show()
""")
add_newdoc("scipy.special", "smirnovi",
"""
smirnovi(n, p)
Inverse to `smirnov`
Returns `d` such that ``smirnov(n, d) == p``, the critical value
corresponding to `p`.
Parameters
----------
n : int
Number of samples
p : float array_like
Probability
Returns
-------
float
The value(s) of smirnovi(n, p), the critical values.
Notes
-----
`smirnov` is used by `stats.kstest` in the application of the
    Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this
    function is exposed in `scipy.special`, but the recommended way to achieve
    the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
    `stats.ksone` distribution.
See Also
--------
smirnov : The Survival Function (SF) for the distribution
scipy.stats.ksone : Provides the functionality as a continuous distribution
kolmogorov, kolmogi, scipy.stats.kstwobign : Functions for the two-sided distribution
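    Examples
    --------
    A round-trip sketch with `smirnov` (outputs omitted):
    >>> from scipy.special import smirnov, smirnovi
    >>> d = smirnovi(5, [0.05, 0.5])  # critical deviations for n = 5
    >>> p = smirnov(5, d)             # recovers approximately [0.05, 0.5]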
""")
add_newdoc("scipy.special", "_smirnovc",
"""
_smirnovc(n, d)
Internal function, do not use.
""")
add_newdoc("scipy.special", "_smirnovci",
"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "_smirnovp",
"""
_smirnovp(n, p)
Internal function, do not use.
""")
add_newdoc("scipy.special", "spence",
r"""
spence(z, out=None)
Spence's function, also known as the dilogarithm.
It is defined to be
.. math::
      \int_1^z \frac{\log(t)}{1 - t}dt
for complex :math:`z`, where the contour of integration is taken
to avoid the branch cut of the logarithm. Spence's function is
analytic everywhere except the negative real axis where it has a
branch cut.
Parameters
----------
z : array_like
Points at which to evaluate Spence's function
Returns
-------
s : ndarray
Computed values of Spence's function
Notes
-----
There is a different convention which defines Spence's function by
the integral
.. math::
-\int_0^z \frac{\log(1 - t)}{t}dt;
this is our ``spence(1 - z)``.
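    Examples
    --------
    A brief sketch of the convention note above (outputs omitted):
    >>> from scipy.special import spence
    >>> s = spence(0.5)
    >>> dilog = spence(1 - 0.5)  # dilogarithm of 0.5 in the other convention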
""")
add_newdoc("scipy.special", "stdtr",
"""
stdtr(df, t)
Student t distribution cumulative distribution function
Returns the integral from minus infinity to t of the Student t
distribution with df > 0 degrees of freedom::
gamma((df+1)/2)/(sqrt(df*pi)*gamma(df/2)) *
integral((1+x**2/df)**(-df/2-1/2), x=-inf..t)
""")
add_newdoc("scipy.special", "stdtridf",
"""
stdtridf(p, t)
Inverse of `stdtr` vs df
Returns the argument df such that stdtr(df, t) is equal to `p`.
""")
add_newdoc("scipy.special", "stdtrit",
"""
stdtrit(df, p)
Inverse of `stdtr` vs `t`
Returns the argument `t` such that stdtr(df, t) is equal to `p`.
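    Examples
    --------
    A round-trip sketch (outputs omitted):
    >>> from scipy.special import stdtr, stdtrit
    >>> t = stdtrit(3, 0.975)  # upper quantile of a t distribution, df=3
    >>> p = stdtr(3, t)        # recovers approximately 0.975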
""")
add_newdoc("scipy.special", "struve",
r"""
struve(v, x)
Struve function.
Return the value of the Struve function of order `v` at `x`. The Struve
function is defined as,
.. math::
        H_v(x) = (x/2)^{v + 1} \sum_{n=0}^\infty \frac{(-1)^n (x/2)^{2n}}{\Gamma(n + \frac{3}{2}) \Gamma(n + v + \frac{3}{2})},
where :math:`\Gamma` is the gamma function.
Parameters
----------
v : array_like
Order of the Struve function (float).
x : array_like
Argument of the Struve function (float; must be positive unless `v` is
an integer).
Returns
-------
H : ndarray
Value of the Struve function of order `v` at `x`.
Notes
-----
Three methods discussed in [1]_ are used to evaluate the Struve function:
- power series
- expansion in Bessel functions (if :math:`|z| < |v| + 20`)
- asymptotic large-z expansion (if :math:`z \geq 0.7v + 12`)
Rounding errors are estimated based on the largest terms in the sums, and
the result associated with the smallest error is returned.
See also
--------
modstruve
References
----------
.. [1] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/11
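    Examples
    --------
    A minimal call-signature sketch (outputs omitted; the argument values are
    arbitrary and not tied to any particular evaluation regime):
    >>> import numpy as np
    >>> from scipy.special import struve
    >>> h0 = struve(0, np.array([1.0, 5.0, 20.0]))
    >>> h_half = struve(0.5, 2.0)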
""")
add_newdoc("scipy.special", "tandg",
"""
tandg(x)
Tangent of angle x given in degrees.
""")
add_newdoc("scipy.special", "tklmbda",
"""
tklmbda(x, lmbda)
Tukey-Lambda cumulative distribution function
""")
add_newdoc("scipy.special", "wofz",
"""
wofz(z)
Faddeeva function
Returns the value of the Faddeeva function for complex argument::
exp(-z**2) * erfc(-i*z)
See Also
--------
dawsn, erf, erfc, erfcx, erfi
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> z = special.wofz(x)
>>> plt.plot(x, z.real, label='wofz(x).real')
>>> plt.plot(x, z.imag, label='wofz(x).imag')
>>> plt.xlabel('$x$')
>>> plt.legend(framealpha=1, shadow=True)
>>> plt.grid(alpha=0.25)
>>> plt.show()
""")
add_newdoc("scipy.special", "xlogy",
"""
xlogy(x, y)
Compute ``x*log(y)`` so that the result is 0 if ``x = 0``.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log(y)
Notes
-----
.. versionadded:: 0.13.0
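    Examples
    --------
    A short sketch of the ``x = 0`` behaviour (outputs omitted):
    >>> import numpy as np
    >>> from scipy.special import xlogy
    >>> safe = xlogy(0, 0)     # 0 by definition, not nan
    >>> naive = 0 * np.log(0)  # nan (with a runtime warning)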
""")
add_newdoc("scipy.special", "xlog1py",
"""
xlog1py(x, y)
Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log1p(y)
Notes
-----
.. versionadded:: 0.13.0
""")
add_newdoc("scipy.special", "y0",
r"""
y0(x)
Bessel function of the second kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind of order 0 at `x`.
Notes
-----
The domain is divided into the intervals [0, 5] and (5, infinity). In the
first interval a rational approximation :math:`R(x)` is employed to
compute,
.. math::
Y_0(x) = R(x) + \frac{2 \log(x) J_0(x)}{\pi},
where :math:`J_0` is the Bessel function of the first kind of order 0.
In the second interval, the Hankel asymptotic expansion is employed with
two rational functions of degree 6/6 and 7/7.
This function is a wrapper for the Cephes [1]_ routine `y0`.
See also
--------
j0
yv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("scipy.special", "y1",
"""
y1(x)
Bessel function of the second kind of order 1.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind of order 1 at `x`.
Notes
-----
The domain is divided into the intervals [0, 8] and (8, infinity). In the
first interval a 25 term Chebyshev expansion is used, and computing
:math:`J_1` (the Bessel function of the first kind) is required. In the
second, the asymptotic trigonometric representation is employed using two
rational functions of degree 5/5.
This function is a wrapper for the Cephes [1]_ routine `y1`.
See also
--------
j1
yn
yv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("scipy.special", "yn",
r"""
yn(n, x)
Bessel function of the second kind of integer order and real argument.
Parameters
----------
n : array_like
Order (integer).
    x : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function, :math:`Y_n(x)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `yn`.
The function is evaluated by forward recurrence on `n`, starting with
values computed by the Cephes routines `y0` and `y1`. If `n = 0` or 1,
the routine for `y0` or `y1` is called directly.
See also
--------
yv : For real order and real or complex argument.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
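    Examples
    --------
    A small consistency sketch against `yv` (outputs omitted):
    >>> import numpy as np
    >>> from scipy.special import yn, yv
    >>> x = np.array([1.0, 2.0, 10.0])
    >>> y_int = yn(3, x)   # integer-order routine (forward recurrence)
    >>> y_real = yv(3, x)  # real-order routine; same values expected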
""")
add_newdoc("scipy.special", "yv",
r"""
yv(v, z)
Bessel function of the second kind of real order and complex argument.
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind, :math:`Y_v(x)`.
Notes
-----
For positive `v` values, the computation is carried out using the
AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
.. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
For negative `v` values the formula,
.. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
is used, where :math:`J_v(z)` is the Bessel function of the first kind,
computed using the AMOS routine `zbesj`. Note that the second term is
exactly zero for integer `v`; to improve accuracy the second term is
explicitly omitted for `v` values such that `v = floor(v)`.
See also
--------
yve : :math:`Y_v` with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "yve",
r"""
yve(v, z)
Exponentially scaled Bessel function of the second kind of real order.
Returns the exponentially scaled Bessel function of the second
kind of real order `v` at complex `z`::
yve(v, z) = yv(v, z) * exp(-abs(z.imag))
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
Y : ndarray
Value of the exponentially scaled Bessel function.
Notes
-----
For positive `v` values, the computation is carried out using the
AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
.. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
For negative `v` values the formula,
.. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
is used, where :math:`J_v(z)` is the Bessel function of the first kind,
computed using the AMOS routine `zbesj`. Note that the second term is
exactly zero for integer `v`; to improve accuracy the second term is
explicitly omitted for `v` values such that `v = floor(v)`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
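    Examples
    --------
    A sketch of the scaling relation quoted above (outputs omitted):
    >>> import numpy as np
    >>> from scipy.special import yv, yve
    >>> z = 1.0 + 10.0j
    >>> scaled = yve(1, z)
    >>> manual = yv(1, z) * np.exp(-abs(z.imag))  # should match ``scaled``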
""")
add_newdoc("scipy.special", "_zeta",
"""
_zeta(x, q)
Internal function, Hurwitz zeta.
""")
add_newdoc("scipy.special", "zetac",
"""
zetac(x)
Riemann zeta function minus 1.
This function is defined as
    .. math:: \\zeta(x) - 1 = \\sum_{k=2}^{\\infty} 1 / k^x,
where ``x > 1``. For ``x < 1``, the analytic continuation is computed.
Because of limitations of the numerical algorithm, ``zetac(x)`` returns
`nan` for `x` less than -30.8148.
Parameters
----------
x : array_like of float
Values at which to compute zeta(x) - 1 (must be real).
Returns
-------
out : array_like
Values of zeta(x) - 1.
See Also
--------
zeta
Examples
--------
>>> from scipy.special import zetac, zeta
Some special values:
>>> zetac(2), np.pi**2/6 - 1
(0.64493406684822641, 0.6449340668482264)
>>> zetac(-1), -1.0/12 - 1
(-1.0833333333333333, -1.0833333333333333)
Compare ``zetac(x)`` to ``zeta(x) - 1`` for large `x`:
>>> zetac(60), zeta(60) - 1
(8.673617380119933e-19, 0.0)
""")
add_newdoc("scipy.special", "_struve_asymp_large_z",
"""
_struve_asymp_large_z(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using asymptotic expansion
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_power_series",
"""
_struve_power_series(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using power series
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_bessel_series",
"""
_struve_bessel_series(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using Bessel function series
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_spherical_jn",
"""
Internal function, use `spherical_jn` instead.
""")
add_newdoc("scipy.special", "_spherical_jn_d",
"""
Internal function, use `spherical_jn` instead.
""")
add_newdoc("scipy.special", "_spherical_yn",
"""
Internal function, use `spherical_yn` instead.
""")
add_newdoc("scipy.special", "_spherical_yn_d",
"""
Internal function, use `spherical_yn` instead.
""")
add_newdoc("scipy.special", "_spherical_in",
"""
Internal function, use `spherical_in` instead.
""")
add_newdoc("scipy.special", "_spherical_in_d",
"""
Internal function, use `spherical_in` instead.
""")
add_newdoc("scipy.special", "_spherical_kn",
"""
Internal function, use `spherical_kn` instead.
""")
add_newdoc("scipy.special", "_spherical_kn_d",
"""
Internal function, use `spherical_kn` instead.
""")
add_newdoc("scipy.special", "loggamma",
r"""
loggamma(z, out=None)
Principal branch of the logarithm of the Gamma function.
Defined to be :math:`\log(\Gamma(x))` for :math:`x > 0` and
extended to the complex plane by analytic continuation. The
function has a single branch cut on the negative real axis.
.. versionadded:: 0.18.0
Parameters
----------
    z : array_like
        Values in the complex plane at which to compute ``loggamma``
out : ndarray, optional
Output array for computed values of ``loggamma``
Returns
-------
loggamma : ndarray
Values of ``loggamma`` at z.
Notes
-----
It is not generally true that :math:`\log\Gamma(z) =
\log(\Gamma(z))`, though the real parts of the functions do
agree. The benefit of not defining `loggamma` as
:math:`\log(\Gamma(z))` is that the latter function has a
complicated branch cut structure whereas `loggamma` is analytic
except for on the negative real axis.
The identities
.. math::
\exp(\log\Gamma(z)) &= \Gamma(z) \\
\log\Gamma(z + 1) &= \log(z) + \log\Gamma(z)
make `loggamma` useful for working in complex logspace.
On the real line `loggamma` is related to `gammaln` via
``exp(loggamma(x + 0j)) = gammasgn(x)*exp(gammaln(x))``, up to
rounding error.
The implementation here is based on [hare1997]_.
See also
--------
gammaln : logarithm of the absolute value of the Gamma function
gammasgn : sign of the gamma function
References
----------
.. [hare1997] D.E.G. Hare,
*Computing the Principal Branch of log-Gamma*,
Journal of Algorithms, Volume 25, Issue 2, November 1997, pages 221-236.
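    Examples
    --------
    A sketch of the real-line relation quoted in the Notes (outputs omitted):
    >>> import numpy as np
    >>> from scipy.special import loggamma, gammaln, gammasgn
    >>> x = np.array([0.5, 2.5, 10.0])
    >>> lhs = np.exp(loggamma(x + 0j))
    >>> rhs = gammasgn(x) * np.exp(gammaln(x))  # equal up to rounding error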
""")
add_newdoc("scipy.special", "_sinpi",
"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "_cospi",
"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "owens_t",
"""
owens_t(h, a)
Owen's T Function.
The function T(h, a) gives the probability of the event
(X > h and 0 < Y < a * X) where X and Y are independent
standard normal random variables.
Parameters
----------
    h : array_like
Input value.
    a : array_like
Input value.
Returns
-------
    t : scalar or ndarray
Probability of the event (X > h and 0 < Y < a * X),
where X and Y are independent standard normal random variables.
Examples
--------
>>> from scipy import special
>>> a = 3.5
>>> h = 0.78
>>> special.owens_t(h, a)
0.10877216734852274
References
----------
.. [1] M. Patefield and D. Tandy, "Fast and accurate calculation of
Owen's T Function", Statistical Software vol. 5, pp. 1-25, 2000.
""")
| bsd-3-clause |
ElDeveloper/qiita | qiita_ware/commands.py | 2 | 14020 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from os.path import basename, isdir, join
from shutil import rmtree
from tarfile import open as taropen
from tempfile import mkdtemp
from os import environ, stat
from traceback import format_exc
from paramiko import AutoAddPolicy, RSAKey, SSHClient
from scp import SCPClient
from urllib.parse import urlparse
from functools import partial
import pandas as pd
from qiita_db.artifact import Artifact
from qiita_db.logger import LogEntry
from qiita_db.processing_job import _system_call as system_call
from qiita_core.qiita_settings import qiita_config
from qiita_ware.ebi import EBISubmission
from qiita_ware.exceptions import ComputeError, EBISubmissionError
def _ssh_session(p_url, private_key):
"""Initializes an SSH session
Parameters
----------
p_url : urlparse object
a parsed url
private_key : str
Path to the private key used to authenticate connection
Returns
-------
paramiko.SSHClient
the SSH session
"""
scheme = p_url.scheme
hostname = p_url.hostname
# if port is '' Python 2.7.6 will raise an error
try:
port = p_url.port
except Exception:
port = 22
username = p_url.username
if scheme == 'scp' or scheme == 'sftp':
# if port not specified, use default 22 as port
if port is None:
port = 22
# step 1: both schemes require an SSH connection
ssh = SSHClient()
ssh.set_missing_host_key_policy(AutoAddPolicy)
# step 2: connect to fileserver
key = RSAKey.from_private_key_file(private_key)
ssh.connect(hostname, port=port, username=username,
pkey=key, look_for_keys=False)
return ssh
else:
raise ValueError(
            'Invalid scheme. Valid options are scp and sftp.')
def _list_valid_files(ssh, directory):
"""Gets a list of valid study files from ssh session
Parameters
----------
ssh : paramiko.SSHClient
        An initialized SSH session
directory : str
the directory to search for files
Returns
-------
list of str
list of valid study files (basenames)
"""
valid_file_extensions = tuple(qiita_config.valid_upload_extension)
sftp = ssh.open_sftp()
files = sftp.listdir(directory)
valid_files = [f for f in files if f.endswith(valid_file_extensions)]
sftp.close()
return valid_files
def list_remote(URL, private_key):
"""Retrieve valid study files from a remote directory
Parameters
----------
URL : str
The url to the remote directory
private_key : str
Path to the private key used to authenticate connection
Returns
-------
list of str
list of files that are valid study files
Notes
-----
Only the allowed extensions described by the config file
will be listed.
"""
p_url = urlparse(URL)
directory = p_url.path
ssh = _ssh_session(p_url, private_key)
valid_files = _list_valid_files(ssh, directory)
ssh.close()
return valid_files
def download_remote(URL, private_key, destination):
"""Add study files by specifying a remote directory to download from
Parameters
----------
URL : str
The url to the remote directory
private_key : str
Path to the private key used to authenticate connection
destination : str
The path to the study upload folder
"""
# step 1: initialize connection and list valid files
p_url = urlparse(URL)
ssh = _ssh_session(p_url, private_key)
directory = p_url.path
valid_files = _list_valid_files(ssh, directory)
file_paths = [join(directory, f) for f in valid_files]
# step 2: download files
scheme = p_url.scheme
    # note that the scp and sftp branches look similar, but the keyword
    # argument differs (scp uses local_path, sftp uses localpath)
if scheme == 'scp':
scp = SCPClient(ssh.get_transport())
for f in file_paths:
download = partial(
scp.get, local_path=join(destination, basename(f)))
download(f)
elif scheme == 'sftp':
sftp = ssh.open_sftp()
for f in file_paths:
download = partial(
sftp.get, localpath=join(destination, basename(f)))
download(f)
# step 3: close the connection
ssh.close()
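# Illustrative usage of the two helpers above (the host, key path and upload
# folder are hypothetical, not Qiita defaults):
#     files = list_remote('scp://user@files.example.org/uploads/study_1',
#                         '/home/qiita/.ssh/transfer_key')
#     download_remote('scp://user@files.example.org/uploads/study_1',
#                     '/home/qiita/.ssh/transfer_key',
#                     '/qiita/uploads/1')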
def submit_EBI(artifact_id, action, send, test=False, test_size=False):
"""Submit an artifact to EBI
Parameters
----------
artifact_id : int
The artifact id
action : %s
The action to perform with this data
send : bool
True to actually send the files
test : bool
If True some restrictions will be ignored, only used in parse_EBI_reply
test_size : bool
        If True the EBI-ENA restriction size will be changed to 5000
"""
# step 1: init and validate
ebi_submission = EBISubmission(artifact_id, action)
# step 2: generate demux fastq files
try:
ebi_submission.generate_demultiplexed_fastq()
except Exception:
error_msg = format_exc()
if isdir(ebi_submission.full_ebi_dir):
rmtree(ebi_submission.full_ebi_dir)
LogEntry.create('Runtime', error_msg,
info={'ebi_submission': artifact_id})
raise
# step 3: generate and write xml files
ebi_submission.generate_xml_files()
# before we continue let's check the size of the submission
to_review = [ebi_submission.study_xml_fp,
ebi_submission.sample_xml_fp,
ebi_submission.experiment_xml_fp,
ebi_submission.run_xml_fp,
ebi_submission.submission_xml_fp]
total_size = sum([stat(tr).st_size for tr in to_review if tr is not None])
# note that the max for EBI is 10M but let's play it safe
max_size = 10e+6 if not test_size else 5000
if total_size > max_size:
LogEntry.create(
'Runtime', 'The submission: %d is larger than allowed (%d), will '
'try to fix: %d' % (artifact_id, max_size, total_size))
# transform current metadata to dataframe for easier curation
rows = {k: dict(v) for k, v in ebi_submission.samples.items()}
df = pd.DataFrame.from_dict(rows, orient='index')
# remove unique columns and same value in all columns
nunique = df.apply(pd.Series.nunique)
nsamples = len(df.index)
cols_to_drop = set(
nunique[(nunique == 1) | (nunique == nsamples)].index)
# maximize deletion by removing also columns that are almost all the
# same or almost all unique
        cols_to_drop |= set(
            nunique[(nunique <= int(nsamples * .01)) |
                    (nunique >= int(nsamples * .5))].index)
cols_to_drop = cols_to_drop - {'taxon_id', 'scientific_name',
'description'}
all_samples = ebi_submission.sample_template.ebi_sample_accessions
samples = [k for k in ebi_submission.samples if all_samples[k] is None]
if samples:
ebi_submission.write_xml_file(
ebi_submission.generate_sample_xml(samples, cols_to_drop),
ebi_submission.sample_xml_fp)
# now let's recalculate the size to make sure it's fine
new_total_size = sum([stat(tr).st_size
for tr in to_review if tr is not None])
LogEntry.create(
'Runtime', 'The submission: %d after cleaning is %d and was %d' % (
                artifact_id, new_total_size, total_size))
if new_total_size > max_size:
raise ComputeError(
'Even after cleaning the submission: %d is too large. Before '
'cleaning: %d, after: %d' % (
artifact_id, total_size, new_total_size))
st_acc, sa_acc, bio_acc, ex_acc, run_acc = None, None, None, None, None
if send:
# getting aspera's password
old_ascp_pass = environ.get('ASPERA_SCP_PASS', '')
if old_ascp_pass == '':
environ['ASPERA_SCP_PASS'] = qiita_config.ebi_seq_xfer_pass
ascp_passwd = environ['ASPERA_SCP_PASS']
LogEntry.create('Runtime',
('Submission of sequences of pre_processed_id: '
'%d completed successfully' % artifact_id))
# step 4: sending sequences
if action != 'MODIFY':
LogEntry.create('Runtime',
("Submitting sequences for pre_processed_id: "
"%d" % artifact_id))
for cmd in ebi_submission.generate_send_sequences_cmd():
stdout, stderr, rv = system_call(cmd)
if rv != 0:
error_msg = ("ASCP Error:\nStd output:%s\nStd error:%s" % (
stdout, stderr))
environ['ASPERA_SCP_PASS'] = old_ascp_pass
raise ComputeError(error_msg)
open(ebi_submission.ascp_reply, 'a').write(
'stdout:\n%s\n\nstderr: %s' % (stdout, stderr))
environ['ASPERA_SCP_PASS'] = old_ascp_pass
# step 5: sending xml
xmls_cmds = ebi_submission.generate_curl_command(
ebi_seq_xfer_pass=ascp_passwd)
LogEntry.create('Runtime',
("Submitting XMLs for pre_processed_id: "
"%d" % artifact_id))
xml_content, stderr, rv = system_call(xmls_cmds)
if rv != 0:
error_msg = ("Error:\nStd output:%s\nStd error:%s" % (
xml_content, stderr))
raise ComputeError(error_msg)
else:
LogEntry.create('Runtime',
('Submission of sequences of pre_processed_id: '
'%d completed successfully' % artifact_id))
open(ebi_submission.curl_reply, 'w').write(
'stdout:\n%s\n\nstderr: %s' % (xml_content, stderr))
# parsing answer / only if adding
if action == 'ADD' or test:
try:
st_acc, sa_acc, bio_acc, ex_acc, run_acc = \
ebi_submission.parse_EBI_reply(xml_content, test=test)
except EBISubmissionError as e:
error = str(e)
le = LogEntry.create(
'Fatal', "Command: %s\nError: %s\n" % (xml_content, error),
info={'ebi_submission': artifact_id})
raise ComputeError(
"EBI Submission failed! Log id: %d\n%s" % (le.id, error))
if st_acc:
ebi_submission.study.ebi_study_accession = st_acc
if sa_acc:
ebi_submission.sample_template.ebi_sample_accessions = sa_acc
if bio_acc:
ebi_submission.sample_template.biosample_accessions = bio_acc
if ex_acc:
ebi_submission.prep_template.ebi_experiment_accessions = ex_acc
ebi_submission.artifact.ebi_run_accessions = run_acc
return st_acc, sa_acc, bio_acc, ex_acc, run_acc
def submit_VAMPS(artifact_id):
"""Submit artifact to VAMPS
Parameters
----------
artifact_id : int
The artifact id
Raises
------
ComputeError
- If the artifact cannot be submitted to VAMPS
- If the artifact is associated with more than one prep template
"""
artifact = Artifact(artifact_id)
if not artifact.can_be_submitted_to_vamps:
raise ComputeError("Artifact %d cannot be submitted to VAMPS"
% artifact_id)
study = artifact.study
sample_template = study.sample_template
prep_templates = artifact.prep_templates
if len(prep_templates) > 1:
raise ComputeError(
"Multiple prep templates associated with the artifact: %s"
% artifact_id)
prep_template = prep_templates[0]
    # Also need to check that it is not already submitted (see item in #1523)
if artifact.is_submitted_to_vamps:
raise ValueError("Cannot resubmit artifact %s to VAMPS!" % artifact_id)
# Generating a tgz
targz_folder = mkdtemp(prefix=qiita_config.working_dir)
targz_fp = join(targz_folder, '%d_%d_%d.tgz' % (study.id,
prep_template.id,
artifact_id))
targz = taropen(targz_fp, mode='w:gz')
# adding sample/prep
samp_fp = join(targz_folder, 'sample_metadata.txt')
sample_template.to_file(samp_fp)
targz.add(samp_fp, arcname='sample_metadata.txt')
prep_fp = join(targz_folder, 'prep_metadata.txt')
prep_template.to_file(prep_fp)
targz.add(prep_fp, arcname='prep_metadata.txt')
# adding preprocessed data
for x in artifact.filepaths:
if x['fp_type'] == 'preprocessed_fasta':
targz.add(x['fp'], arcname='preprocessed_fasta.fna')
targz.close()
# submitting
cmd = ("curl -F user=%s -F pass='%s' -F uploadFile=@%s -F "
"press=UploadFile %s" % (qiita_config.vamps_user,
qiita_config.vamps_pass,
targz_fp,
qiita_config.vamps_url))
obs, stderr, rv = system_call(cmd)
if rv != 0:
error_msg = ("Error:\nStd output:%s\nStd error:%s" % (obs, stderr))
raise ComputeError(error_msg)
exp = ("<html>\n<head>\n<title>Process Uploaded File</title>\n</head>\n"
"<body>\n</body>\n</html>")
if obs != exp:
return False
else:
artifact.is_submitted_to_vamps = True
return True
| bsd-3-clause |
andrewnc/scikit-learn | examples/svm/plot_oneclass.py | 249 | 2302 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
ishanic/scikit-learn | sklearn/neighbors/tests/test_kde.py | 208 | 5556 | import numpy as np
from sklearn.utils.testing import (assert_allclose, assert_raises,
assert_equal)
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors.ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs
from sklearn.grid_search import GridSearchCV
from sklearn.preprocessing import StandardScaler
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kernel_density(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_features)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for bandwidth in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)
def check_results(kernel, bandwidth, atol, rtol):
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth,
atol=atol, rtol=rtol)
log_dens = kde.fit(X).score_samples(Y)
assert_allclose(np.exp(log_dens), dens_true,
atol=atol, rtol=max(1E-7, rtol))
assert_allclose(np.exp(kde.score(Y)),
np.prod(dens_true),
atol=atol, rtol=max(1E-7, rtol))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, bandwidth, atol, rtol)
def test_kernel_density_sampling(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
bandwidth = 0.2
for kernel in ['gaussian', 'tophat']:
# draw a tophat sample
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
samp = kde.sample(100)
assert_equal(X.shape, samp.shape)
# check that samples are in the right range
nbrs = NearestNeighbors(n_neighbors=1).fit(X)
dist, ind = nbrs.kneighbors(X, return_distance=True)
if kernel == 'tophat':
assert np.all(dist < bandwidth)
elif kernel == 'gaussian':
# 5 standard deviations is safe for 100 samples, but there's a
# very small chance this test could fail.
assert np.all(dist < 5 * bandwidth)
# check unsupported kernels
for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
assert_raises(NotImplementedError, kde.sample, 100)
# non-regression test: used to return a scalar
X = rng.randn(4, 1)
kde = KernelDensity(kernel="gaussian").fit(X)
assert_equal(kde.sample().shape, (1, 1))
def test_kde_algorithm_metric_choice():
# Smoke test for various metrics and algorithms
rng = np.random.RandomState(0)
X = rng.randn(10, 2) # 2 features required for haversine dist.
Y = rng.randn(10, 2)
for algorithm in ['auto', 'ball_tree', 'kd_tree']:
for metric in ['euclidean', 'minkowski', 'manhattan',
'chebyshev', 'haversine']:
if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
assert_raises(ValueError, KernelDensity,
algorithm=algorithm, metric=metric)
else:
kde = KernelDensity(algorithm=algorithm, metric=metric)
kde.fit(X)
y_dens = kde.score_samples(Y)
assert_equal(y_dens.shape, Y.shape[:1])
def test_kde_score(n_samples=100, n_features=3):
pass
#FIXME
#np.random.seed(0)
#X = np.random.random((n_samples, n_features))
#Y = np.random.random((n_samples, n_features))
def test_kde_badargs():
assert_raises(ValueError, KernelDensity,
algorithm='blah')
assert_raises(ValueError, KernelDensity,
bandwidth=0)
assert_raises(ValueError, KernelDensity,
kernel='blah')
assert_raises(ValueError, KernelDensity,
metric='blah')
assert_raises(ValueError, KernelDensity,
algorithm='kd_tree', metric='blah')
def test_kde_pipeline_gridsearch():
# test that kde plays nice in pipelines and grid-searches
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
pipe1 = make_pipeline(StandardScaler(with_mean=False, with_std=False),
KernelDensity(kernel="gaussian"))
params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10])
search = GridSearchCV(pipe1, param_grid=params, cv=5)
search.fit(X)
assert_equal(search.best_params_['kerneldensity__bandwidth'], .1)
| bsd-3-clause |
elsdrium/.unix_settings | .jupyter/jupyter_notebook_config.py | 1 | 19155 | # Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Configurable configuration
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# SingletonConfigurable configuration
#------------------------------------------------------------------------------
# A configurable that only allows one instance.
#
# This class is for classes that should only have one instance of itself or
# *any* subclass. To create and retrieve such a class use the
# :meth:`SingletonConfigurable.instance` method.
#------------------------------------------------------------------------------
# Application configuration
#------------------------------------------------------------------------------
# This is an application.
# The date format used by logging formatters for %(asctime)s
# c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The Logging format template
# c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Set the log level by value or name.
# c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp configuration
#------------------------------------------------------------------------------
# Base class for Jupyter applications
# Answer yes to any prompts.
# c.JupyterApp.answer_yes = False
# Generate default config file.
# c.JupyterApp.generate_config = False
# Full path of a config file.
# c.JupyterApp.config_file = ''
# Specify a config file to load.
# c.JupyterApp.config_file_name = ''
#------------------------------------------------------------------------------
# NotebookApp configuration
#------------------------------------------------------------------------------
# The notebook manager class to use.
# c.NotebookApp.contents_manager_class = <class 'notebook.services.contents.filemanager.FileContentsManager'>
# The login handler class to use.
# c.NotebookApp.login_handler_class = <class 'notebook.auth.login.LoginHandler'>
# The url for MathJax.js.
# c.NotebookApp.mathjax_url = ''
# extra paths to look for Javascript notebook extensions
# c.NotebookApp.extra_nbextensions_path = traitlets.Undefined
# The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
# c.NotebookApp.websocket_url = ''
# DEPRECATED use base_url
# c.NotebookApp.base_project_url = '/'
# The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
# c.NotebookApp.base_url = '/'
# The default URL to redirect to from `/`
# c.NotebookApp.default_url = '/tree'
# Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library IPython uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
# c.NotebookApp.enable_mathjax = True
# The IP address the notebook server will listen on.
c.NotebookApp.ip = '*'
# The port the notebook server will listen on.
c.NotebookApp.port = 9999
# Set the Access-Control-Allow-Credentials: true header
# c.NotebookApp.allow_credentials = False
# Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
# c.NotebookApp.browser = ''
# The directory to use for notebooks and kernels.
# c.NotebookApp.notebook_dir = ''
# The full path to an SSL/TLS certificate file.
c.NotebookApp.certfile = u'/etc/jupyter/mycert.pem'
# The number of additional ports to try if the specified port is not available.
# c.NotebookApp.port_retries = 50
# The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
# c.NotebookApp.cookie_secret = b''
#
# c.NotebookApp.file_to_run = ''
# Reraise exceptions encountered loading server extensions?
# c.NotebookApp.reraise_server_extension_failures = False
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
# c.NotebookApp.tornado_settings = traitlets.Undefined
# Extra variables to supply to jinja templates when rendering.
# c.NotebookApp.jinja_template_vars = traitlets.Undefined
# The session manager class to use.
# c.NotebookApp.session_manager_class = <class 'notebook.services.sessions.sessionmanager.SessionManager'>
# Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
# c.NotebookApp.extra_static_paths = traitlets.Undefined
# The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of IPython and the next stable one.
# c.NotebookApp.kernel_spec_manager_class = <class 'jupyter_client.kernelspec.KernelSpecManager'>
# The logout handler class to use.
# c.NotebookApp.logout_handler_class = <class 'notebook.auth.logout.LogoutHandler'>
# Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
c.NotebookApp.open_browser = False
# Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
c.NotebookApp.password = u'sha1:34021a6e32ab:f4f5416b2264624938a19c0de8ed091d82f291be'
# The full path to a private key file for usage with SSL/TLS.
c.NotebookApp.keyfile = '/etc/jupyter/mykey.key'
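# Note: a self-signed certificate/key pair matching the paths above can be
# generated with OpenSSL (a sketch; it will prompt for certificate subject
# fields):
#   openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
#     -keyout /etc/jupyter/mykey.key -out /etc/jupyter/mycert.pem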
# Supply extra arguments that will be passed to Jinja environment.
# c.NotebookApp.jinja_environment_options = traitlets.Undefined
# Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
# c.NotebookApp.allow_origin_pat = ''
# Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
# c.NotebookApp.ssl_options = traitlets.Undefined
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
# c.NotebookApp.trust_xheaders = False
# DEPRECATED, use tornado_settings
# c.NotebookApp.webapp_settings = traitlets.Undefined
# Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
# c.NotebookApp.extra_template_paths = traitlets.Undefined
# The file where the cookie secret is stored.
# c.NotebookApp.cookie_secret_file = ''
# Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
# c.NotebookApp.allow_origin = ''
# The kernel manager class to use.
# c.NotebookApp.kernel_manager_class = <class 'notebook.services.kernels.kernelmanager.MappingKernelManager'>
# DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
# c.NotebookApp.pylab = 'disabled'
# Python modules to load as notebook server extensions. This is an experimental
# API, and may change in future releases.
# c.NotebookApp.server_extensions = traitlets.Undefined
# The config manager class to use
# c.NotebookApp.config_manager_class = <class 'notebook.services.config.manager.ConfigManager'>
# ipyparallel
# c.NotebookApp.server_extensions.append('ipyparallel.nbextension')
#------------------------------------------------------------------------------
# LoggingConfigurable configuration
#------------------------------------------------------------------------------
# A parent class for Configurables that log.
#
# Subclasses have a log trait, and the default behavior is to get the logger
# from the currently running Application.
#------------------------------------------------------------------------------
# ConnectionFileMixin configuration
#------------------------------------------------------------------------------
# Mixin for configurable classes that work with connection files
# set the control (ROUTER) port [default: random]
# c.ConnectionFileMixin.control_port = 0
# set the iopub (PUB) port [default: random]
# c.ConnectionFileMixin.iopub_port = 0
#
# c.ConnectionFileMixin.transport = 'tcp'
# set the heartbeat port [default: random]
# c.ConnectionFileMixin.hb_port = 0
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.ConnectionFileMixin.connection_file = ''
# set the shell (ROUTER) port [default: random]
# c.ConnectionFileMixin.shell_port = 0
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.ConnectionFileMixin.ip = ''
# set the stdin (ROUTER) port [default: random]
# c.ConnectionFileMixin.stdin_port = 0
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the Jupyter command
# line.
# c.KernelManager.kernel_cmd = traitlets.Undefined
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# Username for the Session. Default is your system username.
# c.Session.username = 'elsdrm'
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = traitlets.Undefined
# Debug output in the Session
# c.Session.debug = False
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# path to file containing execution key.
# c.Session.keyfile = ''
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# execution key, for signing messages.
# c.Session.key = b''
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# The UUID identifying this session.
# c.Session.session = ''
#------------------------------------------------------------------------------
# MultiKernelManager configuration
#------------------------------------------------------------------------------
# A class for managing multiple kernels.
# The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
# c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
# The name of the default kernel to start
# c.MultiKernelManager.default_kernel_name = 'python3'
#------------------------------------------------------------------------------
# MappingKernelManager configuration
#------------------------------------------------------------------------------
# A KernelManager that handles notebook mapping and HTTP error handling
#
# c.MappingKernelManager.root_dir = ''
#------------------------------------------------------------------------------
# ContentsManager configuration
#------------------------------------------------------------------------------
# Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
#
# c.ContentsManager.checkpoints = traitlets.Undefined
# Glob patterns to hide in file and directory listings.
# c.ContentsManager.hide_globs = traitlets.Undefined
# Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
# c.ContentsManager.pre_save_hook = None
#
# c.ContentsManager.checkpoints_class = <class 'notebook.services.contents.checkpoints.Checkpoints'>
# The base name used when creating untitled files.
# c.ContentsManager.untitled_file = 'untitled'
# The base name used when creating untitled directories.
# c.ContentsManager.untitled_directory = 'Untitled Folder'
# The base name used when creating untitled notebooks.
# c.ContentsManager.untitled_notebook = 'Untitled'
#
# c.ContentsManager.checkpoints_kwargs = traitlets.Undefined
#------------------------------------------------------------------------------
# FileContentsManager configuration
#------------------------------------------------------------------------------
# DEPRECATED, use post_save_hook
# c.FileContentsManager.save_script = False
#
# c.FileContentsManager.root_dir = ''
# Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written
# - model: the model representing the file
# - contents_manager: this ContentsManager instance
# c.FileContentsManager.post_save_hook = None
#------------------------------------------------------------------------------
# NotebookNotary configuration
#------------------------------------------------------------------------------
# A class for computing and verifying notebook signatures.
# The file where the secret key is stored.
# c.NotebookNotary.secret_file = ''
# The hashing algorithm used to sign notebooks.
# c.NotebookNotary.algorithm = 'sha256'
# The secret key with which notebooks are signed.
# c.NotebookNotary.secret = b''
# The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter runtime directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
# c.NotebookNotary.db_file = ''
# The number of notebook signatures to cache. When the number of signatures
# exceeds this value, the oldest 25% of signatures will be culled.
# c.NotebookNotary.cache_size = 65535
#------------------------------------------------------------------------------
# KernelSpecManager configuration
#------------------------------------------------------------------------------
# Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
# c.KernelSpecManager.whitelist = traitlets.Undefined
| mit |
harlowja/networkx | examples/graph/knuth_miles.py | 50 | 2994 | #!/usr/bin/env python
"""
An example using networkx.Graph().
miles_graph() returns an undirected graph over the 128 US cities from
the datafile miles_dat.txt. The cities each have location and population
data. The edges are labeled with the distance between the two cities.
This example is described in Section 1.1 in Knuth's book [1,2].
References.
-----------
[1] Donald E. Knuth,
"The Stanford GraphBase: A Platform for Combinatorial Computing",
ACM Press, New York, 1993.
[2] http://www-cs-faculty.stanford.edu/~knuth/sgb.html
"""
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
def miles_graph():
""" Return the cites example graph in miles_dat.txt
from the Stanford GraphBase.
"""
# open file miles_dat.txt.gz (or miles_dat.txt)
import gzip
fh = gzip.open('knuth_miles.txt.gz','r')
G=nx.Graph()
G.position={}
G.population={}
cities=[]
for line in fh.readlines():
line = line.decode()
if line.startswith("*"): # skip comments
continue
numfind=re.compile("^\d+")
if numfind.match(line): # this line is distances
dist=line.split()
for d in dist:
G.add_edge(city,cities[i],weight=int(d))
i=i+1
else: # this line is a city, position, population
i=1
(city,coordpop)=line.split("[")
cities.insert(0,city)
(coord,pop)=coordpop.split("]")
(y,x)=coord.split(",")
G.add_node(city)
# assign position - flip x axis for matplotlib, shift origin
G.position[city]=(-int(x)+7500,int(y)-3000)
G.population[city]=float(pop)/1000.0
return G
if __name__ == '__main__':
import networkx as nx
import re
import sys
G=miles_graph()
print("Loaded miles_dat.txt containing 128 cities.")
print("digraph has %d nodes with %d edges"\
%(nx.number_of_nodes(G),nx.number_of_edges(G)))
# make new graph of cites, edge if less then 300 miles between them
H=nx.Graph()
for v in G:
H.add_node(v)
for (u,v,d) in G.edges(data=True):
if d['weight'] < 300:
H.add_edge(u,v)
# draw with matplotlib/pylab
try:
import matplotlib.pyplot as plt
plt.figure(figsize=(8,8))
# with nodes colored by degree sized by population
node_color=[float(H.degree(v)) for v in H]
nx.draw(H,G.position,
node_size=[G.population[v] for v in H],
node_color=node_color,
with_labels=False)
# scale the axes equally
plt.xlim(-5000,500)
plt.ylim(-2000,3500)
plt.savefig("knuth_miles.png")
except:
pass
| bsd-3-clause |
wlamond/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 86 | 1234 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
    z = y_pred * y_true
    # linear penalty -4*z for badly misclassified points (z < -1)
    loss = -4 * z
    # squared hinge penalty (1 - z)^2 inside the margin (-1 <= z < 1)
    loss[z >= -1] = (1 - z[z >= -1]) ** 2
    # no penalty for confidently correct predictions (z >= 1)
    loss[z >= 1.] = 0
    return loss
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
lw = 2
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], color='gold', lw=lw,
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), color='teal', lw=lw,
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), color='yellowgreen', lw=lw,
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), color='cornflowerblue', lw=lw,
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, color='orange', lw=lw,
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), color='darkorchid', lw=lw,
linestyle='--', label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y=1, f(x))$")
plt.show()
| bsd-3-clause |
Cysu/Person-Reid | reid/preproc/imageproc.py | 1 | 3450 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
import numpy
import skimage.transform
from skimage.color import rgb2lab
from sklearn.preprocessing import MinMaxScaler, Binarizer
def imtranslate(image, translation):
trans = skimage.transform.AffineTransform(translation=translation)
ret = skimage.transform.warp(image, trans.inverse)
if image.dtype == numpy.uint8:
ret = (ret * 255).astype(numpy.uint8)
return ret
def imresize(image, shape, keep_ratio=False):
"""Resize an image to desired shape
Args:
image: A numpy 2d/3d array
shape: A tuple (h, w) representing the desired height and width
keep_ratio:
False: The image will be stretched
'height': The original height/weight ratio will be reserved. Image
will be scaled to the desired height. Extra columns will be
either truncated or filled with zero.
'width': The original height/weight ratio will be reserved. Image
will be scaled to the desired width. Extra rows will be either
truncated or filled with zero.
"""
if image.ndim == 3:
shape += (image.shape[2],)
elif image.ndim != 2:
raise ValueError("Invalid image dimension")
if keep_ratio == False:
ret = skimage.transform.resize(image, shape)
elif keep_ratio == 'height':
scale = shape[0] * 1.0 / image.shape[0]
image = skimage.transform.rescale(image, scale)
width = image.shape[1]
if width >= shape[1]:
l = (width - shape[1]) // 2
if image.ndim == 3:
ret = image[:, l:shape[1]+l, :]
elif image.ndim == 2:
ret = image[:, l:shape[1]+l]
else:
l = (shape[1] - width) // 2
ret = numpy.zeros(shape)
if image.ndim == 3:
ret[:, l:width+l, :] = image
elif image.ndim == 2:
ret[:, l:width+l] = image
elif keep_ratio == 'width':
scale = shape[1] * 1.0 / image.shape[1]
image = skimage.transform.rescale(image, scale)
height = image.shape[0]
if height >= shape[0]:
l = (height - shape[0]) // 2
if image.ndim == 3:
ret = image[l:shape[0]+l, :, :]
elif image.ndim == 2:
ret = image[l:shape[0]+l, :]
else:
l = (shape[0] - height) // 2
ret = numpy.zeros(shape)
if image.ndim == 3:
ret[l:height+l, :, :] = image
elif image.ndim == 2:
ret[l:height+l, :] = image
else:
raise ValueError("Invalid argument ``keep_ratio``")
if image.dtype == numpy.uint8:
ret = (ret * 255).astype(numpy.uint8)
return ret
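# Usage sketch (not part of the original module): how the keep_ratio modes of
# imresize behave on a hypothetical 100x50 grayscale array. The array and the
# target shape are made up; the shape comments follow from the logic above.
def _imresize_usage_sketch():
    img = numpy.zeros((100, 50), dtype=numpy.uint8)
    stretched = imresize(img, (60, 60), keep_ratio=False)     # simply stretched to (60, 60)
    by_height = imresize(img, (60, 60), keep_ratio='height')  # scaled to height 60, width padded/cropped
    by_width = imresize(img, (60, 60), keep_ratio='width')    # scaled to width 60, height padded/cropped
    return stretched.shape, by_height.shape, by_width.shape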
def subtract_luminance(rgbimg, mean_luminance=None):
labimg = rgb2lab(rgbimg)
if mean_luminance is None:
mean_luminance = numpy.mean(labimg[:,:,0])
labimg[:,:,0] -= mean_luminance
return labimg
def scale_per_channel(img, scale_range):
h, w, c = img.shape
img = img.reshape(h*w, c)
scaler = MinMaxScaler(scale_range, copy=False)
img = scaler.fit_transform(img)
return img.reshape(h, w, c)
def binarize(img, threshold):
binarizer = Binarizer(threshold, copy=False)
return binarizer.fit_transform(img)
def images2mat(images):
return numpy.asarray(map(lambda x: x.ravel(), images))
| mit |
lukeshingles/evelchemevol | tools/plottwoelements.py | 1 | 2281 | #!/usr/bin/env python3
import matplotlib.pyplot as plt
# import math
import sys
# import numpy as np
for i in range(1):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.12, 0.12, 0.84, 0.84])
if i == 0:
file_suffix = "Pb-Fe"
axisy = "[Pb/Fe]"
ymin = -3.0
ymax = 3.0
axisx = "[Fe/H]"
xmin = -1.0
xmax = 2.5
elif i == 1:
file_suffix = "La-Fe"
axisy = "[La/Fe]"
ymin = -0.5
ymax = 2.5
axisx = "[Fe/H]"
xmin = -0.85
xmax = 0.8
xvalues = []
yvalues = []
header_row = []
axisxnum = -1
axisynum = -1
f = open('out-abundances.txt', 'r')
for line in f.readlines():
row = line.split()
if row[0].startswith("#"):
header_row = row
for i, header in enumerate(header_row):
if header_row[i] == axisx:
axisxnum = i
if header_row[i] == axisy:
axisynum = i
print(i)
if axisxnum == -1 or axisynum == -1:
print("Columns not found")
sys.exit(1)
print(header_row[axisynum] + " vs. " + header_row[axisxnum])
elif axisxnum != -1 and axisynum != -1 and len(row) >= len(header_row):
xvalues.append(float(row[axisxnum]))
yvalues.append(float(row[axisynum]))
# print header_row[axisynum+1]
# print yvalues[0]
# for i in range(1,len(header_row)):
# colors = ['r','g','b','orange','black','purple']
ax.plot(xvalues, yvalues, color='black', marker='None', lw=1.5)
ax.plot(xvalues[0], yvalues[0], color='blue', marker='o', markersize=6,
markeredgewidth=0, label="Initial")
# ax.set_ylim(min(filter(lambda x: x>10**-20,y_arr[i]))/1.5,max(y_arr[i])*1.5)
# ax.legend(loc=3,handlelength=2)
fs = 10
# plt.setp(plt.getp(plt.gca(), 'xticklabels'), fontsize=fs)
# plt.setp(plt.getp(plt.gca(), 'yticklabels'), fontsize=fs)
ax.set_xlabel(axisx, labelpad=8, fontsize=fs)
ax.set_ylabel(axisy, labelpad=8, fontsize=fs)
# ax.set_xlim(xmin,xmax)
# ax.set_ylim(ymin,ymax)
fig.savefig('cemodel-' + file_suffix + '.pdf', format='pdf')
plt.close()
| mit |
jpzk/evopy | evopy/metamodel/svc_linear_meta_model.py | 1 | 5535 | '''
This file is part of evopy.
Copyright 2012, Jendrik Poloczek
evopy is free software: you can redistribute it
and/or modify it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
evopy is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along with
evopy. If not, see <http://www.gnu.org/licenses/>.
'''
from collections import deque
from copy import deepcopy
from sklearn import svm
from sklearn import __version__ as sklearn_version
from numpy import sum, sqrt, mean, arctan2, pi, matrix, sin, cos
from numpy import matrix, cos, sin, inner, array, sqrt, arccos, pi, arctan2
from numpy import transpose
from numpy.random import rand
from numpy.random import normal
from numpy.linalg import inv
from meta_model import MetaModel
class SVCLinearMetaModel(MetaModel):
""" SVC meta model which classfies feasible and infeasible points """
def __init__(self, window_size, scaling, crossvalidation, repair_mode):
super(SVCLinearMetaModel, self).__init__()
self._window_size = window_size
self._scaling = scaling
self._training_infeasibles = deque(maxlen = self._window_size)
self._crossvalidation = crossvalidation
self._repair_mode = repair_mode
self.logger.add_binding('_selected_feasibles', 'selected_feasibles')
self.logger.add_binding('_selected_infeasibles', 'selected_infeasibles')
self.logger.add_binding('_best_acc', 'best_acc')
self.logger.add_binding('_best_parameter_C', 'best_parameter_C')
def add_sorted_feasibles(self, feasibles):
        reduced_feasibles = []
        for feasible in feasibles:
            copied_individual = deepcopy(feasible)
            copied_individual.value = [feasible.value[0]]
            reduced_feasibles.append(copied_individual)
        self._training_feasibles = reduced_feasibles
def add_infeasible(self, infeasible):
copied_individual = deepcopy(infeasible)
copied_individual.value = [infeasible.value[0]]
self._training_infeasibles.append(copied_individual)
def check_feasibility(self, individual):
""" Check the feasibility with meta model """
copied_individual = deepcopy(individual)
copied_individual.value = [individual.value[0]]
scaled_individual = self._scaling.scale(copied_individual)
prediction = self._clf.predict(scaled_individual.value)
encode = lambda distance : False if distance < 0 else True
return encode(prediction)
def train(self):
""" Train a meta model classification with new points, return True
if training was successful, False if not enough infeasible points
are gathered """
if(len(self._training_infeasibles) < self._window_size):
self._selected_feasibles = None
self._selected_infeasibles = None
self._best_parameter_C = None
self._best_acc = None
self.logger.log()
return False
cv_feasibles = self._training_feasibles[:self._window_size]
cv_infeasibles = [inf for inf in self._training_infeasibles]
self._scaling.setup(cv_feasibles + cv_infeasibles)
scale = lambda child : self._scaling.scale(child)
scaled_cv_feasibles = map(scale, cv_feasibles)
scaled_cv_infeasibles = map(scale, cv_infeasibles)
self._selected_feasibles, self._selected_infeasibles,\
self._best_parameter_C, self._best_acc =\
self._crossvalidation.crossvalidate(\
scaled_cv_feasibles, scaled_cv_infeasibles)
# @todo WARNING maybe rescale training feasibles/infeasibles (!)
fvalues = [f.value for f in self._selected_feasibles]
ivalues = [i.value for i in self._selected_infeasibles]
points = ivalues + fvalues
labels = [-1] * len(ivalues) + [1] * len(fvalues)
self._clf = svm.SVC(kernel = 'linear', C = self._best_parameter_C, tol = 1.0)
self._clf.fit(points, labels)
self.logger.log()
return True
def get_normal(self):
# VERY IMPORTANT
w = self._clf.coef_[0]
nw = w / sqrt(sum(w ** 2))
if sklearn_version == '0.10':
return -nw
if sklearn_version == '0.11':
return nw
if sklearn_version != '0.10' and sklearn_version != '0.11':
raise Exception("sklearn version is not supported")
    def repair(self, individual):
        repair_mode = self._repair_mode
        val = individual.value
        x = [val[0]]
        w = self._clf.coef_[0]
        nw = self.get_normal()
        # signed distance of x to the separating hyperplane
        to_hp = (self._clf.decision_function(x) * (1/sqrt(sum(w ** 2))))
        if repair_mode == 'none':
            return individual
        elif repair_mode == 'mirror':
            s = 2 * to_hp
        elif repair_mode == 'project':
            s = to_hp
        elif repair_mode == 'projectsigma':
            s = to_hp + mean(individual.sigmas)
        else:
            raise Exception("invalid repair_mode: %s" % repair_mode)
        nx = x + (nw * s)
        individual.value[0] = nx
        return individual
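# Illustrative sketch (not part of the original class): the repair step above
# moves a point along the unit normal of the separating hyperplane by its
# signed distance (projection) or twice that (mirroring). The hyperplane
# (w, b) and the point x below are made up for the example; the class folds
# its own sign convention into get_normal().
def _repair_geometry_sketch():
    w = array([1.0, 1.0])                          # hypothetical hyperplane normal
    b = -1.0                                       # hypothetical offset
    x = array([0.0, 0.0])                          # point to repair
    nw = w / sqrt(sum(w ** 2))                     # unit normal
    to_hp = (inner(w, x) + b) / sqrt(sum(w ** 2))  # signed distance to hyperplane
    projected = x - nw * to_hp                     # closest point on the hyperplane
    mirrored = x - nw * (2 * to_hp)                # reflection across the hyperplane
    return projected, mirrored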
| gpl-3.0 |
themrmax/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 303 | 2841 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
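    """Reference Floyd-Warshall implementation: zero entries are treated as
    missing edges (set to infinity), then every pair (i, j) is relaxed through
    each intermediate node k with the classic O(N^3) triple loop.
    """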
N = graph.shape[0]
#set nonzero entries to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix = dist_matrix + dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
| bsd-3-clause |
wasade/qiita | qiita_ware/test/test_processing_pipeline.py | 1 | 16556 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import division
from unittest import TestCase, main
from tempfile import mkdtemp, mkstemp
from os.path import exists, join, basename
from os import remove, close, mkdir
from functools import partial
from shutil import rmtree
import pandas as pd
from qiita_core.util import qiita_test_checker
from qiita_db.util import (get_db_files_base_dir, get_mountpoint,
convert_to_id, get_count)
from qiita_db.data import RawData, PreprocessedData
from qiita_db.study import Study
from qiita_db.parameters import (PreprocessedIlluminaParams,
ProcessedSortmernaParams,
Preprocessed454Params)
from qiita_db.metadata_template import PrepTemplate
from qiita_ware.processing_pipeline import (_get_preprocess_fastq_cmd,
_get_preprocess_fasta_cmd,
_insert_preprocessed_data,
generate_demux_file,
_get_qiime_minimal_mapping,
_get_process_target_gene_cmd,
_insert_processed_data_target_gene)
@qiita_test_checker()
class ProcessingPipelineTests(TestCase):
def setUp(self):
self.db_dir = get_db_files_base_dir()
self.files_to_remove = []
self.dirs_to_remove = []
def tearDown(self):
for fp in self.files_to_remove:
if exists(fp):
remove(fp)
for dp in self.dirs_to_remove:
if exists(dp):
rmtree(dp)
def test_get_qiime_minimal_mapping_single(self):
prep_template = PrepTemplate(1)
out_dir = mkdtemp()
obs_fps = _get_qiime_minimal_mapping(prep_template, out_dir)
exp_fps = [join(out_dir, 's_G1_L001_sequences_MMF.txt')]
# Check that the returned list is as expected
self.assertEqual(obs_fps, exp_fps)
# Check that the file exists
self.assertTrue(exists(exp_fps[0]))
# Check the contents of the file
with open(exp_fps[0], "U") as f:
self.assertEqual(f.read(), EXP_PREP)
def test_get_qiime_minimal_mapping_multiple(self):
# We need to create a prep template in which we have different run
# prefix values, so we can test this case
metadata_dict = {
'SKB8.640193': {'center_name': 'ANL',
'center_project_name': 'Test Project',
'ebi_submission_accession': None,
'EMP_status': 'EMP',
'str_column': 'Value for sample 1',
'linkerprimersequence': 'GTGCCAGCMGCCGCGGTAA',
'barcodesequence': 'GTCCGCAAGTTA',
'run_prefix': "s_G1_L001_sequences",
'platform': 'ILLUMINA',
'library_construction_protocol': 'AAA',
'experiment_design_description': 'BBB'},
'SKD8.640184': {'center_name': 'ANL',
'center_project_name': 'Test Project',
'ebi_submission_accession': None,
'EMP_status': 'EMP',
'str_column': 'Value for sample 2',
'linkerprimersequence': 'GTGCCAGCMGCCGCGGTAA',
'barcodesequence': 'CGTAGAGCTCTC',
'run_prefix': "s_G1_L001_sequences",
'platform': 'ILLUMINA',
'library_construction_protocol': 'AAA',
'experiment_design_description': 'BBB'},
'SKB7.640196': {'center_name': 'ANL',
'center_project_name': 'Test Project',
'ebi_submission_accession': None,
'EMP_status': 'EMP',
'str_column': 'Value for sample 3',
'linkerprimersequence': 'GTGCCAGCMGCCGCGGTAA',
'barcodesequence': 'CCTCTGAGAGCT',
'run_prefix': "s_G1_L002_sequences",
'platform': 'ILLUMINA',
'library_construction_protocol': 'AAA',
'experiment_design_description': 'BBB'}
}
md_template = pd.DataFrame.from_dict(metadata_dict, orient='index')
prep_template = PrepTemplate.create(md_template, RawData(2), Study(1),
'16S')
out_dir = mkdtemp()
obs_fps = sorted(_get_qiime_minimal_mapping(prep_template, out_dir))
exp_fps = sorted([join(out_dir, 's_G1_L001_sequences_MMF.txt'),
join(out_dir, 's_G1_L002_sequences_MMF.txt')])
# Check that the returned list is as expected
self.assertEqual(obs_fps, exp_fps)
# Check that the file exists
for fp in exp_fps:
self.assertTrue(exists(fp))
# Check the contents of the file
for fp, contents in zip(exp_fps, [EXP_PREP_1, EXP_PREP_2]):
with open(fp, "U") as f:
self.assertEqual(f.read(), contents)
def test_get_preprocess_fastq_cmd(self):
raw_data = RawData(1)
params = PreprocessedIlluminaParams(1)
prep_template = PrepTemplate(1)
obs_cmd, obs_output_dir = _get_preprocess_fastq_cmd(
raw_data, prep_template, params)
get_raw_path = partial(join, self.db_dir, 'raw_data')
seqs_fp = get_raw_path('1_s_G1_L001_sequences.fastq.gz')
bc_fp = get_raw_path('1_s_G1_L001_sequences_barcodes.fastq.gz')
exp_cmd_1 = ("split_libraries_fastq.py --store_demultiplexed_fastq -i "
"{} -b {} "
"-m ".format(seqs_fp, bc_fp))
exp_cmd_2 = ("-o {0} --barcode_type golay_12 --max_bad_run_length 3 "
"--max_barcode_errors 1.5 "
"--min_per_read_length_fraction 0.75 "
"--phred_quality_threshold 3 "
"--sequence_max_n 0".format(obs_output_dir))
# We are splitting the command into two parts because there is no way
# that we can know the filepath of the mapping file. We thus split the
# command on the mapping file path and we check that the two parts
        # of the command are correct
obs_cmd_1 = obs_cmd[:len(exp_cmd_1)]
obs_cmd_2 = obs_cmd[len(exp_cmd_1):].split(" ", 1)[1]
self.assertEqual(obs_cmd_1, exp_cmd_1)
self.assertEqual(obs_cmd_2, exp_cmd_2)
def test_get_preprocess_fasta_cmd_sff(self):
raw_data = RawData(3)
params = Preprocessed454Params(1)
prep_template = PrepTemplate(1)
obs_cmd, obs_output_dir = _get_preprocess_fasta_cmd(
raw_data, prep_template, params)
get_raw_path = partial(join, self.db_dir, 'raw_data')
seqs_fp = [get_raw_path('preprocess_test1.sff'),
get_raw_path('preprocess_test2.sff')]
exp_cmd_1 = ' '.join(["process_sff.py",
"-i %s" % seqs_fp[0],
"-o %s" % obs_output_dir])
exp_cmd_2 = ' '.join(["process_sff.py",
"-i %s" % seqs_fp[1],
"-o %s" % obs_output_dir])
fasta_files = ','.join([join(obs_output_dir, "preprocess_test1.fna"),
join(obs_output_dir, "preprocess_test2.fna")])
qual_files = ','.join([join(obs_output_dir, "preprocess_test1.qual"),
join(obs_output_dir, "preprocess_test2.qual")])
exp_cmd_3a = ' '.join(["split_libraries.py",
"-f %s" % fasta_files])
exp_cmd_3b = ' '.join(["-q %s" % qual_files,
"-d",
"-o %s" % obs_output_dir,
params.to_str()])
exp_cmd_4 = ' '.join(["convert_fastaqual_fastq.py",
"-f %s/seqs.fna" % obs_output_dir,
"-q %s/seqs_filtered.qual" % obs_output_dir,
"-o %s" % obs_output_dir,
"-F"])
obs_cmds = obs_cmd.split('; ')
# We are splitting the command into two parts because there is no way
# that we can know the filepath of the mapping file. We thus split the
# command on the mapping file path and we check that the two parts
        # of the command are correct
obs_cmd_3a, obs_cmd_3b_temp = obs_cmds[2].split(' -m ', 1)
obs_cmd_3b = obs_cmd_3b_temp.split(' ', 1)[1]
self.assertEqual(obs_cmds[0], exp_cmd_1)
self.assertEqual(obs_cmds[1], exp_cmd_2)
self.assertEqual(obs_cmd_3a, exp_cmd_3a)
self.assertEqual(obs_cmd_3b, exp_cmd_3b)
self.assertEqual(obs_cmds[3], exp_cmd_4)
def test_insert_preprocessed_data(self):
study = Study(1)
params = PreprocessedIlluminaParams(1)
prep_template = PrepTemplate(1)
prep_out_dir = mkdtemp()
self.dirs_to_remove.append(prep_out_dir)
path_builder = partial(join, prep_out_dir)
db_path_builder = partial(join, join(self.db_dir, "preprocessed_data"))
file_suffixes = ['seqs.fna', 'seqs.fastq', 'seqs.demux',
'split_library_log.txt']
db_files = []
for f_suff in file_suffixes:
fp = path_builder(f_suff)
with open(fp, 'w') as f:
f.write("\n")
self.files_to_remove.append(fp)
db_files.append(db_path_builder("3_%s" % f_suff))
self.files_to_remove.extend(db_files)
_insert_preprocessed_data(study, params, prep_template,
prep_out_dir)
# Check that the files have been copied
for fp in db_files:
self.assertTrue(exists(fp))
# Check that a new preprocessed data has been created
self.assertTrue(self.conn_handler.execute_fetchone(
"SELECT EXISTS(SELECT * FROM qiita.preprocessed_data WHERE "
"preprocessed_data_id=%s)", (3, ))[0])
def test_generate_demux_file(self):
prep_out_dir = mkdtemp()
with open(join(prep_out_dir, 'seqs.fastq'), "w") as f:
f.write(DEMUX_SEQS)
obs_fp = generate_demux_file(prep_out_dir)
exp_fp = join(prep_out_dir, 'seqs.demux')
self.assertEqual(obs_fp, exp_fp)
self.assertTrue(exists(exp_fp))
def test_get_process_target_gene_cmd(self):
preprocessed_data = PreprocessedData(1)
params = ProcessedSortmernaParams(1)
obs_cmd, obs_output_dir = _get_process_target_gene_cmd(
preprocessed_data, params)
_, ref_dir = get_mountpoint('reference')[0]
_, preprocessed_dir = get_mountpoint('preprocessed_data')[0]
exp_cmd = ("pick_closed_reference_otus.py -i {}1_seqs.fna -r "
"{}GreenGenes_13_8_97_otus.fasta -o {} -p placeholder -t "
"{}GreenGenes_13_8_97_otu_taxonomy.txt".format(
preprocessed_dir, ref_dir, obs_output_dir, ref_dir))
obs_tokens = obs_cmd.split()[::-1]
exp_tokens = exp_cmd.split()[::-1]
self.assertEqual(len(obs_tokens), len(exp_tokens))
while obs_tokens:
o_t = obs_tokens.pop()
e_t = exp_tokens.pop()
if o_t == '-p':
# skip parameters file
obs_tokens.pop()
exp_tokens.pop()
else:
self.assertEqual(o_t, e_t)
def test_insert_processed_data_target_gene(self):
fd, fna_fp = mkstemp(suffix='_seqs.fna')
close(fd)
fd, qual_fp = mkstemp(suffix='_seqs.qual')
close(fd)
filepaths = [
(fna_fp, convert_to_id('preprocessed_fasta', 'filepath_type')),
(qual_fp, convert_to_id('preprocessed_fastq', 'filepath_type'))]
preprocessed_data = PreprocessedData.create(
Study(1), "preprocessed_sequence_illumina_params", 1,
filepaths, data_type="18S")
params = ProcessedSortmernaParams(1)
pick_dir = mkdtemp()
path_builder = partial(join, pick_dir)
db_path_builder = partial(join, get_mountpoint('processed_data')[0][1])
# Create a placeholder for the otu table
with open(path_builder('otu_table.biom'), 'w') as f:
f.write('\n')
# Create a placeholder for the directory
mkdir(path_builder('sortmerna_picked_otus'))
# Create the log file
fd, fp = mkstemp(dir=pick_dir, prefix='log_', suffix='.txt')
close(fd)
with open(fp, 'w') as f:
f.write('\n')
_insert_processed_data_target_gene(preprocessed_data, params, pick_dir)
new_id = get_count('qiita.processed_data')
# Check that the files have been copied
db_files = [db_path_builder("%s_otu_table.biom" % new_id),
db_path_builder("%s_sortmerna_picked_otus" % new_id),
db_path_builder("%s_%s" % (new_id, basename(fp)))]
for fp in db_files:
self.assertTrue(exists(fp))
        # Check that a new processed data entry has been created
self.assertTrue(self.conn_handler.execute_fetchone(
"SELECT EXISTS(SELECT * FROM qiita.processed_data WHERE "
"processed_data_id=%s)", (new_id, ))[0])
DEMUX_SEQS = """@a_1 orig_bc=abc new_bc=abc bc_diffs=0
xyz
+
ABC
@b_1 orig_bc=abw new_bc=wbc bc_diffs=4
qwe
+
DFG
@b_2 orig_bc=abw new_bc=wbc bc_diffs=4
qwe
+
DEF
"""
EXP_PREP = (
"#SampleID\tBarcodeSequence\tLinkerPrimerSequence\tDescription\n"
"1.SKB1.640202\tGTCCGCAAGTTA\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n"
"1.SKB2.640194\tCGTAGAGCTCTC\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n"
"1.SKB3.640195\tCCTCTGAGAGCT\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n"
"1.SKB4.640189\tCCTCGATGCAGT\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n"
"1.SKB5.640181\tGCGGACTATTCA\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n"
"1.SKB6.640176\tCGTGCACAATTG\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n"
"1.SKB7.640196\tCGGCCTAAGTTC\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n"
"1.SKB8.640193\tAGCGCTCACATC\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n"
"1.SKB9.640200\tTGGTTATGGCAC\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n"
"1.SKD1.640179\tCGAGGTTCTGAT\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n"
"1.SKD2.640178\tAACTCCTGTGGA\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n"
"1.SKD3.640198\tTAATGGTCGTAG\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n"
"1.SKD4.640185\tTTGCACCGTCGA\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n"
"1.SKD5.640186\tTGCTACAGACGT\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n"
"1.SKD6.640190\tATGGCCTGACTA\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n"
"1.SKD7.640191\tACGCACATACAA\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n"
"1.SKD8.640184\tTGAGTGGTCTGT\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n"
"1.SKD9.640182\tGATAGCACTCGT\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n"
"1.SKM1.640183\tTAGCGCGAACTT\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n"
"1.SKM2.640199\tCATACACGCACC\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n"
"1.SKM3.640197\tACCTCAGTCAAG\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n"
"1.SKM4.640180\tTCGACCAAACAC\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n"
"1.SKM5.640177\tCCACCCAGTAAC\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n"
"1.SKM6.640187\tATATCGCGATGA\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n"
"1.SKM7.640188\tCGCCGGTAATCT\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n"
"1.SKM8.640201\tCCGATGCCTTGA\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n"
"1.SKM9.640192\tAGCAGGCACGAA\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n")
EXP_PREP_1 = (
"#SampleID\tBarcodeSequence\tLinkerPrimerSequence\tDescription\n"
"1.SKB8.640193\tGTCCGCAAGTTA\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n"
"1.SKD8.640184\tCGTAGAGCTCTC\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n")
EXP_PREP_2 = (
"#SampleID\tBarcodeSequence\tLinkerPrimerSequence\tDescription\n"
"1.SKB7.640196\tCCTCTGAGAGCT\tGTGCCAGCMGCCGCGGTAA\tQiita MMF\n")
if __name__ == '__main__':
main()
| bsd-3-clause |
mayblue9/scikit-learn | sklearn/cluster/tests/test_k_means.py | 63 | 26190 | """Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_kmeans_dtype():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
X = (X * 10).astype(np.uint8)
km = KMeans(n_init=1).fit(X)
pred_x = assert_warns(DataConversionWarning, km.predict, X)
assert_array_equal(km.labels_, pred_x)
def test_labels_assignment_and_inertia():
# pure numpy implementation as easily auditable reference gold
# implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
# check that the number of clusters centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the labels assignment are perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
@if_safe_multiprocessing_with_blas
def test_k_means_plus_plus_init_2_jobs():
if sys.version_info[:2] < (3, 4):
raise SkipTest(
"Possible multi-process bug with some BLAS under Python < 3.4")
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_precompute_distances_flag():
    # check that an error is raised if the precompute_distances flag is not
    # supported
km = KMeans(precompute_distances="wrong")
assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_k_means_n_init():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
# two regression tests on bad n_init argument
# previous bug: n_init <= 0 threw non-informative TypeError (#3858)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=0).fit, X)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_k_means_fortran_aligned_data():
# Check the KMeans will work well, even if X is a fortran-aligned data.
X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
centers = np.array([[0, 0], [0, 1]])
labels = np.array([0, 1, 1])
km = KMeans(n_init=1, init=centers, precompute_distances=False,
random_state=42)
km.fit(X)
assert_array_equal(km.cluster_centers_, centers)
assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
    # Check that a warning is raised, as the number of clusters is larger
# than the init_size
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
# Give a perfect initialization, but a large reassignment_ratio,
# as a result all the centers should be reassigned and the model
    # should no longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert_greater(score_before, mb_k_means.score(this_X))
# Give a perfect initialization, with a small reassignment_ratio,
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
# Run the test with 550 clusters and 550 samples, because it turned out
    # that these values ensure that the number of clusters to reassign
# is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
assert_raises(ValueError,
MiniBatchKMeans(init=test_init, random_state=42).fit, X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_match_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
# Check if copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check if my_X is centered
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
# Check k_means with a bad initialization does not yield a singleton
# Starting with bad centers that are quickly ignored should not
# result in a repositioning of the centers to the center of mass that
    # would lead to collapsed centers which in turn make the clustering
    # dependent on numerical instabilities.
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
# centers must not been collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also works for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also works for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_input_dtypes():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
X_int = np.array(X_list, dtype=np.int32)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_list),
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini batch kmeans is very unstable on such a small dataset hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_list),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_n_init():
# Check that increasing the number of init increases the quality
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
centers = cluster_centers
assert_equal(centers.shape, (n_clusters, n_features))
labels = labels
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the labels assignment are perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
init=centers)
    # too many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
def test_x_squared_norms_init_centroids():
"""Test that x_squared_norms can be None in _init_centroids"""
from sklearn.cluster.k_means_ import _init_centroids
X_norms = np.sum(X**2, axis=1)
precompute = _init_centroids(
X, 3, "k-means++", random_state=0, x_squared_norms=X_norms)
assert_array_equal(
precompute,
_init_centroids(X, 3, "k-means++", random_state=0))
| bsd-3-clause |
Esmidth/DIP | practice/16.py | 1 | 3057 | import numpy as np
import matplotlib.pyplot as plt
from skimage import draw, transform, feature, color, util, data
def ex_1():  # Detect circles in the image & draw them on it
img = np.zeros((250, 250, 3), dtype=np.uint8)
rr, cc = draw.circle_perimeter(60, 60, 50)
rr1, cc1 = draw.circle_perimeter(150, 150, 60)
img[cc, rr, :] = 255
img[cc1, rr1, :] = 255
fig, (ax0, ax1) = plt.subplots(1, 2)
ax0.imshow(img)
ax0.set_title('origin')
hough_radii = np.arange(50, 80, 5)
hough_res = transform.hough_circle(img[:, :, 0], hough_radii)
centers = []
accums = []
radii = []
for radius, h in zip(hough_radii, hough_res):
num_peaks = 2
peaks = feature.peak_local_max(h, num_peaks=num_peaks)
centers.extend(peaks)
accums.extend(h[peaks[:, 0], peaks[:, 1]])
radii.extend([radius] * num_peaks)
image = np.copy(img)
for idx in np.argsort(accums)[::-1][:2]:
center_x, center_y = centers[idx]
radius = radii[idx]
cx, cy = draw.circle_perimeter(center_y, center_x, radius)
image[cy, cx] = (255, 0, 0)
ax1.imshow(image)
ax1.set_title('detected image')
plt.show()
def ex_2():
image = util.img_as_ubyte(data.coins()[0:95, 70:370])
edges = feature.canny(image, sigma=3, low_threshold=10, high_threshold=50)
fig, (ax0, ax1) = plt.subplots(1, 2)
ax0.imshow(edges, cmap=plt.cm.gray)
ax0.set_title('origin')
hough_radii = np.arange(15, 30, 2)
hough_res = transform.hough_circle(edges, hough_radii)
centers = []
accums = []
radii = []
for radius, h in zip(hough_radii, hough_res):
num_peaks = 2
peaks = feature.peak_local_max(h, num_peaks=num_peaks)
centers.extend(peaks)
accums.extend(h[peaks[:, 0], peaks[:, 1]])
radii.extend([radius] * num_peaks)
image = color.gray2rgb(image)
for idx in np.argsort(accums)[::-1][:5]:
center_x, center_y = centers[idx]
radius = radii[idx]
cx, cy = draw.circle_perimeter(center_y, center_x, radius)
image[cy, cx] = (255, 0, 0)
ax1.imshow(image)
ax1.set_title('detected image')
plt.show()
def ex_3():
image_rgb = data.coffee()[0:220, 160:420]
image_gray = color.rgb2grey(image_rgb)
edges = feature.canny(image_gray, sigma=2.0, low_threshold=0.55, high_threshold=0.8)
result = transform.hough_ellipse(edges, accuracy=20, threshold=250, min_size=100, max_size=120)
result.sort(order='accumulator')
best = list(result[-1])
yc, xc, a, b = [int(round(x)) for x in best[1:5]]
orientation = best[5]
cy, cx = draw.ellipse_perimeter(yc, xc, a, b, orientation)
image_rgb[cy, cx] = (0, 0, 255)
edges = color.gray2rgb(edges)
edges[cy, cx] = (250, 0, 0)
fig2, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(8, 4))
ax1.set_title('Origin')
ax1.imshow(image_rgb)
ax2.set_title('Edge (white) and result (red)')
ax2.imshow(edges)
plt.show()
if __name__ == "__main__":
ex_3()
| apache-2.0 |
mementum/backtrader | backtrader/indicators/ols.py | 1 | 3987 | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015-2020 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import backtrader as bt
from . import PeriodN
__all__ = ['OLS_Slope_InterceptN', 'OLS_TransformationN', 'OLS_BetaN',
'CointN']
class OLS_Slope_InterceptN(PeriodN):
'''
    Calculates a linear regression using ``statsmodels.OLS`` (Ordinary least
squares) of data1 on data0
Uses ``pandas`` and ``statsmodels``
'''
_mindatas = 2 # ensure at least 2 data feeds are passed
packages = (
('pandas', 'pd'),
('statsmodels.api', 'sm'),
)
lines = ('slope', 'intercept',)
params = (
('period', 10),
)
def next(self):
p0 = pd.Series(self.data0.get(size=self.p.period))
p1 = pd.Series(self.data1.get(size=self.p.period))
p1 = sm.add_constant(p1)
intercept, slope = sm.OLS(p0, p1).fit().params
self.lines.slope[0] = slope
self.lines.intercept[0] = intercept
class OLS_TransformationN(PeriodN):
'''
    Calculates the ``zscore`` for data0 and data1. Although it doesn't directly
    use any external package, it relies on ``OLS_Slope_InterceptN`` which uses
    ``pandas`` and ``statsmodels``. A plain-pandas sketch of the same
    computation follows this class.
'''
_mindatas = 2 # ensure at least 2 data feeds are passed
lines = ('spread', 'spread_mean', 'spread_std', 'zscore',)
params = (('period', 10),)
def __init__(self):
slint = OLS_Slope_InterceptN(*self.datas)
spread = self.data0 - (slint.slope * self.data1 + slint.intercept)
self.l.spread = spread
self.l.spread_mean = bt.ind.SMA(spread, period=self.p.period)
self.l.spread_std = bt.ind.StdDev(spread, period=self.p.period)
self.l.zscore = (spread - self.l.spread_mean) / self.l.spread_std
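# Illustrative sketch (not part of the original module): the z-score spread
# computed by OLS_TransformationN, written with plain pandas/statsmodels on a
# hypothetical pair of price series p0, p1 (pandas Series of equal length).
# The rolling window plays the role of the SMA/StdDev period above; unlike
# the indicator, the regression here is fit once over the full series.
def _zscore_spread_sketch(p0, p1, period=10):
    import statsmodels.api as _sm
    intercept, slope = _sm.OLS(p0, _sm.add_constant(p1)).fit().params
    spread = p0 - (slope * p1 + intercept)
    spread_mean = spread.rolling(period).mean()
    spread_std = spread.rolling(period).std()
    return (spread - spread_mean) / spread_std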
class OLS_BetaN(PeriodN):
'''
Calculates a regression of data1 on data0 using ``pandas.ols``
Uses ``pandas``
'''
_mindatas = 2 # ensure at least 2 data feeds are passed
packages = (
('pandas', 'pd'),
)
lines = ('beta',)
params = (('period', 10),)
def next(self):
y, x = (pd.Series(d.get(size=self.p.period)) for d in self.datas)
r_beta = pd.ols(y=y, x=x, window_type='full_sample')
self.lines.beta[0] = r_beta.beta['x']
class CointN(PeriodN):
'''
Calculates the score (coint_t) and pvalue for a given ``period`` for the
data feeds
Uses ``pandas`` and ``statsmodels`` (for ``coint``)
'''
_mindatas = 2 # ensure at least 2 data feeds are passed
packages = (
('pandas', 'pd'), # import pandas as pd
)
frompackages = (
('statsmodels.tsa.stattools', 'coint'), # from st... import coint
)
lines = ('score', 'pvalue',)
params = (
('period', 10),
        ('trend', 'c'),  # see statsmodels.tsa.stattools
)
def next(self):
x, y = (pd.Series(d.get(size=self.p.period)) for d in self.datas)
score, pvalue, _ = coint(x, y, trend=self.p.trend)
self.lines.score[0] = score
self.lines.pvalue[0] = pvalue
| gpl-3.0 |
yqzhang/OpenANN | examples/sine/sine.py | 5 | 1262 | ## \page Sine Sine
#
# \section DataSet Data Set
#
# In this example, a sine function will be approximated from noisy measurements.
# This is an example for nonlinear regression. To run this example, you have
# to install matplotlib. It is a plotting library for Python.
#
# \section Code
#
# \include "sine/sine.py"
try:
import pylab
except:
print("Matplotlib is required")
exit(1)
from openann import *
import numpy
# Create network
net = Net()
net.set_regularization(0.0, 0.0001, 0.0)
net.input_layer(1)
net.fully_connected_layer(10, Activation.LOGISTIC)
net.fully_connected_layer(10, Activation.LOGISTIC)
net.output_layer(1, Activation.LINEAR)
# Create dataset
X = numpy.linspace(0, 2*numpy.pi, 500)[:, numpy.newaxis]
T = numpy.sin(X) + numpy.random.randn(*X.shape) * 0.1
dataset = DataSet(X, T)
Log.info("Using %d samples with %d inputs and %d outputs"
% (dataset.samples(), dataset.inputs(), dataset.outputs()))
# Train network
stop = {
"maximal_iterations": 50,
"minimal_value_differences": 1e-8
}
lma = LMA(stop)
lma.optimize(net, dataset)
# Predict data
Y = net.predict(X)
# Plot dataset and hypothesis
pylab.plot(X, T, ".", label="Data Set")
pylab.plot(X, Y, label="Prediction", linewidth=3)
pylab.legend()
pylab.show()
| gpl-3.0 |
buqing2009/MissionPlanner | Lib/site-packages/scipy/signal/filter_design.py | 53 | 63381 | """Filter design.
"""
import types
import warnings
import numpy
from numpy import atleast_1d, poly, polyval, roots, real, asarray, allclose, \
resize, pi, absolute, logspace, r_, sqrt, tan, log10, arctan, arcsinh, \
cos, exp, cosh, arccosh, ceil, conjugate, zeros, sinh
from numpy import mintypecode
from scipy import special, optimize
from scipy.misc import comb
class BadCoefficients(UserWarning):
pass
abs = absolute
def findfreqs(num, den, N):
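    """Find an array of N frequencies at which to evaluate a filter response,
    logarithmically spaced over a range wide enough to cover the poles and
    zeros of the transfer function num/den.
    """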
ep = atleast_1d(roots(den))+0j
tz = atleast_1d(roots(num))+0j
if len(ep) == 0:
ep = atleast_1d(-1000)+0j
ez = r_['-1',numpy.compress(ep.imag >=0, ep,axis=-1), numpy.compress((abs(tz) < 1e5) & (tz.imag >=0),tz,axis=-1)]
integ = abs(ez) < 1e-10
hfreq = numpy.around(numpy.log10(numpy.max(3*abs(ez.real + integ)+1.5*ez.imag))+0.5)
lfreq = numpy.around(numpy.log10(0.1*numpy.min(abs(real(ez+integ))+2*ez.imag))-0.5)
w = logspace(lfreq, hfreq, N)
return w
def freqs(b, a, worN=None, plot=None):
"""
Compute frequency response of analog filter.
Given the numerator (b) and denominator (a) of a filter compute its
frequency response::
b[0]*(jw)**(nb-1) + b[1]*(jw)**(nb-2) + ... + b[nb-1]
H(w) = -------------------------------------------------------
a[0]*(jw)**(na-1) + a[1]*(jw)**(na-2) + ... + a[na-1]
Parameters
----------
b : ndarray
Numerator of a linear filter.
a : ndarray
Denominator of a linear filter.
worN : {None, int}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (determined by pole-zero locations). If a single
integer, then compute at that many frequencies. Otherwise, compute the
response at frequencies given in worN.
plot : callable
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqs`.
Returns
-------
w : ndarray
The frequencies at which h was computed.
h : ndarray
The frequency response.
See Also
--------
freqz : Compute the frequency response of a digital filter.
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
unexpected results, since it plots the real part of the complex transfer
function, not the magnitude.
"""
if worN is None:
w = findfreqs(b,a,200)
elif isinstance(worN, types.IntType):
N = worN
w = findfreqs(b,a,N)
else:
w = worN
w = atleast_1d(w)
s = 1j*w
h = polyval(b, s) / polyval(a, s)
if not plot is None:
plot(w, h)
return w, h
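# Example sketch for ``freqs`` (illustrative values): evaluate the analog
# low-pass H(s) = 1 / (s + 1) on an automatically chosen frequency grid.
#
#     w, h = freqs([1.0], [1.0, 1.0])
#     # 20 * log10(abs(h)) gives the magnitude response in dB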
def freqz(b, a=1, worN=None, whole=0, plot=None):
"""
Compute the frequency response of a digital filter.
Given the numerator ``b`` and denominator ``a`` of a digital filter compute
its frequency response::
jw -jw -jmw
jw B(e) b[0] + b[1]e + .... + b[m]e
H(e) = ---- = ------------------------------------
jw -jw -jnw
A(e) a[0] + a[1]e + .... + a[n]e
Parameters
----------
b : ndarray
numerator of a linear filter
a : ndarray
denominator of a linear filter
worN : {None, int}, optional
If None, then compute at 512 frequencies around the unit circle.
If a single integer, then compute at that many frequencies.
Otherwise, compute the response at frequencies given in worN
whole : bool, optional
Normally, frequencies are computed from 0 to pi (the upper half of the
unit circle). If `whole` is True, compute frequencies from 0 to 2*pi.
plot : callable
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqz`.
Returns
-------
w : ndarray
The frequencies at which h was computed.
h : ndarray
The frequency response.
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
unexpected results, since it plots the real part of the complex transfer
function, not the magnitude.
Examples
--------
>>> b = firwin(80, 0.5, window=('kaiser', 8))
>>> w, h = freqz(b)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.title('Digital filter frequency response')
>>> ax1 = fig.add_subplot(111)
>>> plt.semilogy(w, np.abs(h), 'b')
>>> plt.ylabel('Amplitude (dB)', color='b')
>>> plt.xlabel('Frequency (rad/sample)')
>>> plt.grid()
>>> plt.legend()
>>> ax2 = ax1.twinx()
>>> angles = np.unwrap(np.angle(h))
>>> plt.plot(w, angles, 'g')
>>> plt.ylabel('Angle (radians)', color='g')
>>> plt.show()
"""
b, a = map(atleast_1d, (b,a))
if whole:
lastpoint = 2*pi
else:
lastpoint = pi
if worN is None:
N = 512
w = numpy.arange(0,lastpoint,lastpoint/N)
elif isinstance(worN, types.IntType):
N = worN
w = numpy.arange(0,lastpoint,lastpoint/N)
else:
w = worN
w = atleast_1d(w)
zm1 = exp(-1j*w)
h = polyval(b[::-1], zm1) / polyval(a[::-1], zm1)
if not plot is None:
plot(w, h)
return w, h
def tf2zpk(b, a):
"""Return zero, pole, gain (z,p,k) representation from a numerator,
denominator representation of a linear filter.
Parameters
----------
b : ndarray
Numerator polynomial.
a : ndarray
Denominator polynomial.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
If some values of b are too close to 0, they are removed. In that case, a
BadCoefficients warning is emitted.
"""
b,a = normalize(b,a)
b = (b+0.0) / a[0]
a = (a+0.0) / a[0]
k = b[0]
b /= b[0]
z = roots(b)
p = roots(a)
return z, p, k
def zpk2tf(z, p, k):
"""Return polynomial transfer function representation from zeros
and poles
Parameters
----------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Returns
-------
b : ndarray
Numerator polynomial.
a : ndarray
Denominator polynomial.
"""
z = atleast_1d(z)
k = atleast_1d(k)
if len(z.shape) > 1:
temp = poly(z[0])
b = zeros((z.shape[0], z.shape[1]+1), temp.dtype.char)
if len(k) == 1:
k = [k[0]]*z.shape[0]
for i in range(z.shape[0]):
b[i] = k[i] * poly(z[i])
else:
b = k * poly(z)
a = atleast_1d(poly(p))
return b, a
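# Example sketch (illustrative values): the two representations round-trip.
#
#     z, p, k = tf2zpk([1.0, 0.5], [1.0, -0.8])
#     b, a = zpk2tf(z, p, k)   # recovers [1.0, 0.5] and [1.0, -0.8]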
def normalize(b, a):
"""Normalize polynomial representation of a transfer function.
If values of b are too close to 0, they are removed. In that case, a
BadCoefficients warning is emitted.
"""
b,a = map(atleast_1d,(b,a))
if len(a.shape) != 1:
raise ValueError("Denominator polynomial must be rank-1 array.")
if len(b.shape) > 2:
raise ValueError("Numerator polynomial must be rank-1 or rank-2 array.")
if len(b.shape) == 1:
b = asarray([b],b.dtype.char)
while a[0] == 0.0 and len(a) > 1:
a = a[1:]
outb = b * (1.0) / a[0]
outa = a * (1.0) / a[0]
if allclose(outb[:,0], 0, rtol=1e-14):
warnings.warn("Badly conditioned filter coefficients (numerator): the "
"results may be meaningless", BadCoefficients)
while allclose(outb[:,0], 0, rtol=1e-14) and (outb.shape[-1] > 1):
outb = outb[:,1:]
if outb.shape[0] == 1:
outb = outb[0]
return outb, outa
def lp2lp(b, a, wo=1.0):
"""Return a low-pass filter with cutoff frequency `wo`
from a low-pass filter prototype with unity cutoff frequency.
"""
a,b = map(atleast_1d,(a,b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
M = max((d,n))
pwo = pow(wo,numpy.arange(M-1,-1,-1))
start1 = max((n-d,0))
start2 = max((d-n,0))
b = b * pwo[start1]/pwo[start2:]
a = a * pwo[start1]/pwo[start1:]
return normalize(b, a)
def lp2hp(b, a, wo=1.0):
"""Return a high-pass filter with cutoff frequency `wo`
from a low-pass filter prototype with unity cutoff frequency.
"""
a,b = map(atleast_1d,(a,b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
if wo != 1:
pwo = pow(wo,numpy.arange(max((d,n))))
else:
pwo = numpy.ones(max((d,n)),b.dtype.char)
if d >= n:
outa = a[::-1] * pwo
outb = resize(b,(d,))
outb[n:] = 0.0
outb[:n] = b[::-1] * pwo[:n]
else:
outb = b[::-1] * pwo
outa = resize(a,(n,))
outa[d:] = 0.0
outa[:d] = a[::-1] * pwo[:d]
return normalize(outb, outa)
def lp2bp(b, a, wo=1.0, bw=1.0):
"""Return a band-pass filter with center frequency `wo` and bandwidth `bw`
from a low-pass filter prototype with unity cutoff frequency.
"""
a,b = map(atleast_1d,(a,b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a,b))
ma = max([N,D])
Np = N + ma
Dp = D + ma
bprime = numpy.zeros(Np+1,artype)
aprime = numpy.zeros(Dp+1,artype)
wosq = wo*wo
for j in range(Np+1):
val = 0.0
for i in range(0,N+1):
for k in range(0,i+1):
if ma-i+2*k == j:
val += comb(i,k)*b[N-i]*(wosq)**(i-k) / bw**i
bprime[Np-j] = val
for j in range(Dp+1):
val = 0.0
for i in range(0,D+1):
for k in range(0,i+1):
if ma-i+2*k == j:
val += comb(i,k)*a[D-i]*(wosq)**(i-k) / bw**i
aprime[Dp-j] = val
return normalize(bprime, aprime)
def lp2bs(b, a, wo=1, bw=1):
"""Return a band-stop filter with center frequency `wo` and bandwidth `bw`
from a low-pass filter prototype with unity cutoff frequency.
"""
a,b = map(atleast_1d,(a,b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a,b))
M = max([N,D])
Np = M + M
Dp = M + M
bprime = numpy.zeros(Np+1,artype)
aprime = numpy.zeros(Dp+1,artype)
wosq = wo*wo
for j in range(Np+1):
val = 0.0
for i in range(0,N+1):
for k in range(0,M-i+1):
if i+2*k == j:
val += comb(M-i,k)*b[N-i]*(wosq)**(M-i-k) * bw**i
bprime[Np-j] = val
for j in range(Dp+1):
val = 0.0
for i in range(0,D+1):
for k in range(0,M-i+1):
if i+2*k == j:
val += comb(M-i,k)*a[D-i]*(wosq)**(M-i-k) * bw**i
aprime[Dp-j] = val
return normalize(bprime, aprime)
def bilinear(b, a, fs=1.0):
"""Return a digital filter from an analog filter using the bilinear transform.
The bilinear transform substitutes ``(z-1) / (z+1)`` for ``s``.
"""
fs =float(fs)
a,b = map(atleast_1d,(a,b))
D = len(a) - 1
N = len(b) - 1
artype = float
M = max([N,D])
Np = M
Dp = M
bprime = numpy.zeros(Np+1,artype)
aprime = numpy.zeros(Dp+1,artype)
for j in range(Np+1):
val = 0.0
for i in range(N+1):
for k in range(i+1):
for l in range(M-i+1):
if k+l == j:
val += comb(i,k)*comb(M-i,l)*b[N-i]*pow(2*fs,i)*(-1)**k
bprime[j] = real(val)
for j in range(Dp+1):
val = 0.0
for i in range(D+1):
for k in range(i+1):
for l in range(M-i+1):
if k+l == j:
val += comb(i,k)*comb(M-i,l)*a[D-i]*pow(2*fs,i)*(-1)**k
aprime[j] = real(val)
return normalize(bprime, aprime)
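# Example sketch (illustrative): discretize the first-order analog low-pass
# H(s) = 1 / (s + 1) at a sampling rate of 10 Hz.
#
#     bz, az = bilinear([1.0], [1.0, 1.0], fs=10.0)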
def iirdesign(wp, ws, gpass, gstop, analog=0, ftype='ellip', output='ba'):
"""Complete IIR digital and analog filter design.
Given passband and stopband frequencies and gains construct an analog or
digital IIR filter of minimum order for a given basic type. Return the
output in numerator, denominator ('ba') or pole-zero ('zpk') form.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
ftype : str, optional
The type of IIR filter to design:
- elliptic : 'ellip'
- Butterworth : 'butter',
- Chebyshev I : 'cheby1',
- Chebyshev II: 'cheby2',
- Bessel : 'bessel'
output : ['ba', 'zpk'], optional
Type of output: numerator/denominator ('ba') or pole-zero ('zpk').
Default is 'ba'.
Returns
-------
b, a :
Numerator and denominator of the IIR filter. Only returned if
``output='ba'``.
z, p, k :
Zeros, poles, and gain of the IIR filter. Only returned if
``output='zpk'``.
"""
try:
ordfunc = filter_dict[ftype][1]
except KeyError:
raise ValueError("Invalid IIR filter type: %s" % ftype)
except IndexError:
raise ValueError("%s does not have order selection use iirfilter function." % ftype)
wp = atleast_1d(wp)
ws = atleast_1d(ws)
band_type = 2*(len(wp)-1)
band_type +=1
if wp[0] >= ws[0]:
band_type += 1
btype = {1:'lowpass', 2:'highpass', 3:'bandstop', 4:'bandpass'}[band_type]
N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog)
return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype, ftype=ftype, output=output)
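# Example sketch (illustrative specs): a digital elliptic low-pass with its
# passband edge at 0.2 and stopband edge at 0.3 (normalized frequencies).
#
#     b, a = iirdesign(wp=0.2, ws=0.3, gpass=1, gstop=40, ftype='ellip')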
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=0, ftype='butter', output='ba'):
"""IIR digital and analog filter design given order and critical points.
Design an Nth order lowpass digital or analog filter and return the filter
coefficients in (B,A) (numerator, denominator) or (Z,P,K) form.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
rp : float, optional
For Chebyshev and elliptic filters provides the maximum ripple
in the passband.
rs : float, optional
For chebyshev and elliptic filters provides the minimum attenuation in
the stop band.
btype : str, optional
The type of filter (lowpass, highpass, bandpass, bandstop).
Default is bandpass.
analog : int, optional
Non-zero to return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- elliptic : 'ellip'
- Butterworth : 'butter',
- Chebyshev I : 'cheby1',
- Chebyshev II: 'cheby2',
- Bessel : 'bessel'
output : ['ba', 'zpk'], optional
Type of output: numerator/denominator ('ba') or pole-zero ('zpk').
Default is 'ba'.
See Also
--------
buttord, cheb1ord, cheb2ord, ellipord
"""
ftype, btype, output = [x.lower() for x in (ftype, btype, output)]
Wn = asarray(Wn)
try:
btype = band_dict[btype]
except KeyError:
raise ValueError("%s is an invalid bandtype for filter." % btype)
try:
typefunc = filter_dict[ftype][0]
except KeyError:
raise ValueError("%s is not a valid basic iir filter." % ftype)
if output not in ['ba', 'zpk']:
raise ValueError("%s is not a valid output form." % output)
#pre-warp frequencies for digital filter design
if not analog:
fs = 2.0
warped = 2*fs*tan(pi*Wn/fs)
else:
warped = Wn
# convert to low-pass prototype
if btype in ['lowpass', 'highpass']:
wo = warped
else:
bw = warped[1] - warped[0]
wo = sqrt(warped[0]*warped[1])
# Get analog lowpass prototype
if typefunc in [buttap, besselap]:
z, p, k = typefunc(N)
elif typefunc == cheb1ap:
if rp is None:
raise ValueError("passband ripple (rp) must be provided to design a Chebyshev I filter.")
z, p, k = typefunc(N, rp)
elif typefunc == cheb2ap:
if rs is None:
raise ValueError("stopband atteunatuion (rs) must be provided to design an Chebyshev II filter.")
z, p, k = typefunc(N, rs)
else: # Elliptic filters
if rs is None or rp is None:
raise ValueError("Both rp and rs must be provided to design an elliptic filter.")
z, p, k = typefunc(N, rp, rs)
b, a = zpk2tf(z,p,k)
# transform to lowpass, bandpass, highpass, or bandstop
if btype == 'lowpass':
b, a = lp2lp(b,a,wo=wo)
elif btype == 'highpass':
b, a = lp2hp(b,a,wo=wo)
elif btype == 'bandpass':
b, a = lp2bp(b,a,wo=wo,bw=bw)
else: # 'bandstop'
b, a = lp2bs(b,a,wo=wo,bw=bw)
# Find discrete equivalent if necessary
if not analog:
b, a = bilinear(b, a, fs=fs)
# Transform to proper out type (pole-zero, state-space, numer-denom)
if output == 'zpk':
return tf2zpk(b,a)
else:
return b,a
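# Example sketch (illustrative): a 4th-order Butterworth band-pass between
# normalized frequencies 0.2 and 0.5, returned in (b, a) form.
#
#     b, a = iirfilter(4, [0.2, 0.5], btype='band', ftype='butter')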
def butter(N, Wn, btype='low', analog=0, output='ba'):
"""Butterworth digital and analog filter design.
Design an Nth order lowpass digital or analog Butterworth filter and return
the filter coefficients in (B,A) or (Z,P,K) form.
See also
--------
buttord.
"""
return iirfilter(N, Wn, btype=btype, analog=analog, output=output, ftype='butter')
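# Example sketch (illustrative): a 4th-order digital Butterworth low-pass with
# cutoff 0.2, inspected with ``freqz``.
#
#     b, a = butter(4, 0.2)
#     w, h = freqz(b, a)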
def cheby1(N, rp, Wn, btype='low', analog=0, output='ba'):
"""Chebyshev type I digital and analog filter design.
Design an Nth order lowpass digital or analog Chebyshev type I filter and
return the filter coefficients in (B,A) or (Z,P,K) form.
See also
--------
cheb1ord.
"""
return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog, output=output, ftype='cheby1')
def cheby2(N, rs, Wn, btype='low', analog=0, output='ba'):
"""Chebyshev type I digital and analog filter design.
Design an Nth order lowpass digital or analog Chebyshev type I filter and
return the filter coefficients in (B,A) or (Z,P,K) form.
See also
--------
cheb2ord.
"""
return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog, output=output, ftype='cheby2')
def ellip(N, rp, rs, Wn, btype='low', analog=0, output='ba'):
"""Elliptic (Cauer) digital and analog filter design.
Design an Nth order lowpass digital or analog elliptic filter and return
the filter coefficients in (B,A) or (Z,P,K) form.
See also
--------
ellipord.
"""
return iirfilter(N, Wn, rs=rs, rp=rp, btype=btype, analog=analog, output=output, ftype='elliptic')
def bessel(N, Wn, btype='low', analog=0, output='ba'):
"""Bessel digital and analog filter design.
Design an Nth order lowpass digital or analog Bessel filter and return the
filter coefficients in (B,A) or (Z,P,K) form.
"""
return iirfilter(N, Wn, btype=btype, analog=analog, output=output, ftype='bessel')
def maxflat():
pass
def yulewalk():
pass
def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type):
"""Band Stop Objective Function for order minimization.
Returns the non-integer order for an analog band stop filter.
Parameters
----------
wp :
Edge of passband `passb`.
ind : int
Index specifying which `passb` edge to vary (0 or 1).
passb : array_like
Two element sequence of fixed passband edges.
stopb : array_like
Two element sequence of fixed stopband edges.
gstop : float
Amount of attenuation in stopband in dB.
gpass : float
Amount of ripple in the passband in dB.
type : ['butter', 'cheby', 'ellip']
Type of filter.
Returns
-------
n : scalar
Filter order (possibly non-integer).
"""
passbC = passb.copy()
passbC[ind] = wp
nat = stopb*(passbC[0]-passbC[1]) / (stopb**2 - passbC[0]*passbC[1])
nat = min(abs(nat))
if type == 'butter':
GSTOP = 10**(0.1*abs(gstop))
GPASS = 10**(0.1*abs(gpass))
n = (log10((GSTOP-1.0)/(GPASS-1.0)) / (2*log10(nat)))
elif type == 'cheby':
GSTOP = 10**(0.1*abs(gstop))
GPASS = 10**(0.1*abs(gpass))
n = arccosh(sqrt((GSTOP-1.0)/(GPASS-1.0))) / arccosh(nat)
elif type == 'ellip':
GSTOP = 10**(0.1*gstop)
GPASS = 10**(0.1*gpass)
arg1 = sqrt( (GPASS-1.0) / (GSTOP-1.0) )
arg0 = 1.0 / nat
d0 = special.ellipk([arg0**2, 1-arg0**2])
d1 = special.ellipk([arg1**2, 1-arg1**2])
n = (d0[0]*d1[1] / (d0[1]*d1[0]))
else:
raise ValueError("Incorrect type: %s" % type)
return n
def buttord(wp, ws, gpass, gstop, analog=0):
"""Butterworth filter order selection.
Return the order of the lowest order digital Butterworth filter that loses
no more than `gpass` dB in the passband and has at least `gstop` dB
attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
Returns
-------
ord : int
The lowest order for a Butterworth filter which meets specs.
wn : ndarray or float
The Butterworth natural frequency (i.e. the "3dB frequency"). Should
be used with `butter` to give filter results.
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2*(len(wp)-1)
filter_type +=1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies
if not analog:
passb = tan(wp*pi/2.0)
stopb = tan(ws*pi/2.0)
else:
passb = wp*1.0
stopb = ws*1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0]-1e-12,
args=(0,passb,stopb,gpass,gstop,'butter'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1]+1e-12, passb[1],
args=(1,passb,stopb,gpass,gstop,'butter'),
disp=0)
passb[1] = wp1
nat = (stopb * (passb[0]-passb[1])) / (stopb**2 - passb[0]*passb[1])
elif filter_type == 4: # pass
nat = (stopb**2 - passb[0]*passb[1]) / (stopb* (passb[0]-passb[1]))
nat = min(abs(nat))
GSTOP = 10**(0.1*abs(gstop))
GPASS = 10**(0.1*abs(gpass))
ord = int(ceil( log10((GSTOP-1.0)/(GPASS-1.0)) / (2*log10(nat))))
# Find the Butterworth natural frequency W0 (or the "3dB frequency")
# to give exactly gstop at nat. W0 will be between 1 and nat
try:
W0 = nat / ( ( 10**(0.1*abs(gstop))-1)**(1.0/(2.0*ord)))
except ZeroDivisionError:
W0 = nat
print "Warning, order is zero...check input parametegstop."
# now convert this frequency back from lowpass prototype
# to the original analog filter
if filter_type == 1: # low
WN = W0*passb
elif filter_type == 2: # high
WN = passb / W0
elif filter_type == 3: # stop
WN = numpy.zeros(2,float)
WN[0] = ((passb[1] - passb[0]) + sqrt((passb[1] - passb[0])**2 + \
4*W0**2 * passb[0] * passb[1])) / (2*W0)
WN[1] = ((passb[1] - passb[0]) - sqrt((passb[1] - passb[0])**2 + \
4*W0**2 * passb[0] * passb[1])) / (2*W0)
WN = numpy.sort(abs(WN))
elif filter_type == 4: # pass
W0 = numpy.array([-W0, W0],float)
WN = -W0 * (passb[1]-passb[0]) / 2.0 + sqrt(W0**2 / 4.0 * \
(passb[1]-passb[0])**2 + \
passb[0]*passb[1])
WN = numpy.sort(abs(WN))
else:
raise ValueError("Bad type: %s" % filter_type)
if not analog:
wn = (2.0/pi)*arctan(WN)
else:
wn = WN
if len(wn) == 1:
wn = wn[0]
return ord, wn
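# Example sketch (illustrative specs): minimum Butterworth order for at most
# 3 dB passband loss below 0.2 and at least 40 dB attenuation above 0.3.
#
#     N, Wn = buttord(0.2, 0.3, 3, 40)
#     b, a = butter(N, Wn)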
def cheb1ord(wp, ws, gpass, gstop, analog=0):
"""Chebyshev type I filter order selection.
Return the order of the lowest order digital Chebyshev Type I filter that
loses no more than `gpass` dB in the passband and has at least `gstop` dB
attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
Returns
-------
ord : int
The lowest order for a Chebyshev type I filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby1` to give filter results.
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2*(len(wp)-1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies
if not analog:
passb = tan(pi*wp/2.)
stopb = tan(pi*ws/2.)
else:
passb = wp*1.0
stopb = ws*1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0]-1e-12,
args=(0,passb,stopb,gpass,gstop,'cheby'), disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1]+1e-12, passb[1],
args=(1,passb,stopb,gpass,gstop,'cheby'), disp=0)
passb[1] = wp1
nat = (stopb * (passb[0]-passb[1])) / (stopb**2 - passb[0]*passb[1])
elif filter_type == 4: # pass
nat = (stopb**2 - passb[0]*passb[1]) / (stopb* (passb[0]-passb[1]))
nat = min(abs(nat))
GSTOP = 10**(0.1*abs(gstop))
GPASS = 10**(0.1*abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP-1.0) / (GPASS-1.0))) / arccosh(nat)))
# Natural frequencies are just the passband edges
if not analog:
wn = (2.0/pi)*arctan(passb)
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
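# Example sketch (illustrative specs), analogous to ``buttord``:
#
#     N, Wn = cheb1ord(0.2, 0.3, 3, 40)
#     b, a = cheby1(N, 3, Wn)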
def cheb2ord(wp, ws, gpass, gstop, analog=0):
"""Chebyshev type II filter order selection.
Return the order of the lowest order digital Chebyshev Type II filter that
loses no more than `gpass` dB in the passband and has at least `gstop` dB
attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
Returns
-------
ord : int
The lowest order for a Chebyshev type II filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby2` to give filter results.
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2*(len(wp)-1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies
if not analog:
passb = tan(pi*wp/2.0)
stopb = tan(pi*ws/2.0)
else:
passb = wp*1.0
stopb = ws*1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0]-1e-12,
args=(0,passb,stopb,gpass,gstop,'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1]+1e-12, passb[1],
args=(1,passb,stopb,gpass,gstop,'cheby'),
disp=0)
passb[1] = wp1
nat = (stopb * (passb[0]-passb[1])) / (stopb**2 - passb[0]*passb[1])
elif filter_type == 4: # pass
nat = (stopb**2 - passb[0]*passb[1]) / (stopb* (passb[0]-passb[1]))
nat = min(abs(nat))
GSTOP = 10**(0.1*abs(gstop))
GPASS = 10**(0.1*abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP-1.0) / (GPASS-1.0))) / arccosh(nat)))
# Find frequency where analog response is -gpass dB.
# Then convert back from low-pass prototype to the original filter.
new_freq = cosh(1.0/ord * arccosh(sqrt((GSTOP-1.0)/(GPASS-1.0))))
new_freq = 1.0 / new_freq
if filter_type == 1:
nat = passb / new_freq
elif filter_type == 2:
nat = passb * new_freq
elif filter_type == 3:
nat = numpy.zeros(2,float)
nat[0] = new_freq / 2.0 * (passb[0]-passb[1]) + \
sqrt(new_freq**2 * (passb[1]-passb[0])**2 / 4.0 + \
passb[1] * passb[0])
nat[1] = passb[1] * passb[0] / nat[0]
elif filter_type == 4:
nat = numpy.zeros(2,float)
nat[0] = 1.0/(2.0*new_freq) * (passb[0] - passb[1]) + \
sqrt((passb[1]-passb[0])**2 / (4.0*new_freq**2) + \
passb[1] * passb[0])
nat[1] = passb[0] * passb[1] / nat[0]
if not analog:
wn = (2.0/pi)*arctan(nat)
else:
wn = nat
if len(wn) == 1:
wn = wn[0]
return ord, wn
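# Example sketch (illustrative specs):
#
#     N, Wn = cheb2ord(0.2, 0.3, 3, 60)
#     b, a = cheby2(N, 60, Wn)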
def ellipord(wp, ws, gpass, gstop, analog=0):
"""Elliptic (Cauer) filter order selection.
Return the order of the lowest order digital elliptic filter that loses no
more than gpass dB in the passband and has at least gstop dB attenuation in
the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
Returns
-------
ord : int
The lowest order for an Elliptic (Cauer) filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`ellip` to give filter results.
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2*(len(wp)-1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies
if analog:
passb = wp*1.0
stopb = ws*1.0
else:
passb = tan(wp*pi/2.0)
stopb = tan(ws*pi/2.0)
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0]-1e-12,
args=(0,passb,stopb,gpass,gstop,'ellip'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1]+1e-12, passb[1],
args=(1,passb,stopb,gpass,gstop,'ellip'),
disp=0)
passb[1] = wp1
nat = (stopb * (passb[0]-passb[1])) / (stopb**2 - passb[0]*passb[1])
elif filter_type == 4: # pass
nat = (stopb**2 - passb[0]*passb[1]) / (stopb* (passb[0]-passb[1]))
nat = min(abs(nat))
GSTOP = 10**(0.1*gstop)
GPASS = 10**(0.1*gpass)
arg1 = sqrt( (GPASS-1.0) / (GSTOP-1.0) )
arg0 = 1.0 / nat
d0 = special.ellipk([arg0**2, 1-arg0**2])
d1 = special.ellipk([arg1**2, 1-arg1**2])
ord = int(ceil(d0[0]*d1[1] / (d0[1]*d1[0])))
if not analog:
wn = arctan(passb)*2.0/pi
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
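# Example sketch (illustrative specs):
#
#     N, Wn = ellipord(0.2, 0.3, 3, 60)
#     b, a = ellip(N, 3, 60, Wn)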
def buttap(N):
"""Return (z,p,k) zero, pole, gain for analog prototype of an Nth
order Butterworth filter."""
z = []
n = numpy.arange(1,N+1)
p = numpy.exp(1j*(2*n-1)/(2.0*N)*pi)*1j
k = 1
return z, p, k
def cheb1ap(N, rp):
"""Return (z,p,k) zero, pole, gain for Nth order Chebyshev type I lowpass
analog filter prototype with `rp` decibels of ripple in the passband.
"""
z = []
eps = numpy.sqrt(10**(0.1*rp)-1.0)
n = numpy.arange(1,N+1)
mu = 1.0/N * numpy.log((1.0+numpy.sqrt(1+eps*eps)) / eps)
theta = pi/2.0 * (2*n-1.0)/N
p = -numpy.sinh(mu)*numpy.sin(theta) + 1j*numpy.cosh(mu)*numpy.cos(theta)
k = numpy.prod(-p,axis=0).real
if N % 2 == 0:
k = k / sqrt((1+eps*eps))
return z, p, k
def cheb2ap(N, rs):
"""Return (z,p,k) zero, pole, gain for Nth order Chebyshev type II lowpass
analog filter prototype with `rs` decibels of ripple in the stopband.
"""
de = 1.0/sqrt(10**(0.1*rs)-1)
mu = arcsinh(1.0/de)/N
if N % 2:
m = N - 1
n = numpy.concatenate((numpy.arange(1,N-1,2),numpy.arange(N+2,2*N,2)))
else:
m = N
n = numpy.arange(1,2*N,2)
z = conjugate(1j / cos(n*pi/(2.0*N)))
p = exp(1j*(pi*numpy.arange(1,2*N,2)/(2.0*N) + pi/2.0))
p = sinh(mu) * p.real + 1j*cosh(mu)*p.imag
p = 1.0 / p
k = (numpy.prod(-p,axis=0)/numpy.prod(-z,axis=0)).real
return z, p, k
EPSILON = 2e-16
def vratio(u, ineps, mp):
[s,c,d,phi] = special.ellipj(u,mp)
ret = abs(ineps - s/c)
return ret
def kratio(m, k_ratio):
m = float(m)
if m < 0:
m = 0.0
if m > 1:
m = 1.0
if abs(m) > EPSILON and (abs(m) + EPSILON) < 1:
k = special.ellipk([m,1-m])
r = k[0] / k[1] - k_ratio
elif abs(m) > EPSILON:
r = -k_ratio
else:
r = 1e20
return abs(r)
def ellipap(N, rp, rs):
"""Return (z,p,k) zeros, poles, and gain of an Nth order normalized
prototype elliptic analog lowpass filter with `rp` decibels of ripple in
the passband and a stopband `rs` decibels down.
References
----------
Lutova, Tosic, and Evans, "Filter Design for Signal Processing", Chapters 5
and 12.
"""
if N == 1:
p = -sqrt(1.0/(10**(0.1*rp)-1.0))
k = -p
z = []
return z, p, k
eps = numpy.sqrt(10**(0.1*rp)-1)
ck1 = eps / numpy.sqrt(10**(0.1*rs)-1)
ck1p = numpy.sqrt(1-ck1*ck1)
if ck1p == 1:
raise ValueError("Cannot design a filter with given rp and rs specifications.")
wp = 1
val = special.ellipk([ck1*ck1,ck1p*ck1p])
if abs(1-ck1p*ck1p) < EPSILON:
krat = 0
else:
krat = N*val[0] / val[1]
m = optimize.fmin(kratio, [0.5], args=(krat,), maxfun=250, maxiter=250,
disp=0)
if m < 0 or m > 1:
m = optimize.fminbound(kratio, 0, 1, args=(krat,), maxfun=250,
maxiter=250, disp=0)
capk = special.ellipk(m)
ws = wp / sqrt(m)
m1 = 1-m
j = numpy.arange(1-N%2,N,2)
jj = len(j)
[s,c,d,phi] = special.ellipj(j*capk/N,m*numpy.ones(jj))
snew = numpy.compress(abs(s) > EPSILON, s,axis=-1)
z = 1.0 / (sqrt(m)*snew)
z = 1j*z
z = numpy.concatenate((z,conjugate(z)))
r = optimize.fmin(vratio, special.ellipk(m), args=(1./eps, ck1p*ck1p),
maxfun=250, maxiter=250, disp=0)
v0 = capk * r / (N*val[0])
[sv,cv,dv,phi] = special.ellipj(v0,1-m)
p = -(c*d*sv*cv + 1j*s*dv) / (1-(d*sv)**2.0)
if N % 2:
newp = numpy.compress(abs(p.imag) > EPSILON*numpy.sqrt(numpy.sum(p*numpy.conjugate(p),axis=0).real), p,axis=-1)
p = numpy.concatenate((p,conjugate(newp)))
else:
p = numpy.concatenate((p,conjugate(p)))
k = (numpy.prod(-p,axis=0) / numpy.prod(-z,axis=0)).real
if N % 2 == 0:
k = k / numpy.sqrt((1+eps*eps))
return z, p, k
def besselap(N):
"""Return (z,p,k) zero, pole, gain for analog prototype of an Nth order
Bessel filter."""
z = []
k = 1
if N == 0:
p = [];
elif N == 1:
p = [-1]
elif N == 2:
p = [-.8660254037844386467637229+.4999999999999999999999996*1j,
-.8660254037844386467637229-.4999999999999999999999996*1j]
elif N == 3:
p = [-.9416000265332067855971980,
-.7456403858480766441810907-.7113666249728352680992154*1j,
-.7456403858480766441810907+.7113666249728352680992154*1j]
elif N == 4:
p = [-.6572111716718829545787781-.8301614350048733772399715*1j,
-.6572111716718829545787788+.8301614350048733772399715*1j,
-.9047587967882449459642637-.2709187330038746636700923*1j,
-.9047587967882449459642624+.2709187330038746636700926*1j]
elif N == 5:
p = [-.9264420773877602247196260,
-.8515536193688395541722677-.4427174639443327209850002*1j,
-.8515536193688395541722677+.4427174639443327209850002*1j,
-.5905759446119191779319432-.9072067564574549539291747*1j,
-.5905759446119191779319432+.9072067564574549539291747*1j]
elif N == 6:
p = [-.9093906830472271808050953-.1856964396793046769246397*1j,
-.9093906830472271808050953+.1856964396793046769246397*1j,
-.7996541858328288520243325-.5621717346937317988594118*1j,
-.7996541858328288520243325+.5621717346937317988594118*1j,
-.5385526816693109683073792-.9616876881954277199245657*1j,
-.5385526816693109683073792+.9616876881954277199245657*1j]
elif N == 7:
p = [-.9194871556490290014311619,
-.8800029341523374639772340-.3216652762307739398381830*1j,
-.8800029341523374639772340+.3216652762307739398381830*1j,
-.7527355434093214462291616-.6504696305522550699212995*1j,
-.7527355434093214462291616+.6504696305522550699212995*1j,
-.4966917256672316755024763-1.002508508454420401230220*1j,
-.4966917256672316755024763+1.002508508454420401230220*1j]
elif N == 8:
p = [-.9096831546652910216327629-.1412437976671422927888150*1j,
-.9096831546652910216327629+.1412437976671422927888150*1j,
-.8473250802359334320103023-.4259017538272934994996429*1j,
-.8473250802359334320103023+.4259017538272934994996429*1j,
-.7111381808485399250796172-.7186517314108401705762571*1j,
-.7111381808485399250796172+.7186517314108401705762571*1j,
-.4621740412532122027072175-1.034388681126901058116589*1j,
-.4621740412532122027072175+1.034388681126901058116589*1j]
elif N == 9:
p = [-.9154957797499037686769223,
-.8911217017079759323183848-.2526580934582164192308115*1j,
-.8911217017079759323183848+.2526580934582164192308115*1j,
-.8148021112269012975514135-.5085815689631499483745341*1j,
-.8148021112269012975514135+.5085815689631499483745341*1j,
-.6743622686854761980403401-.7730546212691183706919682*1j,
-.6743622686854761980403401+.7730546212691183706919682*1j,
-.4331415561553618854685942-1.060073670135929666774323*1j,
-.4331415561553618854685942+1.060073670135929666774323*1j]
elif N == 10:
p = [-.9091347320900502436826431-.1139583137335511169927714*1j,
-.9091347320900502436826431+.1139583137335511169927714*1j,
-.8688459641284764527921864-.3430008233766309973110589*1j,
-.8688459641284764527921864+.3430008233766309973110589*1j,
-.7837694413101441082655890-.5759147538499947070009852*1j,
-.7837694413101441082655890+.5759147538499947070009852*1j,
-.6417513866988316136190854-.8175836167191017226233947*1j,
-.6417513866988316136190854+.8175836167191017226233947*1j,
-.4083220732868861566219785-1.081274842819124562037210*1j,
-.4083220732868861566219785+1.081274842819124562037210*1j]
elif N == 11:
p = [-.9129067244518981934637318,
-.8963656705721166099815744-.2080480375071031919692341*1j
-.8963656705721166099815744+.2080480375071031919692341*1j,
-.8453044014712962954184557-.4178696917801248292797448*1j,
-.8453044014712962954184557+.4178696917801248292797448*1j,
-.7546938934722303128102142-.6319150050721846494520941*1j,
-.7546938934722303128102142+.6319150050721846494520941*1j,
-.6126871554915194054182909-.8547813893314764631518509*1j,
-.6126871554915194054182909+.8547813893314764631518509*1j,
-.3868149510055090879155425-1.099117466763120928733632*1j,
-.3868149510055090879155425+1.099117466763120928733632*1j]
elif N == 12:
p = [-.9084478234140682638817772-95506365213450398415258360.0e-27*1j,
-.9084478234140682638817772+95506365213450398415258360.0e-27*1j,
-.8802534342016826507901575-.2871779503524226723615457*1j,
-.8802534342016826507901575+.2871779503524226723615457*1j,
-.8217296939939077285792834-.4810212115100676440620548*1j,
-.8217296939939077285792834+.4810212115100676440620548*1j,
-.7276681615395159454547013-.6792961178764694160048987*1j,
-.7276681615395159454547013+.6792961178764694160048987*1j,
-.5866369321861477207528215-.8863772751320727026622149*1j,
-.5866369321861477207528215+.8863772751320727026622149*1j,
-.3679640085526312839425808-1.114373575641546257595657*1j,
-.3679640085526312839425808+1.114373575641546257595657*1j]
elif N == 13:
p = [-.9110914665984182781070663,
-.8991314665475196220910718-.1768342956161043620980863*1j,
-.8991314665475196220910718+.1768342956161043620980863*1j,
-.8625094198260548711573628-.3547413731172988997754038*1j,
-.8625094198260548711573628+.3547413731172988997754038*1j,
-.7987460692470972510394686-.5350752120696801938272504*1j,
-.7987460692470972510394686+.5350752120696801938272504*1j,
-.7026234675721275653944062-.7199611890171304131266374*1j,
-.7026234675721275653944062+.7199611890171304131266374*1j,
-.5631559842430199266325818-.9135900338325109684927731*1j,
-.5631559842430199266325818+.9135900338325109684927731*1j,
-.3512792323389821669401925-1.127591548317705678613239*1j,
-.3512792323389821669401925+1.127591548317705678613239*1j]
elif N == 14:
p = [-.9077932138396487614720659-82196399419401501888968130.0e-27*1j,
-.9077932138396487614720659+82196399419401501888968130.0e-27*1j,
-.8869506674916445312089167-.2470079178765333183201435*1j,
-.8869506674916445312089167+.2470079178765333183201435*1j,
-.8441199160909851197897667-.4131653825102692595237260*1j,
-.8441199160909851197897667+.4131653825102692595237260*1j,
-.7766591387063623897344648-.5819170677377608590492434*1j,
-.7766591387063623897344648+.5819170677377608590492434*1j,
-.6794256425119233117869491-.7552857305042033418417492*1j,
-.6794256425119233117869491+.7552857305042033418417492*1j,
-.5418766775112297376541293-.9373043683516919569183099*1j,
-.5418766775112297376541293+.9373043683516919569183099*1j,
-.3363868224902037330610040-1.139172297839859991370924*1j,
-.3363868224902037330610040+1.139172297839859991370924*1j]
elif N == 15:
p = [-.9097482363849064167228581,
-.9006981694176978324932918-.1537681197278439351298882*1j,
-.9006981694176978324932918+.1537681197278439351298882*1j,
-.8731264620834984978337843-.3082352470564267657715883*1j,
-.8731264620834984978337843+.3082352470564267657715883*1j,
-.8256631452587146506294553-.4642348752734325631275134*1j,
-.8256631452587146506294553+.4642348752734325631275134*1j,
-.7556027168970728127850416-.6229396358758267198938604*1j,
-.7556027168970728127850416+.6229396358758267198938604*1j,
-.6579196593110998676999362-.7862895503722515897065645*1j,
-.6579196593110998676999362+.7862895503722515897065645*1j,
-.5224954069658330616875186-.9581787261092526478889345*1j,
-.5224954069658330616875186+.9581787261092526478889345*1j,
-.3229963059766444287113517-1.149416154583629539665297*1j,
-.3229963059766444287113517+1.149416154583629539665297*1j]
elif N == 16:
p = [-.9072099595087001356491337-72142113041117326028823950.0e-27*1j,
-.9072099595087001356491337+72142113041117326028823950.0e-27*1j,
-.8911723070323647674780132-.2167089659900576449410059*1j,
-.8911723070323647674780132+.2167089659900576449410059*1j,
-.8584264231521330481755780-.3621697271802065647661080*1j,
-.8584264231521330481755780+.3621697271802065647661080*1j,
-.8074790293236003885306146-.5092933751171800179676218*1j,
-.8074790293236003885306146+.5092933751171800179676218*1j,
-.7356166304713115980927279-.6591950877860393745845254*1j,
-.7356166304713115980927279+.6591950877860393745845254*1j,
-.6379502514039066715773828-.8137453537108761895522580*1j,
-.6379502514039066715773828+.8137453537108761895522580*1j,
-.5047606444424766743309967-.9767137477799090692947061*1j,
-.5047606444424766743309967+.9767137477799090692947061*1j,
-.3108782755645387813283867-1.158552841199330479412225*1j,
-.3108782755645387813283867+1.158552841199330479412225*1j]
elif N == 17:
p = [-.9087141161336397432860029,
-.9016273850787285964692844-.1360267995173024591237303*1j,
-.9016273850787285964692844+.1360267995173024591237303*1j,
-.8801100704438627158492165-.2725347156478803885651973*1j,
-.8801100704438627158492165+.2725347156478803885651973*1j,
-.8433414495836129204455491-.4100759282910021624185986*1j,
-.8433414495836129204455491+.4100759282910021624185986*1j,
-.7897644147799708220288138-.5493724405281088674296232*1j,
-.7897644147799708220288138+.5493724405281088674296232*1j,
-.7166893842372349049842743-.6914936286393609433305754*1j,
-.7166893842372349049842743+.6914936286393609433305754*1j,
-.6193710717342144521602448-.8382497252826992979368621*1j,
-.6193710717342144521602448+.8382497252826992979368621*1j,
-.4884629337672704194973683-.9932971956316781632345466*1j,
-.4884629337672704194973683+.9932971956316781632345466*1j,
-.2998489459990082015466971-1.166761272925668786676672*1j,
-.2998489459990082015466971+1.166761272925668786676672*1j]
elif N == 18:
p = [-.9067004324162775554189031-64279241063930693839360680.0e-27*1j,
-.9067004324162775554189031+64279241063930693839360680.0e-27*1j,
-.8939764278132455733032155-.1930374640894758606940586*1j,
-.8939764278132455733032155+.1930374640894758606940586*1j,
-.8681095503628830078317207-.3224204925163257604931634*1j,
-.8681095503628830078317207+.3224204925163257604931634*1j,
-.8281885016242836608829018-.4529385697815916950149364*1j,
-.8281885016242836608829018+.4529385697815916950149364*1j,
-.7726285030739558780127746-.5852778162086640620016316*1j,
-.7726285030739558780127746+.5852778162086640620016316*1j,
-.6987821445005273020051878-.7204696509726630531663123*1j,
-.6987821445005273020051878+.7204696509726630531663123*1j,
-.6020482668090644386627299-.8602708961893664447167418*1j,
-.6020482668090644386627299+.8602708961893664447167418*1j,
-.4734268069916151511140032-1.008234300314801077034158*1j,
-.4734268069916151511140032+1.008234300314801077034158*1j,
-.2897592029880489845789953-1.174183010600059128532230*1j,
-.2897592029880489845789953+1.174183010600059128532230*1j]
elif N == 19:
p = [-.9078934217899404528985092,
-.9021937639390660668922536-.1219568381872026517578164*1j,
-.9021937639390660668922536+.1219568381872026517578164*1j,
-.8849290585034385274001112-.2442590757549818229026280*1j,
-.8849290585034385274001112+.2442590757549818229026280*1j,
-.8555768765618421591093993-.3672925896399872304734923*1j,
-.8555768765618421591093993+.3672925896399872304734923*1j,
-.8131725551578197705476160-.4915365035562459055630005*1j,
-.8131725551578197705476160+.4915365035562459055630005*1j,
-.7561260971541629355231897-.6176483917970178919174173*1j,
-.7561260971541629355231897+.6176483917970178919174173*1j,
-.6818424412912442033411634-.7466272357947761283262338*1j,
-.6818424412912442033411634+.7466272357947761283262338*1j,
-.5858613321217832644813602-.8801817131014566284786759*1j,
-.5858613321217832644813602+.8801817131014566284786759*1j,
-.4595043449730988600785456-1.021768776912671221830298*1j,
-.4595043449730988600785456+1.021768776912671221830298*1j,
-.2804866851439370027628724-1.180931628453291873626003*1j,
-.2804866851439370027628724+1.180931628453291873626003*1j]
elif N == 20:
p = [-.9062570115576771146523497-57961780277849516990208850.0e-27*1j,
-.9062570115576771146523497+57961780277849516990208850.0e-27*1j,
-.8959150941925768608568248-.1740317175918705058595844*1j,
-.8959150941925768608568248+.1740317175918705058595844*1j,
-.8749560316673332850673214-.2905559296567908031706902*1j,
-.8749560316673332850673214+.2905559296567908031706902*1j,
-.8427907479956670633544106-.4078917326291934082132821*1j,
-.8427907479956670633544106+.4078917326291934082132821*1j,
-.7984251191290606875799876-.5264942388817132427317659*1j,
-.7984251191290606875799876+.5264942388817132427317659*1j,
-.7402780309646768991232610-.6469975237605228320268752*1j,
-.7402780309646768991232610+.6469975237605228320268752*1j,
-.6658120544829934193890626-.7703721701100763015154510*1j,
-.6658120544829934193890626+.7703721701100763015154510*1j,
-.5707026806915714094398061-.8982829066468255593407161*1j,
-.5707026806915714094398061+.8982829066468255593407161*1j,
-.4465700698205149555701841-1.034097702560842962315411*1j,
-.4465700698205149555701841+1.034097702560842962315411*1j,
-.2719299580251652601727704-1.187099379810885886139638*1j,
-.2719299580251652601727704+1.187099379810885886139638*1j]
elif N == 21:
p = [-.9072262653142957028884077,
-.9025428073192696303995083-.1105252572789856480992275*1j,
-.9025428073192696303995083+.1105252572789856480992275*1j,
-.8883808106664449854431605-.2213069215084350419975358*1j,
-.8883808106664449854431605+.2213069215084350419975358*1j,
-.8643915813643204553970169-.3326258512522187083009453*1j,
-.8643915813643204553970169+.3326258512522187083009453*1j,
-.8299435470674444100273463-.4448177739407956609694059*1j,
-.8299435470674444100273463+.4448177739407956609694059*1j,
-.7840287980408341576100581-.5583186348022854707564856*1j,
-.7840287980408341576100581+.5583186348022854707564856*1j,
-.7250839687106612822281339-.6737426063024382240549898*1j,
-.7250839687106612822281339+.6737426063024382240549898*1j,
-.6506315378609463397807996-.7920349342629491368548074*1j,
-.6506315378609463397807996+.7920349342629491368548074*1j,
-.5564766488918562465935297-.9148198405846724121600860*1j,
-.5564766488918562465935297+.9148198405846724121600860*1j,
-.4345168906815271799687308-1.045382255856986531461592*1j,
-.4345168906815271799687308+1.045382255856986531461592*1j,
-.2640041595834031147954813-1.192762031948052470183960*1j,
-.2640041595834031147954813+1.192762031948052470183960*1j]
elif N == 22:
p = [-.9058702269930872551848625-52774908289999045189007100.0e-27*1j,
-.9058702269930872551848625+52774908289999045189007100.0e-27*1j,
-.8972983138153530955952835-.1584351912289865608659759*1j,
-.8972983138153530955952835+.1584351912289865608659759*1j,
-.8799661455640176154025352-.2644363039201535049656450*1j,
-.8799661455640176154025352+.2644363039201535049656450*1j,
-.8534754036851687233084587-.3710389319482319823405321*1j,
-.8534754036851687233084587+.3710389319482319823405321*1j,
-.8171682088462720394344996-.4785619492202780899653575*1j,
-.8171682088462720394344996+.4785619492202780899653575*1j,
-.7700332930556816872932937-.5874255426351153211965601*1j,
-.7700332930556816872932937+.5874255426351153211965601*1j,
-.7105305456418785989070935-.6982266265924524000098548*1j,
-.7105305456418785989070935+.6982266265924524000098548*1j,
-.6362427683267827226840153-.8118875040246347267248508*1j,
-.6362427683267827226840153+.8118875040246347267248508*1j,
-.5430983056306302779658129-.9299947824439872998916657*1j,
-.5430983056306302779658129+.9299947824439872998916657*1j,
-.4232528745642628461715044-1.055755605227545931204656*1j,
-.4232528745642628461715044+1.055755605227545931204656*1j,
-.2566376987939318038016012-1.197982433555213008346532*1j,
-.2566376987939318038016012+1.197982433555213008346532*1j]
elif N == 23:
p = [-.9066732476324988168207439,
-.9027564979912504609412993-.1010534335314045013252480*1j,
-.9027564979912504609412993+.1010534335314045013252480*1j,
-.8909283242471251458653994-.2023024699381223418195228*1j,
-.8909283242471251458653994+.2023024699381223418195228*1j,
-.8709469395587416239596874-.3039581993950041588888925*1j,
-.8709469395587416239596874+.3039581993950041588888925*1j,
-.8423805948021127057054288-.4062657948237602726779246*1j,
-.8423805948021127057054288+.4062657948237602726779246*1j,
-.8045561642053176205623187-.5095305912227258268309528*1j,
-.8045561642053176205623187+.5095305912227258268309528*1j,
-.7564660146829880581478138-.6141594859476032127216463*1j,
-.7564660146829880581478138+.6141594859476032127216463*1j,
-.6965966033912705387505040-.7207341374753046970247055*1j,
-.6965966033912705387505040+.7207341374753046970247055*1j,
-.6225903228771341778273152-.8301558302812980678845563*1j,
-.6225903228771341778273152+.8301558302812980678845563*1j,
-.5304922463810191698502226-.9439760364018300083750242*1j,
-.5304922463810191698502226+.9439760364018300083750242*1j,
-.4126986617510148836149955-1.065328794475513585531053*1j,
-.4126986617510148836149955+1.065328794475513585531053*1j,
-.2497697202208956030229911-1.202813187870697831365338*1j,
-.2497697202208956030229911+1.202813187870697831365338*1j]
elif N == 24:
p = [-.9055312363372773709269407-48440066540478700874836350.0e-27*1j,
-.9055312363372773709269407+48440066540478700874836350.0e-27*1j,
-.8983105104397872954053307-.1454056133873610120105857*1j,
-.8983105104397872954053307+.1454056133873610120105857*1j,
-.8837358034555706623131950-.2426335234401383076544239*1j,
-.8837358034555706623131950+.2426335234401383076544239*1j,
-.8615278304016353651120610-.3403202112618624773397257*1j,
-.8615278304016353651120610+.3403202112618624773397257*1j,
-.8312326466813240652679563-.4386985933597305434577492*1j,
-.8312326466813240652679563+.4386985933597305434577492*1j,
-.7921695462343492518845446-.5380628490968016700338001*1j,
-.7921695462343492518845446+.5380628490968016700338001*1j,
-.7433392285088529449175873-.6388084216222567930378296*1j,
-.7433392285088529449175873+.6388084216222567930378296*1j,
-.6832565803536521302816011-.7415032695091650806797753*1j,
-.6832565803536521302816011+.7415032695091650806797753*1j,
-.6096221567378335562589532-.8470292433077202380020454*1j,
-.6096221567378335562589532+.8470292433077202380020454*1j,
-.5185914574820317343536707-.9569048385259054576937721*1j,
-.5185914574820317343536707+.9569048385259054576937721*1j,
-.4027853855197518014786978-1.074195196518674765143729*1j,
-.4027853855197518014786978+1.074195196518674765143729*1j,
-.2433481337524869675825448-1.207298683731972524975429*1j,
-.2433481337524869675825448+1.207298683731972524975429*1j]
elif N == 25:
p = [-.9062073871811708652496104,
-.9028833390228020537142561-93077131185102967450643820.0e-27*1j,
-.9028833390228020537142561+93077131185102967450643820.0e-27*1j,
-.8928551459883548836774529-.1863068969804300712287138*1j,
-.8928551459883548836774529+.1863068969804300712287138*1j,
-.8759497989677857803656239-.2798521321771408719327250*1j,
-.8759497989677857803656239+.2798521321771408719327250*1j,
-.8518616886554019782346493-.3738977875907595009446142*1j,
-.8518616886554019782346493+.3738977875907595009446142*1j,
-.8201226043936880253962552-.4686668574656966589020580*1j,
-.8201226043936880253962552+.4686668574656966589020580*1j,
-.7800496278186497225905443-.5644441210349710332887354*1j,
-.7800496278186497225905443+.5644441210349710332887354*1j,
-.7306549271849967721596735-.6616149647357748681460822*1j,
-.7306549271849967721596735+.6616149647357748681460822*1j,
-.6704827128029559528610523-.7607348858167839877987008*1j,
-.6704827128029559528610523+.7607348858167839877987008*1j,
-.5972898661335557242320528-.8626676330388028512598538*1j,
-.5972898661335557242320528+.8626676330388028512598538*1j,
-.5073362861078468845461362-.9689006305344868494672405*1j,
-.5073362861078468845461362+.9689006305344868494672405*1j,
-.3934529878191079606023847-1.082433927173831581956863*1j,
-.3934529878191079606023847+1.082433927173831581956863*1j,
-.2373280669322028974199184-1.211476658382565356579418*1j,
-.2373280669322028974199184+1.211476658382565356579418*1j]
else:
raise ValueError("Bessel Filter not supported for order %d" % N)
return z, p, k
filter_dict = {'butter': [buttap,buttord],
'butterworth' : [buttap,buttord],
'cauer' : [ellipap,ellipord],
'elliptic' : [ellipap,ellipord],
'ellip' : [ellipap,ellipord],
'bessel' : [besselap],
'cheby1' : [cheb1ap, cheb1ord],
'chebyshev1' : [cheb1ap, cheb1ord],
'chebyshevi' : [cheb1ap, cheb1ord],
'cheby2' : [cheb2ap, cheb2ord],
'chebyshev2' : [cheb2ap, cheb2ord],
'chebyshevii' : [cheb2ap, cheb2ord]
}
band_dict = {'band':'bandpass',
'bandpass':'bandpass',
'pass' : 'bandpass',
'bp':'bandpass',
'bs':'bandstop',
'bandstop':'bandstop',
'bands' : 'bandstop',
'stop' : 'bandstop',
'l' : 'lowpass',
'low': 'lowpass',
'lowpass' : 'lowpass',
'high' : 'highpass',
'highpass' : 'highpass',
'h' : 'highpass'
}
warnings.simplefilter("always", BadCoefficients)
| gpl-3.0 |
tiagofrepereira2012/bob.measure | bob/measure/script/plot_cmc.py | 1 | 3521 | #!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Manuel Guenther <[email protected]>
# Tue Jan 8 13:36:12 CET 2013
#
# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
from __future__ import print_function
"""This script computes and plot a cumulative rank characteristics (CMC) curve
from a score file in four or five column format.
Note: The score file has to contain the exact probe file names as the 3rd
(4column) or 4th (5column) column.
"""
import os
import sys
def parse_command_line(command_line_options):
"""Parse the program options"""
usage = 'usage: %s [arguments]' % os.path.basename(sys.argv[0])
import argparse
parser = argparse.ArgumentParser(usage=usage, description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# This option is not normally shown to the user...
parser.add_argument('--self-test', action = 'store_true', help = argparse.SUPPRESS)
parser.add_argument('-s', '--score-file', required = True, help = 'The score file in 4 or 5 column format to test.')
parser.add_argument('-o', '--output-pdf-file', default = 'cmc.pdf', help = 'The PDF file to write.')
parser.add_argument('-l', '--log-x-scale', action='store_true', help = 'Plot logarithmic Rank axis.')
parser.add_argument('-x', '--no-plot', action = 'store_true', help = 'Do not print a PDF file, but only report the results.')
parser.add_argument('-p', '--parser', default = '4column', choices = ('4column', '5column'), help = 'The type of the score file.')
args = parser.parse_args(command_line_options)
if args.self_test:
# then we go into test mode, all input is preset
import tempfile
temp_dir = tempfile.mkdtemp(prefix="bobtest_")
args.output_pdf_file = os.path.join(temp_dir, "cmc.pdf")
print("temporary using file", args.output_pdf_file)
return args
def main(command_line_options = None):
"""Computes and plots the CMC curve."""
from .. import load, plot, recognition_rate
args = parse_command_line(command_line_options)
# read data
if not os.path.isfile(args.score_file): raise IOError("The given score file does not exist")
# pythonic way: create inline dictionary "{...}", index with desired value "[...]", execute function "(...)"
data = {'4column' : load.cmc_four_column, '5column' : load.cmc_five_column}[args.parser](args.score_file)
# compute recognition rate
rr = recognition_rate(data)
print("Recognition rate for score file", args.score_file, "is %3.2f%%" % (rr * 100))
if not args.no_plot:
# compute CMC
import matplotlib
if not hasattr(matplotlib, 'backends'): matplotlib.use('pdf')
import matplotlib.pyplot as mpl
from matplotlib.backends.backend_pdf import PdfPages
pp = PdfPages(args.output_pdf_file)
# CMC
fig = mpl.figure()
max_rank = plot.cmc(data, color=(0,0,1), linestyle='--', dashes=(6,2), logx = args.log_x_scale)
mpl.title("CMC Curve")
if args.log_x_scale:
mpl.xlabel('Rank (log)')
else:
mpl.xlabel('Rank')
mpl.ylabel('Recognition Rate in %')
mpl.grid(True, color=(0.3,0.3,0.3))
mpl.ylim(ymax=101)
# convert log-scale ticks to normal numbers
ticks = [int(t) for t in mpl.xticks()[0]]
mpl.xticks(ticks, ticks)
mpl.xlim([0.9, max_rank + 0.1])
pp.savefig(fig)
pp.close()
if args.self_test: #remove output file + tmp directory
import shutil
shutil.rmtree(os.path.dirname(args.output_pdf_file))
return 0
if __name__ == '__main__':
main(sys.argv[1:])
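# Minimal usage sketch (the score file name below is hypothetical):
#
#     from bob.measure.script.plot_cmc import main
#     main(['--score-file', 'scores-dev-4col.txt',
#           '--output-pdf-file', 'cmc.pdf', '--parser', '4column'])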
| bsd-3-clause |
IPGP/DSM-Kernel | util/WindowSelectionTools/window2kernelinf.py | 1 | 20022 | '''
Created on 11 August 2013
@author: hugojaegler
'''
import numpy as np
import matplotlib.pyplot as plt
from pylab import vlines
from obspy.core import read
import subprocess
from Tkinter import Tk, Canvas, Frame, Scrollbar, Label, Entry, Button, Menu, Menubutton, Listbox, StringVar, IntVar, RAISED, Checkbutton
from shutil import Error
class Path:
def __init__(self, root):
self.canvas = Canvas(root, borderwidth=1, background="#ffffff")
self.frame = Frame(self.canvas, background="#ffffff")
self.vsb = Scrollbar(root, orient="vertical", command=self.canvas.yview)
self.canvas.configure(yscrollcommand=self.vsb.set)
self.vsb.pack(side="right", fill="y")
self.canvas.pack(side="left", fill="both", expand=True)
self.canvas.create_window((4,4), window=self.frame, anchor="nw", tags="self.frame")
self.frame.bind("<Configure>", self.OnFrameConfigure)
self.data()
def data(self):
global textPath
textPath = StringVar()
global text0a
text0a = StringVar()
global text0b
text0b = StringVar()
global text2a
text2a = StringVar()
global text3
text3 = StringVar()
global alphaVar
alphaVar = IntVar()
global betaVar
betaVar = IntVar()
global allVar
allVar = IntVar()
global text6a
text6a = "0"
global filterVar
filterVar = IntVar()
global text6b
text6b = StringVar()
global t1x
t1x = ""
global t2x
t2x = ""
global t3x
t3x = ""
global t4x
t4x = ""
global text8_0
text8_0 = StringVar()
global text8_1
text8_1 = StringVar()
Label(self.frame,text="Path ? ").grid(row=0, column=0)
Entry(self.frame,textvariable=textPath).grid(row=1, column=0)
Button(self.frame, text="Valider et afficher", command = affiche_recap).grid(row=1, column=1)
Label(self.frame, text="Green function database information file\n (for a certain depth only for the instance) ?").grid(row=3)
Entry(self.frame, textvariable=text0a).grid(row=4)
Label(self.frame, text="Output directory (parentdir) ?").grid(row=5)
Entry(self.frame, textvariable=text0b).grid(row=6)
Label(self.frame, text="Phase name ?").grid(row=9)
Entry(self.frame, textvariable=text3).grid(row=10)
def afficheAlpha():
seismicPara["text"]="alpha"
betaVar.set(0)
allVar.set(0)
def afficheBeta():
seismicPara["text"]="beta"
alphaVar.set(0)
allVar.set(0)
def afficheAll():
seismicPara["text"]="all"
alphaVar.set(0)
betaVar.set(0)
seismicPara = Menubutton(self.frame, text="Seismic Parameter", relief=RAISED)
seismicPara.grid(row=0)
seismicPara.menu = Menu(seismicPara, tearoff = 0)
seismicPara["menu"] = seismicPara.menu
seismicPara.menu.add_checkbutton(label="alpha", variable = alphaVar, command = afficheAlpha)
seismicPara.menu.add_checkbutton(label="beta", variable = betaVar, command = afficheBeta)
seismicPara.menu.add_checkbutton(label="all", variable = allVar, command = afficheAll)
seismicPara.grid(row=11)
Label(self.frame, text="Filter name ?").grid(row=12)
Entry(self.frame, textvariable=text6b).grid(row=13)
Label(self.frame, text="time window t1 ?").grid(row=14)
Labelt1 = Label(self.frame, text="-->").grid(row=15)
Button(self.frame, text="time 1", command=self.time1).grid(row=15, column=1)
Label(self.frame, text="time window t2 ?").grid(row=16)
Labelt1 = Label(self.frame, text="-->").grid(row=17)
Button(self.frame, text="time 2", command=self.time2).grid(row=17, column=1)
'''
Label(self.frame, text="time window t3 ?").grid(row=18)
Labelt1 = Label(self.frame, text="-->").grid(row=19)
Button(self.frame, text="time 3", command=self.time3).grid(row=19, column=1)
Label(self.frame, text="time window t4 ?").grid(row=20)
Labelt1 = Label(self.frame, text="-->").grid(row=21)
Button(self.frame, text="time 4", command=self.time4).grid(row=21, column=1)
'''
def affiche0():
convertPara["text"]="No conversion"
text8_1.set(0)
def affiche1():
convertPara["text"]="Conversion"
text8_0.set(0)
convertPara = Menubutton(self.frame, text="Geodetic latitude to geocentric latitude conversion", relief=RAISED)
convertPara.grid(row=0)
convertPara.menu = Menu(convertPara, tearoff = 0)
convertPara["menu"] = convertPara.menu
convertPara.menu.add_checkbutton(label="No conversion", variable = text8_0, command = affiche0)
convertPara.menu.add_checkbutton(label="Conversion", variable = text8_1, command = affiche1)
convertPara.grid(row=22)
b = Checkbutton(self.frame, text = "apply filter", variable = filterVar)
b.grid(row=23, column = 0)
Button(self.frame, text="continue", command=self.quitter).grid(row=23, column=1)
def time1(self):
global t1x
global t1y
t1x, t1y = Pointage()
print type(t1x)
print t1y
def time2(self):
global t2x
global t2y
t2x, t2y = Pointage()
print t2x
print t2y
def time3(self):
t3x, t3y = Pointage()
print t3x
print t3y
def time4(self):
t4x, t4y = Pointage()
print t4x
print t4y
def quitter(self):
root.destroy()
def OnFrameConfigure(self, event):
'''Reset the scroll region to encompass the inner frame'''
self.canvas.configure(scrollregion=self.canvas.bbox("all"))
class RecapCalculs:
'''
Graphical interface summarizing the seismogram characteristics
and presenting the filtering and sensitivity-kernel computation options
'''
def __init__(self,root):
self.canvas = Canvas(root, borderwidth=1, background="#ffffff")
self.frame = Frame(self.canvas, background="#ffffff")
self.vsb = Scrollbar(root, orient="vertical", command=self.canvas.yview)
self.canvas.configure(yscrollcommand=self.vsb.set)
self.vsb.pack(side="right", fill="y")
self.canvas.pack(side="left", fill="both", expand=True)
self.canvas.create_window((4,4), window=self.frame, anchor="nw", tags="self.frame")
self.frame.bind("<Configure>", self.OnFrameConfigure)
self.data()
def data(self):
self.message = Label(self.frame, text="Recapitulatif du sismogramme").grid(row=0)
self.recap = Listbox(self.frame, height = 15, width = 50)
self.recap.insert(1, "network: {}\n".format(X[0].stats.network))
self.recap.insert(2, "station: {}\n".format(X[0].stats.station))
self.recap.insert(3, "location: {}\n".format(X[0].stats.location))
self.recap.insert(4, "channel: {}\n".format(X[0].stats.channel))
self.recap.insert(5, "start time: {}\n".format(X[0].stats.starttime))
self.recap.insert(6, "end time: {}\n".format(X[0].stats.endtime))
self.recap.insert(7, "sampling rate: {}\n".format(X[0].stats.sampling_rate))
self.recap.insert(8, "delta: {}\n".format(X[0].stats.delta))
self.recap.insert(9, "number points: {}\n".format(X[0].stats.npts))
self.recap.insert(10, "calibration: {}\n".format(X[0].stats.calib))
self.recap.insert(11, "event latitude: {}\n".format(X[0].stats.sac.evla))
self.recap.insert(12, "event longitude: {}\n".format(X[0].stats.sac.evlo))
self.recap.insert(13, "event depth: {}\n".format(X[0].stats.sac.evdp))
self.recap.insert(14, "station latitude: {}\n".format(X[0].stats.sac.stla))
self.recap.insert(15, "station longitude: {}\n".format(X[0].stats.sac.stlo))
self.recap.grid(row=0)
def OnFrameConfigure(self, event):
'''Reset the scroll region to encompass the inner frame'''
self.canvas.configure(scrollregion=self.canvas.bbox("all"))
def affiche_recap():
global X
X=read(textPath.get())
plt.figure(1)
t = np.arange(0, X[0].stats.npts / X[0].stats.sampling_rate, X[0].stats.delta)
plt.subplot(111)
plt.plot(t, X[0].data, 'k')
plt.ylabel('Raw Data')
plt.xlabel('Time [s]')
plt.suptitle(textPath.get())
plt.show()
root = Tk()
fenetre = RecapCalculs(root)
root.geometry("500x300+200+0")
root.mainloop()
class filterswindow:
'''
Graphical interface summarizing the seismogram characteristics
and presenting the filtering and sensitivity-kernel computation options
'''
def __init__(self,racine):
self.canvas = Canvas(racine, borderwidth=1, background="#ffffff")
self.frame = Frame(self.canvas, background="#ffffff")
self.vsb = Scrollbar(racine, orient="vertical", command=self.canvas.yview)
self.canvas.configure(yscrollcommand=self.vsb.set)
self.vsb.pack(side="right", fill="y")
self.canvas.pack(side="left", fill="both", expand=True)
self.canvas.create_window((4,4), window=self.frame, anchor="nw", tags="self.frame")
self.frame.bind("<Configure>", self.OnFrameConfigure)
self.data()
def data(self):
global filterVar
filterVar = 1
global text6a
text6a = "1"
global text6c1
text6c1 = StringVar()
global text6c2
text6c2 = StringVar()
global text6c3
text6c3 = StringVar()
Label(self.frame, text="Option Filter").grid(row=0)
Label(self.frame, text="\n").grid(row=1)
Label(self.frame, text="lowest frequency ?").grid(row=4)
e1 = Entry(self.frame, textvariable=text6c1)
e1.grid(row=5)
Label(self.frame, text="highest frequency ?").grid(row=20)
e2 = Entry(self.frame, textvariable=text6c2)
e2.grid(row=21)
Label(self.frame, text="number of poles ?").grid(row=22)
e3 = Entry(self.frame, textvariable=text6c3)
e3.grid(row=23)
Button(self.frame, text="continue", command=self.quitter).grid(row=24)
def quitter(self):
global racine
racine.destroy()
afficheSismoFiltre(textPath.get(), float(text6c1.get()), float(text6c2.get()), float(text6c3.get()))
def OnFrameConfigure(self, event):
'''Reset the scroll region to encompass the inner frame'''
self.canvas.configure(scrollregion=self.canvas.bbox("all"))
def optionFilter():
global racine
racine = Tk()
fenetre = filterswindow(racine)
racine.geometry("500x300+200+0")
racine.mainloop()
def afficheSismoFiltre(Path, frequenceMin, frequenceMax, nbPoles):
# Read the seismogram
st = read(Path)
# There is only one trace in the Stream object, let's work on that trace...
tr = st[0]
# Filtering with a bandpass on a copy of the original Trace
tr_filt = tr.copy()
tr_filt.filter('bandpass', freqmin = frequenceMin, freqmax = frequenceMax, corners = nbPoles, zerophase=True)
# Now let's plot the raw and filtered data...
t = np.arange(0, tr.stats.npts / tr.stats.sampling_rate, tr.stats.delta)
plt.subplot(211)
plt.plot(t, tr.data, 'k')
plt.ylabel('Raw Data')
plt.subplot(212)
plt.plot(t, tr_filt.data, 'k')
plt.ylabel('filtered Data')
plt.xlabel('Time [s]')
plt.suptitle(tr.stats.starttime)
plt.show()
class LineBuilder :
'''
Helper class used to draw vertical lines on a seismogram and to record
the coordinates of the points picked on the curve
'''
def __init__(self, line):
self.line = line
self.xs = list(line.get_xdata())
self.ys = list(line.get_ydata())
line.figure.canvas.mpl_connect('button_press_event', self)
def __call__(self, event):
print 'click', event
if event.inaxes!=self.line.axes: return
self.xs = [event.xdata, event.xdata]
self.ys = [ymin, ymax]
self.line.set_data(self.xs, self.ys)
self.line.figure.canvas.draw()
return True
def onclick(event):
if enregx == []:
enregx.append(event.xdata)
enregy.append(X[0].data[event.xdata])
vlines(event.xdata, ymin, ymax, color='k', linestyles='dashed')
def Pointage():
global enregx
global enregy
enregx=[]
enregy=[]
global X
X=read(textPath.get())
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title('click on graphic to store data')
line, = ax.plot(np.arange(0, len(X[0])) , X[0].data, 'k')
plt.xlim(0, len(X[0]))
global ymin
global ymax
ymin, ymax = plt.ylim(X[0].data.min()*1.1, X[0].data.max()*1.1)
fig.canvas.mpl_connect('button_press_event', onclick)
line, = ax.plot([0], [0])
linebuilder = LineBuilder(line)
plt.show()
print enregx # x-coordinates of the selected points
print enregy # y-values read from the seismogram
return enregx[0], enregy[0]
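# A minimal, self-contained sketch of the same picking pattern used by
# LineBuilder/onclick/Pointage above (illustration only -- this hypothetical
# helper is never called by the rest of the script; it assumes `trace_data`
# is a 1-D numpy array):
def _picking_demo(trace_data):
    picked = []
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(np.arange(len(trace_data)), trace_data, 'k')
    def _on_click(event):
        # keep only the first click, as Pointage() does, and mark it
        if event.xdata is not None and not picked:
            picked.append(event.xdata)
            vlines(event.xdata, trace_data.min(), trace_data.max(),
                   color='k', linestyles='dashed')
            fig.canvas.draw()
    fig.canvas.mpl_connect('button_press_event', _on_click)
    plt.show()
    return picked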
def main():
'''
First part: the user enters the path to the seismogram. Its
characteristics are then displayed.
'''
global root
root = Tk()
fenetrePath = Path(root)
root.geometry("800x600")
root.mainloop()
print filterVar.get()
if filterVar.get() == 1 :
global racine
racine = Tk()
fenetre = filterswindow(racine)
racine.geometry("500x300+200+0")
racine.mainloop()
text6c = text6c1.get() + ", " + text6c2.get() + ", " + text6c3.get() + "\n"
global X
X=read(textPath.get())
splited_path = textPath.get().split('.')
seismicParaVar = [alphaVar.get(),betaVar.get(),allVar.get()]
print seismicParaVar
conversionPara = "0"
if text8_1.get() == "1": conversionPara = "1"
with open('donnees.inf', 'w') as fichier:
fichier.write('# 0a. Green function database information file (for a certain depth only for the instance)\n')
fichier.write(text0a.get() + '\n')
fichier.write('# 0b. output directory (parentdir)\n')
fichier.write(text0b.get() + '\n')
fichier.write('# 1a. event name\n')
fichier.write(splited_path[1] + '\n')
fichier.write('# 1b. event latitude, longitude, depth (however, interpolation for depths won\'t be performed)\n')
fichier.write(str(X[0].stats.sac.evla) + ", ")
fichier.write(str(X[0].stats.sac.evlo) + ", ")
fichier.write(str(X[0].stats.sac.evdp) + '\n')
fichier.write('# 1c. Mrr, Mtt, Mpp, Mrt, Mrp, Mtp\n')
fichier.write('1.0 1.0 0.0 1.0 0.0 1.0\n')
fichier.write('# 2a. station name\n')
fichier.write(splited_path[0] + '\n')
fichier.write('# 2b. station latitude, longitude\n')
fichier.write(str(X[0].stats.sac.stla) + ", ")
fichier.write(str(X[0].stats.sac.stlo) + "\n")
fichier.write('# 3. phase name\n')
fichier.write(text3.get() + '\n')
fichier.write('# 4. component (Z,R,T)\n')
fichier.write(splited_path[2] + '\n')
fichier.write('# 5. seismic parameter (alpha, beta, or all for this version)\n# if you choose "test" the program will only give you the synthetic\n# (fort.13 in your directory too)\n')
if seismicParaVar[0]==1: fichier.write('alpha\n')
elif seismicParaVar[1]==1 : fichier.write('beta\n')
else : fichier.write('all\n')
fichier.write('# 6a. Butterworth filter (if 1 on; if 0 off)\n')
fichier.write(text6a +"\n")
fichier.write('# 6b. filter name (mandatory even if 6a=0 )\n')
fichier.write(text6b.get() + "\n")
fichier.write("# 6c. if butterworth = 1; lowest freq., highest freq., number of poles\n# if butterworth = 0; just comment out those parameters (subroutine won\'t read them)\n")
if filterVar == 1:
fichier.write(text6c)
else : fichier.write('#\n')
fichier.write('# 7. time window t1, t2, t3, t4 \n# (if t1=t2 and t3=t4, fwin(:) will be rectangular)\n# (normally taper functions are sine functions)\n')
fichier.write(str(t1x) + ", ")
fichier.write(str(t2x) + "\n")
#fichier.write(t3x + ", ")
#fichier.write(t4x + "\n")
fichier.write('# 8. itranslat (1 if you convert geodetic latitude to geocentric latitude)\n')
fichier.write(conversionPara + '\n')
fichier.write('#\n#\n# Below are minor parameters for kernel calculations\n# (i.e. you can leave them as they are to start with)\n#\n# Aa. SINC interpolation window (ipdistance deg) (it works well with 10-20 degrees)\n')
fichier.write('10.d0\n')
fichier.write('# Ab. reducing slowness for interpolation (c_red_reci s/deg) (if 0.d0 we do not perform slowness reduction)\n')
fichier.write('0.d0\n')
fichier.write('# Ba. fast FFT (if 1 on; if 0 off)\n# you can re-define imin and imax for FFT of Green functions\n# thence you can avoid reading frequencies for which you don\'t have to account.\n#\n')
fichier.write('0\n')
fichier.write('# Bb. if fast FFT = 1; lowest i(freq), highest i(freq) (note that freq = i(freq)/tlen)\n# if fast FFT = 0; just comment out those parameters (subroutine won\'t read them)\n')
fichier.write('#0 256 \n')
fichier.write('# Ca. gridding and extent in R(longitudinal) direction (dph, ph1)\n')
fichier.write('2.5d-1 5.d0\n')
fichier.write('# Cb. gridding and extent in T(transverse) direction (dth, thw)\n')
fichier.write('2.5d-1 5.d0\n')
fichier.write('# Cd. gridding in radius (rmin, rmax, deltar : should correspond to grids in catalogue)\n# if you put 0.d0 0.d0 0.d0 then the program will take the grids in catalogue\n')
fichier.write('0.d0 0.d0 0.d0\n')
fichier.write('# Da. time window (start, end in sec)\n')
fichier.write('0.d0 1.3d3\n')
fichier.write('# Db. sampling Hz\n')
fichier.write('2.d0\n')
fichier.write('# Ea. ignoring criteria (calculrapide: we ignore the values below; if 0.d0 we don\'t use the algo)\n# (in Fuji et al. 2012b, we chose 5.d-3)\n')
fichier.write('0.d0\n')
fichier.write('#\n# Eb. number of kernel types for the ignoring scheme (if Ea. = 0.d0, just comment out all)\n')
fichier.write('1\n')
fichier.write('# Ec. kernel type for ignoring scheme (if 0 we calculate for the envelop) note them vertically\n')
fichier.write('0\n')
fichier.write('# F. PSV/SH (PSV only = 2; SH only = 1; PSV+SH =3)\n')
fichier.write('3\n')
fichier.write('# don\'t forget write \'end\' at the end\n')
fichier.write('end\n')
newPath = textPath.get() + ".inf"
print("enregistre sous :%s" % newPath)
try:
subprocess.call('cp donnees.inf %s' % newPath, shell = True)
except (Error, OSError), e:
print "Attempt to copy failed: %s" % e
subprocess.call('rm donnees.inf', shell = True)
'''
Second part: the seismogram is displayed. The user can pick points
on it and their coordinates on the curve are recorded.
'''
if __name__ == "__main__":
main()
| gpl-3.0 |
daodaoliang/neural-network-animation | matplotlib/backends/backend_tkagg.py | 10 | 31659 | # Todd Miller [email protected]
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import tkinter as Tk
from six.moves import tkinter_filedialog as FileDialog
import os, sys, math
import os.path
# Paint image to Tk photo blitter extension
import matplotlib.backends.tkagg as tkagg
from matplotlib.backends.backend_agg import FigureCanvasAgg
import matplotlib.backends.windowing as windowing
import matplotlib
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import RendererBase, GraphicsContextBase
from matplotlib.backend_bases import FigureManagerBase, FigureCanvasBase
from matplotlib.backend_bases import NavigationToolbar2, cursors, TimerBase
from matplotlib.backend_bases import ShowBase
from matplotlib._pylab_helpers import Gcf
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
import matplotlib.cbook as cbook
rcParams = matplotlib.rcParams
verbose = matplotlib.verbose
backend_version = Tk.TkVersion
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 75
cursord = {
cursors.MOVE: "fleur",
cursors.HAND: "hand2",
cursors.POINTER: "arrow",
cursors.SELECT_REGION: "tcross",
}
def round(x):
return int(math.floor(x+0.5))
def raise_msg_to_str(msg):
"""msg is a return arg from a raise. Join with new lines"""
if not is_string_like(msg):
msg = '\n'.join(map(str, msg))
return msg
def error_msg_tkpaint(msg, parent=None):
from six.moves import tkinter_messagebox as tkMessageBox
tkMessageBox.showerror("matplotlib", msg)
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.show()
class Show(ShowBase):
def mainloop(self):
Tk.mainloop()
show = Show()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, figure)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
_focus = windowing.FocusManager()
window = Tk.Tk()
window.withdraw()
if Tk.TkVersion >= 8.5:
# put an mpl icon on the window rather than the default tk icon. Tkinter
# doesn't allow colour icons on linux systems, but tk >=8.5 has an iconphoto
# command which we call directly. Source:
# http://mail.python.org/pipermail/tkinter-discuss/2006-November/000954.html
icon_fname = os.path.join(rcParams['datapath'], 'images', 'matplotlib.gif')
icon_img = Tk.PhotoImage(file=icon_fname)
try:
window.tk.call('wm', 'iconphoto', window._w, icon_img)
except (SystemExit, KeyboardInterrupt):
# re-raise exit type Exceptions
raise
except:
# log the failure, but carry on
verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])
canvas = FigureCanvasTkAgg(figure, master=window)
figManager = FigureManagerTkAgg(canvas, num, window)
if matplotlib.is_interactive():
figManager.show()
return figManager
class TimerTk(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses Tk's timer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
'''
def __init__(self, parent, *args, **kwargs):
TimerBase.__init__(self, *args, **kwargs)
self.parent = parent
self._timer = None
def _timer_start(self):
self._timer_stop()
self._timer = self.parent.after(self._interval, self._on_timer)
def _timer_stop(self):
if self._timer is not None:
self.parent.after_cancel(self._timer)
self._timer = None
def _on_timer(self):
TimerBase._on_timer(self)
# Tk after() is only a single shot, so we need to add code here to
# reset the timer if we're not operating in single shot mode.
if not self._single and len(self.callbacks) > 0:
self._timer = self.parent.after(self._interval, self._on_timer)
else:
self._timer = None
class FigureCanvasTkAgg(FigureCanvasAgg):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65515 : 'super',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
}
_keycode_lookup = {
262145: 'control',
524320: 'alt',
524352: 'alt',
1048584: 'super',
1048592: 'super',
131074: 'shift',
131076: 'shift',
}
"""_keycode_lookup is used for badly mapped (i.e. no event.key_sym set)
keys on apple keyboards."""
def __init__(self, figure, master=None, resize_callback=None):
FigureCanvasAgg.__init__(self, figure)
self._idle = True
self._idle_callback = None
t1,t2,w,h = self.figure.bbox.bounds
w, h = int(w), int(h)
self._tkcanvas = Tk.Canvas(
master=master, width=w, height=h, borderwidth=4)
self._tkphoto = Tk.PhotoImage(
master=self._tkcanvas, width=w, height=h)
self._tkcanvas.create_image(w//2, h//2, image=self._tkphoto)
self._resize_callback = resize_callback
self._tkcanvas.bind("<Configure>", self.resize)
self._tkcanvas.bind("<Key>", self.key_press)
self._tkcanvas.bind("<Motion>", self.motion_notify_event)
self._tkcanvas.bind("<KeyRelease>", self.key_release)
for name in "<Button-1>", "<Button-2>", "<Button-3>":
self._tkcanvas.bind(name, self.button_press_event)
for name in "<Double-Button-1>", "<Double-Button-2>", "<Double-Button-3>":
self._tkcanvas.bind(name, self.button_dblclick_event)
for name in "<ButtonRelease-1>", "<ButtonRelease-2>", "<ButtonRelease-3>":
self._tkcanvas.bind(name, self.button_release_event)
# Mouse wheel on Linux generates button 4/5 events
for name in "<Button-4>", "<Button-5>":
self._tkcanvas.bind(name, self.scroll_event)
# Mouse wheel for windows goes to the window with the focus.
# Since the canvas won't usually have the focus, bind the
# event to the window containing the canvas instead.
# See http://wiki.tcl.tk/3893 (mousewheel) for details
root = self._tkcanvas.winfo_toplevel()
root.bind("<MouseWheel>", self.scroll_event_windows)
# Can't get destroy events by binding to _tkcanvas. Therefore, bind
# to the window and filter.
def filter_destroy(evt):
if evt.widget is self._tkcanvas:
self.close_event()
root.bind("<Destroy>", filter_destroy)
self._master = master
self._tkcanvas.focus_set()
def resize(self, event):
width, height = event.width, event.height
if self._resize_callback is not None:
self._resize_callback(event)
# compute desired figure size in inches
dpival = self.figure.dpi
winch = width/dpival
hinch = height/dpival
self.figure.set_size_inches(winch, hinch)
self._tkcanvas.delete(self._tkphoto)
self._tkphoto = Tk.PhotoImage(
master=self._tkcanvas, width=int(width), height=int(height))
self._tkcanvas.create_image(int(width/2),int(height/2),image=self._tkphoto)
self.resize_event()
self.show()
# a resizing will in general move the pointer position
# relative to the canvas, so process it as a motion notify
# event. An intended side effect of this call is to allow
# window raises (which trigger a resize) to get the cursor
# position to the mpl event framework so key presses which are
# over the axes will work w/o clicks or explicit motion
self._update_pointer_position(event)
def _update_pointer_position(self, guiEvent=None):
"""
Figure out if we are inside the canvas or not and update the
canvas enter/leave events
"""
# if the pointer is over the canvas, set the lastx and lasty
# attrs of the canvas so it can process event w/o mouse click
# or move
# the window's upper, left coords in screen coords
xw = self._tkcanvas.winfo_rootx()
yw = self._tkcanvas.winfo_rooty()
# the pointer's location in screen coords
xp, yp = self._tkcanvas.winfo_pointerxy()
# now figure out the canvas coordinates of the pointer
xc = xp - xw
yc = yp - yw
# flip top/bottom
yc = self.figure.bbox.height - yc
# JDH: this method was written originally to get the pointer
# location to the backend lastx and lasty attrs so that events
# like KeyEvent can be handled without mouse events. e.g., if
# the cursor is already above the axes, then key presses like
# 'g' should toggle the grid. In order for this to work in
# backend_bases, the canvas needs to know _lastx and _lasty.
# There are three ways to get this info the canvas:
#
# 1) set it explicitly
#
# 2) call enter/leave events explicitly. The downside of this
# in the impl below is that enter could be repeatedly
# triggered if the mouse is over the axes and one is
# resizing with the keyboard. This is not entirely bad,
# because the mouse position relative to the canvas is
# changing, but it may be surprising to get repeated entries
# without leaves
#
# 3) process it as a motion notify event. This also has pros
# and cons. The mouse is moving relative to the window, but
# this may surprise an event handler writer who is getting
# motion_notify_events even if the mouse has not moved
# here are the three scenarios
if 1:
# just manually set it
self._lastx, self._lasty = xc, yc
elif 0:
# alternate implementation: process it as a motion
FigureCanvasBase.motion_notify_event(self, xc, yc, guiEvent)
elif 0:
# alternate implementation -- process enter/leave events
# instead of motion/notify
if self.figure.bbox.contains(xc, yc):
self.enter_notify_event(guiEvent, xy=(xc,yc))
else:
self.leave_notify_event(guiEvent)
def draw(self):
FigureCanvasAgg.draw(self)
tkagg.blit(self._tkphoto, self.renderer._renderer, colormode=2)
self._master.update_idletasks()
def blit(self, bbox=None):
tkagg.blit(self._tkphoto, self.renderer._renderer, bbox=bbox, colormode=2)
self._master.update_idletasks()
show = draw
def draw_idle(self):
'update drawing area only if idle'
d = self._idle
self._idle = False
def idle_draw(*args):
try:
self.draw()
finally:
self._idle = True
if d:
self._idle_callback = self._tkcanvas.after_idle(idle_draw)
def get_tk_widget(self):
"""returns the Tk widget used to implement FigureCanvasTkAgg.
Although the initial implementation uses a Tk canvas, this routine
is intended to hide that fact.
"""
return self._tkcanvas
def motion_notify_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
def button_press_event(self, event, dblclick=False):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if sys.platform=='darwin':
# 2 and 3 were reversed on the OSX platform I
# tested under tkagg
if num==2: num=3
elif num==3: num=2
FigureCanvasBase.button_press_event(self, x, y, num, dblclick=dblclick, guiEvent=event)
def button_dblclick_event(self,event):
self.button_press_event(event,dblclick=True)
def button_release_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if sys.platform=='darwin':
# 2 and 3 were reversed on the OSX platform I
# tested under tkagg
if num==2: num=3
elif num==3: num=2
FigureCanvasBase.button_release_event(self, x, y, num, guiEvent=event)
def scroll_event(self, event):
x = event.x
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if num==4: step = +1
elif num==5: step = -1
else: step = 0
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
def scroll_event_windows(self, event):
"""MouseWheel event processor"""
# need to find the window that contains the mouse
w = event.widget.winfo_containing(event.x_root, event.y_root)
if w == self._tkcanvas:
x = event.x_root - w.winfo_rootx()
y = event.y_root - w.winfo_rooty()
y = self.figure.bbox.height - y
step = event.delta/120.
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
def _get_key(self, event):
val = event.keysym_num
if val in self.keyvald:
key = self.keyvald[val]
elif val == 0 and sys.platform == 'darwin' and \
event.keycode in self._keycode_lookup:
key = self._keycode_lookup[event.keycode]
elif val < 256:
key = chr(val)
else:
key = None
# add modifier keys to the key string. Bit details originate from
# http://effbot.org/tkinterbook/tkinter-events-and-bindings.htm
# BIT_SHIFT = 0x001; BIT_CAPSLOCK = 0x002; BIT_CONTROL = 0x004;
# BIT_LEFT_ALT = 0x008; BIT_NUMLOCK = 0x010; BIT_RIGHT_ALT = 0x080;
# BIT_MB_1 = 0x100; BIT_MB_2 = 0x200; BIT_MB_3 = 0x400;
# In general, the modifier key is excluded from the modifier flag,
# however this is not the case on "darwin", so double check that
# we aren't adding repeat modifier flags to a modifier key.
if sys.platform == 'win32':
modifiers = [(17, 'alt', 'alt'),
(2, 'ctrl', 'control'),
]
elif sys.platform == 'darwin':
modifiers = [(3, 'super', 'super'),
(4, 'alt', 'alt'),
(2, 'ctrl', 'control'),
]
else:
modifiers = [(6, 'super', 'super'),
(3, 'alt', 'alt'),
(2, 'ctrl', 'control'),
]
if key is not None:
# note, shift is not added to the keys as this is already accounted for
for bitmask, prefix, key_name in modifiers:
if event.state & (1 << bitmask) and key_name not in key:
key = '{0}+{1}'.format(prefix, key)
return key
def key_press(self, event):
key = self._get_key(event)
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
def key_release(self, event):
key = self._get_key(event)
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
This is useful for getting periodic events through the backend's native
event loop. Implemented only for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerTk(self._tkcanvas, *args, **kwargs)
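# A hedged usage sketch of new_timer (assumes `canvas` is an existing
# FigureCanvasTkAgg instance and `update_plot` a zero-argument callable):
#     timer = canvas.new_timer(interval=500)
#     timer.add_callback(update_plot)
#     timer.start()
# The callback then runs every 500 ms on Tk's event loop until timer.stop()
# is called.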
def flush_events(self):
self._master.update()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerTkAgg(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The tk.Toolbar
window : The tk.Window
"""
def __init__(self, canvas, num, window):
FigureManagerBase.__init__(self, canvas, num)
self.window = window
self.window.withdraw()
self.set_window_title("Figure %d" % num)
self.canvas = canvas
self._num = num
if matplotlib.rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2TkAgg( canvas, self.window )
else:
self.toolbar = None
if self.toolbar is not None:
self.toolbar.update()
self.canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
self._shown = False
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
def resize(self, width, height=None):
# Before 09-12-22, the resize method took a single *event*
# parameter. On the other hand, the resize methods of other
# FigureManager classes take *width* and *height* parameters,
# which are used to change the size of the window. For
# Figure.set_size_inches with forward=True to work with the Tk
# backend, I changed the function signature but tried to keep
# it backward compatible. -JJL
# when a single parameter is given, consider it as an event
if height is None:
width = width.width
else:
self.canvas._tkcanvas.master.geometry("%dx%d" % (width, height))
if self.toolbar is not None:
self.toolbar.configure(width=width)
def show(self):
"""
this function doesn't segfault but causes the
PyEval_RestoreThread: NULL state bug on win32
"""
_focus = windowing.FocusManager()
if not self._shown:
def destroy(*args):
self.window = None
Gcf.destroy(self._num)
self.canvas._tkcanvas.bind("<Destroy>", destroy)
self.window.deiconify()
# anim.py requires this
self.window.update()
else:
self.canvas.draw_idle()
self._shown = True
def destroy(self, *args):
if self.window is not None:
#self.toolbar.destroy()
if self.canvas._idle_callback:
self.canvas._tkcanvas.after_cancel(self.canvas._idle_callback)
self.window.destroy()
if Gcf.get_num_fig_managers()==0:
if self.window is not None:
self.window.quit()
self.window = None
def get_window_title(self):
return self.window.wm_title()
def set_window_title(self, title):
self.window.wm_title(title)
def full_screen_toggle(self):
is_fullscreen = bool(self.window.attributes('-fullscreen'))
self.window.attributes('-fullscreen', not is_fullscreen)
class AxisMenu:
def __init__(self, master, naxes):
self._master = master
self._naxes = naxes
self._mbar = Tk.Frame(master=master, relief=Tk.RAISED, borderwidth=2)
self._mbar.pack(side=Tk.LEFT)
self._mbutton = Tk.Menubutton(
master=self._mbar, text="Axes", underline=0)
self._mbutton.pack(side=Tk.LEFT, padx="2m")
self._mbutton.menu = Tk.Menu(self._mbutton)
self._mbutton.menu.add_command(
label="Select All", command=self.select_all)
self._mbutton.menu.add_command(
label="Invert All", command=self.invert_all)
self._axis_var = []
self._checkbutton = []
for i in range(naxes):
self._axis_var.append(Tk.IntVar())
self._axis_var[i].set(1)
self._checkbutton.append(self._mbutton.menu.add_checkbutton(
label = "Axis %d" % (i+1),
variable=self._axis_var[i],
command=self.set_active))
self._mbutton.menu.invoke(self._mbutton.menu.index("Select All"))
self._mbutton['menu'] = self._mbutton.menu
self._mbar.tk_menuBar(self._mbutton)
self.set_active()
def adjust(self, naxes):
if self._naxes < naxes:
for i in range(self._naxes, naxes):
self._axis_var.append(Tk.IntVar())
self._axis_var[i].set(1)
self._checkbutton.append( self._mbutton.menu.add_checkbutton(
label = "Axis %d" % (i+1),
variable=self._axis_var[i],
command=self.set_active))
elif self._naxes > naxes:
for i in range(self._naxes-1, naxes-1, -1):
del self._axis_var[i]
self._mbutton.menu.forget(self._checkbutton[i])
del self._checkbutton[i]
self._naxes = naxes
self.set_active()
def get_indices(self):
a = [i for i in range(len(self._axis_var)) if self._axis_var[i].get()]
return a
def set_active(self):
self._master.set_active(self.get_indices())
def invert_all(self):
for a in self._axis_var:
a.set(not a.get())
self.set_active()
def select_all(self):
for a in self._axis_var:
a.set(1)
self.set_active()
class NavigationToolbar2TkAgg(NavigationToolbar2, Tk.Frame):
"""
Public attributes
canvas - the FigureCanvas (gtk.DrawingArea)
win - the gtk.Window
"""
def __init__(self, canvas, window):
self.canvas = canvas
self.window = window
self._idle = True
#Tk.Frame.__init__(self, master=self.canvas._tkcanvas)
NavigationToolbar2.__init__(self, canvas)
def destroy(self, *args):
del self.message
Tk.Frame.destroy(self, *args)
def set_message(self, s):
self.message.set(s)
def draw_rubberband(self, event, x0, y0, x1, y1):
height = self.canvas.figure.bbox.height
y0 = height-y0
y1 = height-y1
try: self.lastrect
except AttributeError: pass
else: self.canvas._tkcanvas.delete(self.lastrect)
self.lastrect = self.canvas._tkcanvas.create_rectangle(x0, y0, x1, y1)
#self.canvas.draw()
def release(self, event):
try: self.lastrect
except AttributeError: pass
else:
self.canvas._tkcanvas.delete(self.lastrect)
del self.lastrect
def set_cursor(self, cursor):
self.window.configure(cursor=cursord[cursor])
def _Button(self, text, file, command, extension='.ppm'):
img_file = os.path.join(rcParams['datapath'], 'images', file + extension)
im = Tk.PhotoImage(master=self, file=img_file)
b = Tk.Button(
master=self, text=text, padx=2, pady=2, image=im, command=command)
b._ntimage = im
b.pack(side=Tk.LEFT)
return b
def _init_toolbar(self):
xmin, xmax = self.canvas.figure.bbox.intervalx
height, width = 50, xmax-xmin
Tk.Frame.__init__(self, master=self.window,
width=int(width), height=int(height),
borderwidth=2)
self.update() # Make axes menu
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
# spacer, unhandled in Tk
pass
else:
button = self._Button(text=text, file=image_file,
command=getattr(self, callback))
if tooltip_text is not None:
ToolTip.createToolTip(button, tooltip_text)
self.message = Tk.StringVar(master=self)
self._message_label = Tk.Label(master=self, textvariable=self.message)
self._message_label.pack(side=Tk.RIGHT)
self.pack(side=Tk.BOTTOM, fill=Tk.X)
def configure_subplots(self):
toolfig = Figure(figsize=(6,3))
window = Tk.Tk()
canvas = FigureCanvasTkAgg(toolfig, master=window)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
def save_figure(self, *args):
from six.moves import tkinter_tkfiledialog, tkinter_messagebox
filetypes = self.canvas.get_supported_filetypes().copy()
default_filetype = self.canvas.get_default_filetype()
# Tk doesn't provide a way to choose a default filetype,
# so we just have to put it first
default_filetype_name = filetypes[default_filetype]
del filetypes[default_filetype]
sorted_filetypes = list(six.iteritems(filetypes))
sorted_filetypes.sort()
sorted_filetypes.insert(0, (default_filetype, default_filetype_name))
tk_filetypes = [
(name, '*.%s' % ext) for (ext, name) in sorted_filetypes]
# adding a default extension seems to break the
# asksaveasfilename dialog when you choose various save types
# from the dropdown. Passing in the empty string seems to
# work - JDH!
#defaultextension = self.canvas.get_default_filetype()
defaultextension = ''
initialdir = rcParams.get('savefig.directory', '')
initialdir = os.path.expanduser(initialdir)
initialfile = self.canvas.get_default_filename()
fname = tkinter_tkfiledialog.asksaveasfilename(
master=self.window,
title='Save the figure',
filetypes=tk_filetypes,
defaultextension=defaultextension,
initialdir=initialdir,
initialfile=initialfile,
)
if fname == "" or fname == ():
return
else:
if initialdir == '':
# explicitly missing key or empty str signals to use cwd
rcParams['savefig.directory'] = initialdir
else:
# save dir for next time
rcParams['savefig.directory'] = os.path.dirname(six.text_type(fname))
try:
# This method will handle the delegation to the correct type
self.canvas.print_figure(fname)
except Exception as e:
tkinter_messagebox.showerror("Error saving file", str(e))
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
_focus = windowing.FocusManager()
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
#if not hasattr(self, "omenu"):
# self.set_active(range(naxes))
# self.omenu = AxisMenu(master=self, naxes=naxes)
#else:
# self.omenu.adjust(naxes)
NavigationToolbar2.update(self)
def dynamic_update(self):
'update drawing area only if idle'
# legacy method; new method is canvas.draw_idle
self.canvas.draw_idle()
class ToolTip(object):
"""
Tooltip recipe from
http://www.voidspace.org.uk/python/weblog/arch_d7_2006_07_01.shtml#e387
"""
@staticmethod
def createToolTip(widget, text):
toolTip = ToolTip(widget)
def enter(event):
toolTip.showtip(text)
def leave(event):
toolTip.hidetip()
widget.bind('<Enter>', enter)
widget.bind('<Leave>', leave)
def __init__(self, widget):
self.widget = widget
self.tipwindow = None
self.id = None
self.x = self.y = 0
def showtip(self, text):
"Display text in tooltip window"
self.text = text
if self.tipwindow or not self.text:
return
x, y, _, _ = self.widget.bbox("insert")
x = x + self.widget.winfo_rootx() + 27
y = y + self.widget.winfo_rooty()
self.tipwindow = tw = Tk.Toplevel(self.widget)
tw.wm_overrideredirect(1)
tw.wm_geometry("+%d+%d" % (x, y))
try:
# For Mac OS
tw.tk.call("::tk::unsupported::MacWindowStyle",
"style", tw._w,
"help", "noActivates")
except Tk.TclError:
pass
label = Tk.Label(tw, text=self.text, justify=Tk.LEFT,
background="#ffffe0", relief=Tk.SOLID, borderwidth=1,
)
label.pack(ipadx=1)
def hidetip(self):
tw = self.tipwindow
self.tipwindow = None
if tw:
tw.destroy()
FigureCanvas = FigureCanvasTkAgg
FigureManager = FigureManagerTkAgg
| mit |
sfairhur/pycbc | setup.py | 2 | 10492 | #!/usr/bin/env python
# Copyright (C) 2012 Alex Nitz, Duncan Brown, Andrew Miller, Josh Willis
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
setup.py file for PyCBC package
"""
from __future__ import print_function
import sys
import os, subprocess, shutil
from distutils.errors import DistutilsError
from distutils.command.clean import clean as _clean
from setuptools.command.install import install as _install
from setuptools import Extension, setup, Command
from setuptools.command.build_ext import build_ext as _build_ext
from setuptools import find_packages
requires = []
setup_requires = ['numpy>=1.13.0,<1.15.3; python_version <= "2.7"',
'numpy>=1.13.0; python_version > "3.0"']
install_requires = setup_requires + ['Mako>=1.0.1',
'cython',
'decorator>=3.4.2',
'scipy>=0.16.0; python_version >= "3.5"',
'scipy>=0.16.0,<1.3.0; python_version <= "3.4"',
'matplotlib>=1.5.1',
'pillow',
'h5py>=2.5',
'jinja2',
'astropy>=2.0.3,<3.0.0; python_version <= "2.7"',
'astropy>=2.0.3; python_version > "3.0"',
'mpld3>=0.3',
'lscsoft-glue>=1.59.3',
'emcee==2.2.1',
'requests>=1.2.1',
'beautifulsoup4>=4.6.0',
'six>=1.10.0',
'ligo-segments',
'tqdm',
'weave>=0.16.0; python_version <= "2.7"',
]
def find_files(dirname, relpath=None):
def find_paths(dirname):
items = []
for fname in os.listdir(dirname):
path = os.path.join(dirname, fname)
if os.path.isdir(path):
items += find_paths(path)
elif not path.endswith(".py") and not path.endswith(".pyc"):
items.append(path)
return items
items = find_paths(dirname)
if relpath is None:
relpath = dirname
return [os.path.relpath(path, relpath) for path in items]
class cbuild_ext(_build_ext):
def run(self):
import pkg_resources
# At this point we can be sure pip has already installed numpy
numpy_incl = pkg_resources.resource_filename('numpy', 'core/include')
for ext in self.extensions:
if (hasattr(ext, 'include_dirs') and
numpy_incl not in ext.include_dirs):
ext.include_dirs.append(numpy_incl)
_build_ext.run(self)
# Add swig-generated files to the list of things to clean, so they
# get regenerated each time.
class clean(_clean):
def finalize_options (self):
_clean.finalize_options(self)
self.clean_files = []
self.clean_folders = ['docs/_build']
def run(self):
_clean.run(self)
for f in self.clean_files:
try:
os.unlink(f)
print('removed {0}'.format(f))
except:
pass
for fol in self.clean_folders:
shutil.rmtree(fol, ignore_errors=True)
print('removed {0}'.format(fol))
# write versioning info
def get_version_info():
"""Get VCS info and write version info to version.py
"""
from pycbc import _version_helper
class vdummy(object):
def __getattr__(self, attr):
return ''
# If this is a pycbc git repo always populate version information using GIT
try:
vinfo = _version_helper.generate_git_version_info()
except:
vinfo = vdummy()
vinfo.version = '1.14.dev2'
vinfo.release = 'False'
with open('pycbc/version.py', 'w') as f:
f.write("# coding: utf-8\n")
f.write("# Generated by setup.py for PyCBC on %s.\n\n"
% vinfo.build_date)
# print general info
f.write('version = \'%s\'\n' % vinfo.version)
f.write('date = \'%s\'\n' % vinfo.date)
f.write('release = %s\n' % vinfo.release)
f.write('last_release = \'%s\'\n' % vinfo.last_release)
# print git info
f.write('\ngit_hash = \'%s\'\n' % vinfo.hash)
f.write('git_branch = \'%s\'\n' % vinfo.branch)
f.write('git_tag = \'%s\'\n' % vinfo.tag)
f.write('git_author = \'%s\'\n' % vinfo.author)
f.write('git_committer = \'%s\'\n' % vinfo.committer)
f.write('git_status = \'%s\'\n' % vinfo.status)
f.write('git_builder = \'%s\'\n' % vinfo.builder)
f.write('git_build_date = \'%s\'\n' % vinfo.build_date)
f.write('git_verbose_msg = """Version: %s\n'
'Branch: %s\n'
'Tag: %s\n'
'Id: %s\n'
'Builder: %s\n'
'Build date: %s\n'
'Repository status is %s"""\n' %(
vinfo.version,
vinfo.branch,
vinfo.tag,
vinfo.hash,
vinfo.builder,
vinfo.build_date,
vinfo.status))
f.write('from pycbc._version import *\n')
version = vinfo.version
from pycbc import version
version = version.version
return version
class build_docs(Command):
user_options = []
description = "Build the documentation pages"
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
subprocess.check_call("cd docs; cp Makefile.std Makefile; cp conf_std.py conf.py; sphinx-apidoc "
" -o ./ -f -A 'PyCBC dev team' -V '0.1' ../pycbc && make html",
stderr=subprocess.STDOUT, shell=True)
class build_gh_pages(Command):
user_options = []
description = "Build the documentation pages for GitHub"
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
subprocess.check_call("mkdir -p _gh-pages/latest && touch _gh-pages/.nojekyll && "
"cd docs; cp Makefile.gh_pages Makefile; cp conf_std.py conf.py; sphinx-apidoc "
" -o ./ -f -A 'PyCBC dev team' -V '0.1' ../pycbc && make html",
stderr=subprocess.STDOUT, shell=True)
cmdclass = { 'build_docs' : build_docs,
'build_gh_pages' : build_gh_pages,
'clean' : clean,
'build_ext':cbuild_ext
}
extras_require = {'cuda': ['pycuda>=2015.1', 'scikit-cuda']}
# do the actual work of building the package
VERSION = get_version_info()
cythonext = ['waveform.spa_tmplt',
'waveform.utils',
'types.array',
'filter.matchedfilter',
'vetoes.chisq']
ext = []
cython_compile_args = ['-O3', '-w', '-msse4.2', '-ffast-math',
'-ffinite-math-only']
cython_link_args = []
# Mac's clang compiler doesn't have openMP support by default. Therefore
# disable openmp builds on MacOSX. Optimization should never really be a
# concern on that OS, and this line can be commented out if needed anyway.
if not sys.platform == 'darwin':
cython_compile_args += ['-fopenmp']
cython_link_args += ['-fopenmp']
for name in cythonext:
e = Extension("pycbc.%s_cpu" % name,
["pycbc/%s_cpu.pyx" % name.replace('.', '/')],
extra_compile_args=cython_compile_args,
extra_link_args=cython_link_args,
compiler_directives={'embedsignature': True})
ext.append(e)
# Not all modules work like this:
e = Extension("pycbc.fft.fftw_pruned_cython",
["pycbc/fft/fftw_pruned_cython.pyx"],
extra_compile_args=cython_compile_args,
extra_link_args=cython_link_args,
compiler_directives={'embedsignature': True})
ext.append(e)
e = Extension("pycbc.events.eventmgr_cython",
["pycbc/events/eventmgr_cython.pyx"],
extra_compile_args=cython_compile_args,
extra_link_args=cython_link_args,
compiler_directives={'embedsignature': True})
ext.append(e)
setup (
name = 'PyCBC',
version = VERSION,
description = 'Core library to analyze gravitational-wave data, find signals, and study their parameters.',
long_description = open('descr.rst').read(),
author = 'The PyCBC team',
author_email = '[email protected]',
url = 'http://www.pycbc.org/',
download_url = 'https://github.com/gwastro/pycbc/tarball/v%s' % VERSION,
keywords = ['ligo', 'physics', 'gravity', 'signal processing', 'gravitational waves'],
cmdclass = cmdclass,
setup_requires = setup_requires,
extras_require = extras_require,
install_requires = install_requires,
scripts = find_files('bin', relpath='./'),
packages = find_packages(),
package_data = {'pycbc.workflow': find_files('pycbc/workflow'),
'pycbc.results': find_files('pycbc/results'),
'pycbc.tmpltbank': find_files('pycbc/tmpltbank')},
ext_modules = ext,
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Intended Audience :: Science/Research',
'Natural Language :: English',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Astronomy',
'Topic :: Scientific/Engineering :: Physics',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
],
)
| gpl-3.0 |
TinyOS-Camp/DDEA-DEV | Development/ddea.py | 2 | 10629 | #!/adsc/DDEA_PROTO/bin/python
# coding: utf-8
"""
======================================================================
Learning and Visualizing the BMS sensor-time-weather data structure
======================================================================
This example employs several unsupervised learning techniques to extract
the energy data structure from variations in Building Automation System (BAS)
and historical weather data.
The fundamental timelet for analysis is 15 min, referred to as Q.
** currently use H (Hour) as the fundamental timelet, need to change later **
The following analysis steps are designed to be executed.
Data Pre-processing
--------------------------
- Data Retrieval and Standardization
- Outlier Detection
- Interpolation
Data Summarization
--------------------------
- Data Transformation
- Sensor Clustering
Model Discovery Bayesian Network
--------------------------
- Automatic State Classification
- Structure Discovery and Analysis
"""
#print(__doc__)
# Author: Deokwooo Jung [email protected]
##################################################################
# General Moduels
from __future__ import division # To force floating point division
import os
import sys
import numpy as np
from numpy.linalg import inv
from numpy.linalg import norm
import uuid
import pylab as pl
from scipy import signal
from scipy import stats
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
from multiprocessing import Pool
#from datetime import datetime
import datetime as dt
from dateutil import tz
import shlex, subprocess
import mytool as mt
import time
import retrieve_weather as rw
import itertools
import calendar
import random
from matplotlib.collections import LineCollection
import pprint
import radar_chart
# Custom library
from data_tools import *
from data_retrieval import *
from pack_cluster import *
from data_preprocess import *
from shared_constants import *
from pre_bn_state_processing import *
from data_summerization import *
##################################################################
# Interactive mode for plotting
plt.ion()
##################################################################
# Processing Configuration Settings
##################################################################
# All BEMS and weather data is built into a single variable, 'data_dict'.
# 'data_dict' is a Python dictionary.
# For debugging or experimentation, the program can store the 'data_dict' variable
# in data_dict.bin by setting the flag variable IS_USING_SAVED_DICT:
# IS_USING_SAVED_DICT=0 (Default) : Build a new 'data_dict' variable and store it to 'data_dict.bin'
# IS_USING_SAVED_DICT=1 : Skip building 'data_dict' and load 'data_dict.bin' instead
# IS_USING_SAVED_DICT=-1 : Neither build nor load 'data_dict'
# Default flag for processing
PRE_BN_STAGE=0
if PRE_BN_STAGE>0:
IS_USING_SAVED_DICT=1
CHECK_DATA_FORMAT=0
Data_Summarization=1
# Set the analysis period, where ANS_START_T and ANS_END_T are the starting
# and ending timestamps.
ANS_START_T=dt.datetime(2013,6,1,0)
ANS_END_T=dt.datetime(2013,12,1,0)
# Set the analysis time interval; all BEMS and weather data is aligned
# to a slotted timeline quantized by TIMELET_INV.
TIMELET_INV=dt.timedelta(minutes=60)
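# For example, the 15-minute "Q" timelet mentioned in the module docstring
# would correspond to (illustration only, not the setting used here):
#   TIMELET_INV = dt.timedelta(minutes=15)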
print TIMELET_INV, 'time slot interval is set for this data set !!'
print '-------------------------------------------------------------------'
# Compute Average Feature if PROC_AVG ==True
PROC_AVG=True
# Compute Differential Feature if PROC_DIFF ==True
PROC_DIFF=True
##################################################################
# List buildings and substation names
# Skip all data PRE_BN_STAGE
#['GW1','GW2','VAK1','VAK2']
bldg_key_set=['GW2']
if PRE_BN_STAGE==0:
bldg_key_set_run=[]
print 'skip PRE_BN_STAGE....'
else:
bldg_key_set_run=bldg_key_set
# Retrieving a set of sensors having a key value in bldg_key_set
for bldg_key in bldg_key_set_run:
print '###############################################################################'
print '###############################################################################'
print 'Processing '+ bldg_key+'.....'
print '###############################################################################'
print '###############################################################################'
#temp=subprocess.check_output('ls '+DATA_DIR+'*'+bldg_key+'*.bin', shell=True)
temp=subprocess.check_output('ls '+DATA_DIR+'*'+bldg_key+'*.bin | grep POWER', shell=True)
input_files_temp =shlex.split(temp)
# Get rid of duplicated files
input_files_temp=list(set(input_files_temp))
input_files=input_files_temp
###############################################################################
# This directly searches files from bin file name
print '###############################################################################'
print '# Data Pre-Processing'
print '###############################################################################'
# define input_files to be read
if IS_USING_SAVED_DICT==0:
print 'Extract a common time range...'
ANS_START_T,ANS_END_T,input_file_to_be_included=\
time_range_check(input_files,ANS_START_T,ANS_END_T,TIMELET_INV)
print 'time range readjusted to (' ,ANS_START_T, ', ', ANS_END_T,')'
start__dictproc_t=time.time()
data_dict,purge_list=\
construct_data_dict(input_file_to_be_included,ANS_START_T,ANS_END_T,TIMELET_INV,\
binfilename=PROC_OUT_DIR + 'data_dict',IS_USING_PARALLEL=IS_USING_PARALLEL_OPT)
end__dictproc_t=time.time()
print 'the time of construct data dict.bin is ', end__dictproc_t-start__dictproc_t, ' sec'
print '--------------------------------------'
elif IS_USING_SAVED_DICT==1:
print 'Loading data dictionary......'
start__dictproc_t=time.time()
data_dict = mt.loadObjectBinaryFast(PROC_OUT_DIR +'data_dict.bin')
end__dictproc_t=time.time()
print 'the time of loading data dict.bin is ', end__dictproc_t-start__dictproc_t, ' sec'
print '--------------------------------------'
else:
print 'Skip data dict'
if CHECK_DATA_FORMAT==1:
# This is for data verification purposes.
# You can skip it if you are sure that there are no bugs in the 'construct_data_dict' function.
list_of_wrong_data_format=verify_data_format(data_dict)
if len(list_of_wrong_data_format)>0:
print 'Measurement list below'
print '----------------------------------------'
print list_of_wrong_data_format
raise NameError('Errors in data format')
# This performs the data summarization process.
if Data_Summarization==1:
bldg_out=data_summerization(bldg_key,data_dict,PROC_AVG=True,PROC_DIFF=True)
RECON_BLDG_BIN_OUT=0
if RECON_BLDG_BIN_OUT==1:
for bldg_key in ['GW1_','GW2_','VAK1_','VAK2_']:
avgdata_dict=mt.loadObjectBinaryFast('./VTT/'+bldg_key+'avgdata_dict.bin')
diffdata_dict=mt.loadObjectBinaryFast('./VTT/'+bldg_key+'diffdata_dict.bin')
data_dict=mt.loadObjectBinaryFast('./VTT/'+bldg_key+'data_dict.bin')
cmd_str=remove_dot(bldg_key)+'out={\'data_dict\':data_dict}'
exec(cmd_str)
cmd_str=remove_dot(bldg_key)+'out.update({\'avgdata_dict\':avgdata_dict})'
exec(cmd_str)
cmd_str=remove_dot(bldg_key)+'out.update({\'diffdata_dict\':diffdata_dict})'
exec(cmd_str)
cmd_str=remove_dot(bldg_key)+'out.update({\'bldg_key\':remove_dot(bldg_key)})'
exec(cmd_str)
cmd_str='mt.saveObjectBinaryFast('+remove_dot(bldg_key)+'out'+',\''+PROC_OUT_DIR+remove_dot(bldg_key)+'out.bin\')'
exec(cmd_str)
print '###############################################################################'
print '# Model_Discovery'
print '###############################################################################'
#bldg_key_set=['GW1','GW2','VAK1','VAK2']
Model_Discovery=1
pwr_key='_POWER_';
bldg_dict={}
for bldg_load_key in bldg_key_set:
print 'Building for ',bldg_load_key, '....'
try:
bldg_tag='vtt_'+bldg_load_key
bldg_load_out=mt.loadObjectBinaryFast(PROC_OUT_DIR+bldg_load_key+'_out.bin')
except:
print bldg_load_key+' bin file not found in PROC_OUT_DIR, skip....'
pass
mt.saveObjectBinaryFast(bldg_load_out['data_dict'],PROC_OUT_DIR+'data_dict.bin')
if 'avgdata_dict' in bldg_load_out.keys():
mt.saveObjectBinaryFast(bldg_load_out['avgdata_dict'],PROC_OUT_DIR+'avgdata_dict.bin')
if 'diffdata_dict' in bldg_load_out.keys():
mt.saveObjectBinaryFast(bldg_load_out['diffdata_dict'],PROC_OUT_DIR+'diffdata_dict.bin')
pname_key= pwr_key
bldg_dict.update({bldg_tag:create_bldg_obj(PROC_OUT_DIR,bldg_tag,pname_key)})
bldg_=obj(bldg_dict)
#cmd_str='bldg_.'+bldg_tag+'.data_out=obj(bldg_load_out)'
#exec(cmd_str)
cmd_str='bldg_obj=bldg_.'+bldg_tag
exec(cmd_str)
anal_out={}
if 'avgdata_dict' in bldg_load_out.keys():
anal_out.update({'avg':bn_prob_analysis(bldg_obj,sig_tag_='avg')})
if 'diffdata_dict' in bldg_load_out.keys():
anal_out.update({'diff':bn_prob_analysis(bldg_obj,sig_tag_='diff')})
cmd_str='bldg_.'+bldg_tag+'.anal_out=obj(anal_out)'
exec(cmd_str)
# Save vtt building object file.
mt.saveObjectBinaryFast(bldg_ ,PROC_OUT_DIR+'vtt_bldg_obj.bin')
# this is a vtt specific sensor name conversion
def convert_vtt_name(id_labels):
if isinstance(id_labels,list)==False:
id_labels=[id_labels]
out_name=[key_label_ for key_label_ in id_labels ]
return out_name
bldg_.convert_name=convert_vtt_name
#######################################################################################
# Analysis For VTT
#######################################################################################
# Analysis of BN network result - All result will be saved in fig_dir.
BN_ANAL=1
if BN_ANAL==1:
# Plotting individual LHs
PLOTTING_LH=0
if PLOTTING_LH==1:
plotting_bldg_lh(bldg_,attr_class='sensor',num_picks=30)
plotting_bldg_lh(bldg_,attr_class='time',num_picks=30)
plotting_bldg_lh(bldg_,attr_class='weather',num_picks=30)
PLOTTING_BN=1
if PLOTTING_BN==1:
plotting_bldg_bn(bldg_)
print '**************************** End of Program ****************************'
| gpl-2.0 |
CVML/scikit-learn | sklearn/covariance/graph_lasso_.py | 127 | 25626 | """GraphLasso: sparse inverse covariance estimation with an l1-penalized
estimator.
"""
# Author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import warnings
import operator
import sys
import time
import numpy as np
from scipy import linalg
from .empirical_covariance_ import (empirical_covariance, EmpiricalCovariance,
log_likelihood)
from ..utils import ConvergenceWarning
from ..utils.extmath import pinvh
from ..utils.validation import check_random_state, check_array
from ..linear_model import lars_path
from ..linear_model import cd_fast
from ..cross_validation import check_cv, cross_val_score
from ..externals.joblib import Parallel, delayed
import collections
# Helper functions to compute the objective and dual objective functions
# of the l1-penalized estimator
def _objective(mle, precision_, alpha):
"""Evaluation of the graph-lasso objective function
the objective function is made of a shifted scaled version of the
normalized log-likelihood (i.e. its empirical mean over the samples) and a
penalisation term to promote sparsity
"""
p = precision_.shape[0]
cost = - 2. * log_likelihood(mle, precision_) + p * np.log(2 * np.pi)
cost += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return cost
def _dual_gap(emp_cov, precision_, alpha):
"""Expression of the dual gap convergence criterion
The specific definition is given in Duchi "Projected Subgradient Methods
for Learning Sparse Gaussians".
"""
gap = np.sum(emp_cov * precision_)
gap -= precision_.shape[0]
gap += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return gap
def alpha_max(emp_cov):
"""Find the maximum alpha for which there are some non-zeros off-diagonal.
Parameters
----------
emp_cov : 2D array, (n_features, n_features)
The sample covariance matrix
Notes
-----
    This results from the bound for all the Lasso problems that are solved
in GraphLasso: each time, the row of cov corresponds to Xy. As the
bound for alpha is given by `max(abs(Xy))`, the result follows.
"""
A = np.copy(emp_cov)
A.flat[::A.shape[0] + 1] = 0
return np.max(np.abs(A))
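# Illustration only (helper name `_example_alpha_grid` and the 1e-2 lower-bound
# ratio are assumed): a minimal sketch of how `alpha_max` can be used to build the
# decreasing, log-spaced grid of regularization values that GraphLassoCV.fit
# constructs further below.
def _example_alpha_grid(emp_cov, n_alphas=4, ratio=1e-2):
    """Return `n_alphas` alphas from alpha_max(emp_cov) down to ratio * that value,
    in decreasing order as expected by graph_lasso_path."""
    alpha_1 = alpha_max(emp_cov)
    alpha_0 = ratio * alpha_1
    return np.logspace(np.log10(alpha_0), np.log10(alpha_1), n_alphas)[::-1]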
# The g-lasso algorithm
def graph_lasso(emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4,
enet_tol=1e-4, max_iter=100, verbose=False,
return_costs=False, eps=np.finfo(np.float64).eps,
return_n_iter=False):
"""l1-penalized covariance estimator
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
emp_cov : 2D ndarray, shape (n_features, n_features)
Empirical covariance from which to compute the covariance estimate.
alpha : positive float
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
cov_init : 2D array (n_features, n_features), optional
The initial guess for the covariance.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : boolean, optional
If verbose is True, the objective function and dual gap are
printed at each iteration.
return_costs : boolean, optional
If return_costs is True, the objective function and dual gap
at each iteration are returned.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
The estimated covariance matrix.
precision : 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrix.
costs : list of (objective, dual_gap) pairs
The list of values of the objective function and the dual gap at
each iteration. Returned only if return_costs is True.
n_iter : int
Number of iterations. Returned only if `return_n_iter` is set to True.
See Also
--------
GraphLasso, GraphLassoCV
Notes
-----
The algorithm employed to solve this problem is the GLasso algorithm,
from the Friedman 2008 Biostatistics paper. It is the same algorithm
as in the R `glasso` package.
One possible difference with the `glasso` R package is that the
diagonal coefficients are not penalized.
"""
_, n_features = emp_cov.shape
if alpha == 0:
if return_costs:
precision_ = linalg.inv(emp_cov)
cost = - 2. * log_likelihood(emp_cov, precision_)
cost += n_features * np.log(2 * np.pi)
d_gap = np.sum(emp_cov * precision_) - n_features
if return_n_iter:
return emp_cov, precision_, (cost, d_gap), 0
else:
return emp_cov, precision_, (cost, d_gap)
else:
if return_n_iter:
return emp_cov, linalg.inv(emp_cov), 0
else:
return emp_cov, linalg.inv(emp_cov)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init.copy()
# As a trivial regularization (Tikhonov like), we scale down the
# off-diagonal coefficients of our starting point: This is needed, as
# in the cross-validation the cov_init can easily be
    # ill-conditioned, and the CV loop blows up. Besides, this takes a
    # conservative stand-point on the initial conditions, and it tends to
# make the convergence go faster.
covariance_ *= 0.95
diagonal = emp_cov.flat[::n_features + 1]
covariance_.flat[::n_features + 1] = diagonal
precision_ = pinvh(covariance_)
indices = np.arange(n_features)
costs = list()
    # The different l1 regression solvers have different numerical errors
if mode == 'cd':
errors = dict(over='raise', invalid='ignore')
else:
errors = dict(invalid='raise')
try:
# be robust to the max_iter=0 edge case, see:
# https://github.com/scikit-learn/scikit-learn/issues/4134
d_gap = np.inf
for i in range(max_iter):
for idx in range(n_features):
sub_covariance = covariance_[indices != idx].T[indices != idx]
row = emp_cov[idx, indices != idx]
with np.errstate(**errors):
if mode == 'cd':
# Use coordinate descent
coefs = -(precision_[indices != idx, idx]
/ (precision_[idx, idx] + 1000 * eps))
coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
coefs, alpha, 0, sub_covariance, row, row,
max_iter, enet_tol, check_random_state(None), False)
else:
# Use LARS
_, _, coefs = lars_path(
sub_covariance, row, Xy=row, Gram=sub_covariance,
alpha_min=alpha / (n_features - 1), copy_Gram=True,
method='lars', return_path=False)
# Update the precision matrix
precision_[idx, idx] = (
1. / (covariance_[idx, idx]
- np.dot(covariance_[indices != idx, idx], coefs)))
precision_[indices != idx, idx] = (- precision_[idx, idx]
* coefs)
precision_[idx, indices != idx] = (- precision_[idx, idx]
* coefs)
coefs = np.dot(sub_covariance, coefs)
covariance_[idx, indices != idx] = coefs
covariance_[indices != idx, idx] = coefs
d_gap = _dual_gap(emp_cov, precision_, alpha)
cost = _objective(emp_cov, precision_, alpha)
if verbose:
print(
'[graph_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e'
% (i, cost, d_gap))
if return_costs:
costs.append((cost, d_gap))
if np.abs(d_gap) < tol:
break
if not np.isfinite(cost) and i > 0:
raise FloatingPointError('Non SPD result: the system is '
'too ill-conditioned for this solver')
else:
warnings.warn('graph_lasso: did not converge after %i iteration:'
' dual gap: %.3e' % (max_iter, d_gap),
ConvergenceWarning)
except FloatingPointError as e:
e.args = (e.args[0]
+ '. The system is too ill-conditioned for this solver',)
raise e
if return_costs:
if return_n_iter:
return covariance_, precision_, costs, i + 1
else:
return covariance_, precision_, costs
else:
if return_n_iter:
return covariance_, precision_, i + 1
else:
return covariance_, precision_
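# Usage sketch, illustration only: running the solver on the empirical covariance
# of random data. Wrapped in a function so nothing executes at import time; the
# data shape, seed and alpha value are arbitrary assumptions.
def _example_graph_lasso(n_samples=60, n_features=5, alpha=0.2, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(n_samples, n_features)
    emp_cov = empirical_covariance(X)
    # With the default flags, graph_lasso returns (covariance, precision)
    covariance, precision = graph_lasso(emp_cov, alpha=alpha)
    return covariance, precision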
class GraphLasso(EmpiricalCovariance):
"""Sparse inverse covariance estimation with an l1-penalized estimator.
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
alpha : positive float, default 0.01
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
mode : {'cd', 'lars'}, default 'cd'
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, default 1e-4
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, default 100
The maximum number of iterations.
verbose : boolean, default False
If verbose is True, the objective function and dual gap are
        printed at each iteration.
assume_centered : boolean, default False
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : array-like, shape (n_features, n_features)
Estimated covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
n_iter_ : int
Number of iterations run.
See Also
--------
graph_lasso, GraphLassoCV
"""
def __init__(self, alpha=.01, mode='cd', tol=1e-4, enet_tol=1e-4,
max_iter=100, verbose=False, assume_centered=False):
self.alpha = alpha
self.mode = mode
self.tol = tol
self.enet_tol = enet_tol
self.max_iter = max_iter
self.verbose = verbose
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=self.alpha, mode=self.mode, tol=self.tol,
enet_tol=self.enet_tol, max_iter=self.max_iter,
verbose=self.verbose, return_n_iter=True)
return self
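# Usage sketch, illustration only: fitting the GraphLasso estimator on random data
# and reading back the estimated matrices. Wrapped in a function so nothing runs at
# import time; the shapes, seed and alpha are arbitrary assumptions.
def _example_graph_lasso_estimator(n_samples=80, n_features=6, alpha=0.1, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(n_samples, n_features)
    model = GraphLasso(alpha=alpha).fit(X)
    return model.covariance_, model.precision_, model.n_iter_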
# Cross-validation with GraphLasso
def graph_lasso_path(X, alphas, cov_init=None, X_test=None, mode='cd',
tol=1e-4, enet_tol=1e-4, max_iter=100, verbose=False):
"""l1-penalized covariance estimator along a path of decreasing alphas
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
X : 2D ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate.
alphas : list of positive floats
The list of regularization parameters, decreasing order.
X_test : 2D array, shape (n_test_samples, n_features), optional
Optional test matrix to measure generalisation error.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : integer, optional
The higher the verbosity flag, the more information is printed
during the fitting.
Returns
-------
covariances_ : List of 2D ndarray, shape (n_features, n_features)
The estimated covariance matrices.
precisions_ : List of 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrices.
scores_ : List of float
The generalisation error (log-likelihood) on the test data.
Returned only if test data is passed.
"""
inner_verbose = max(0, verbose - 1)
emp_cov = empirical_covariance(X)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init
covariances_ = list()
precisions_ = list()
scores_ = list()
if X_test is not None:
test_emp_cov = empirical_covariance(X_test)
for alpha in alphas:
try:
# Capture the errors, and move on
covariance_, precision_ = graph_lasso(
emp_cov, alpha=alpha, cov_init=covariance_, mode=mode, tol=tol,
enet_tol=enet_tol, max_iter=max_iter, verbose=inner_verbose)
covariances_.append(covariance_)
precisions_.append(precision_)
if X_test is not None:
this_score = log_likelihood(test_emp_cov, precision_)
except FloatingPointError:
this_score = -np.inf
covariances_.append(np.nan)
precisions_.append(np.nan)
if X_test is not None:
if not np.isfinite(this_score):
this_score = -np.inf
scores_.append(this_score)
if verbose == 1:
sys.stderr.write('.')
elif verbose > 1:
if X_test is not None:
print('[graph_lasso_path] alpha: %.2e, score: %.2e'
% (alpha, this_score))
else:
print('[graph_lasso_path] alpha: %.2e' % alpha)
if X_test is not None:
return covariances_, precisions_, scores_
return covariances_, precisions_
class GraphLassoCV(GraphLasso):
"""Sparse inverse covariance w/ cross-validated choice of the l1 penalty
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
    alphas : integer, or list of positive floats, optional
If an integer is given, it fixes the number of points on the
grids of alpha to be used. If a list is given, it gives the
grid to be used. See the notes in the class docstring for
more details.
    n_refinements : strictly positive integer
The number of times the grid is refined. Not used if explicit
values of alphas are passed.
cv : cross-validation generator, optional
see sklearn.cross_validation module. If None is passed, defaults to
a 3-fold strategy
    tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
    max_iter : integer, optional
        Maximum number of iterations.
    mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where number of features is greater
than number of samples. Elsewhere prefer cd which is more numerically
stable.
    n_jobs : int, optional
        number of jobs to run in parallel (default 1).
    verbose : boolean, optional
If verbose is True, the objective function and duality gap are
printed at each iteration.
assume_centered : Boolean
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : numpy.ndarray, shape (n_features, n_features)
Estimated covariance matrix.
precision_ : numpy.ndarray, shape (n_features, n_features)
Estimated precision matrix (inverse covariance).
alpha_ : float
Penalization parameter selected.
cv_alphas_ : list of float
All penalization parameters explored.
`grid_scores`: 2D numpy.ndarray (n_alphas, n_folds)
Log-likelihood score on left-out data across folds.
n_iter_ : int
Number of iterations run for the optimal alpha.
See Also
--------
graph_lasso, GraphLasso
Notes
-----
The search for the optimal penalization parameter (alpha) is done on an
iteratively refined grid: first the cross-validated scores on a grid are
computed, then a new refined grid is centered around the maximum, and so
on.
One of the challenges which is faced here is that the solvers can
fail to converge to a well-conditioned estimate. The corresponding
values of alpha then come out as missing values, but the optimum may
be close to these missing values.
"""
def __init__(self, alphas=4, n_refinements=4, cv=None, tol=1e-4,
enet_tol=1e-4, max_iter=100, mode='cd', n_jobs=1,
verbose=False, assume_centered=False):
self.alphas = alphas
self.n_refinements = n_refinements
self.mode = mode
self.tol = tol
self.enet_tol = enet_tol
self.max_iter = max_iter
self.verbose = verbose
self.cv = cv
self.n_jobs = n_jobs
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
"""Fits the GraphLasso covariance model to X.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
"""
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
cv = check_cv(self.cv, X, y, classifier=False)
# List of (alpha, scores, covs)
path = list()
n_alphas = self.alphas
inner_verbose = max(0, self.verbose - 1)
if isinstance(n_alphas, collections.Sequence):
alphas = self.alphas
n_refinements = 1
else:
n_refinements = self.n_refinements
alpha_1 = alpha_max(emp_cov)
alpha_0 = 1e-2 * alpha_1
alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1),
n_alphas)[::-1]
t0 = time.time()
for i in range(n_refinements):
with warnings.catch_warnings():
# No need to see the convergence warnings on this grid:
# they will always be points that will not converge
# during the cross-validation
warnings.simplefilter('ignore', ConvergenceWarning)
# Compute the cross-validated loss on the current grid
# NOTE: Warm-restarting graph_lasso_path has been tried, and
# this did not allow to gain anything (same execution time with
# or without).
this_path = Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose
)(
delayed(graph_lasso_path)(
X[train], alphas=alphas,
X_test=X[test], mode=self.mode,
tol=self.tol, enet_tol=self.enet_tol,
max_iter=int(.1 * self.max_iter),
verbose=inner_verbose)
for train, test in cv)
            # Little dance to transform the list into what we need
covs, _, scores = zip(*this_path)
covs = zip(*covs)
scores = zip(*scores)
path.extend(zip(alphas, scores, covs))
path = sorted(path, key=operator.itemgetter(0), reverse=True)
# Find the maximum (avoid using built in 'max' function to
# have a fully-reproducible selection of the smallest alpha
# in case of equality)
best_score = -np.inf
last_finite_idx = 0
for index, (alpha, scores, _) in enumerate(path):
this_score = np.mean(scores)
if this_score >= .1 / np.finfo(np.float64).eps:
this_score = np.nan
if np.isfinite(this_score):
last_finite_idx = index
if this_score >= best_score:
best_score = this_score
best_index = index
# Refine the grid
if best_index == 0:
# We do not need to go back: we have chosen
# the highest value of alpha for which there are
# non-zero coefficients
alpha_1 = path[0][0]
alpha_0 = path[1][0]
elif (best_index == last_finite_idx
and not best_index == len(path) - 1):
# We have non-converged models on the upper bound of the
# grid, we need to refine the grid there
alpha_1 = path[best_index][0]
alpha_0 = path[best_index + 1][0]
elif best_index == len(path) - 1:
alpha_1 = path[best_index][0]
alpha_0 = 0.01 * path[best_index][0]
else:
alpha_1 = path[best_index - 1][0]
alpha_0 = path[best_index + 1][0]
if not isinstance(n_alphas, collections.Sequence):
alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0),
n_alphas + 2)
alphas = alphas[1:-1]
if self.verbose and n_refinements > 1:
print('[GraphLassoCV] Done refinement % 2i out of %i: % 3is'
% (i + 1, n_refinements, time.time() - t0))
path = list(zip(*path))
grid_scores = list(path[1])
alphas = list(path[0])
# Finally, compute the score with alpha = 0
alphas.append(0)
grid_scores.append(cross_val_score(EmpiricalCovariance(), X,
cv=cv, n_jobs=self.n_jobs,
verbose=inner_verbose))
self.grid_scores = np.array(grid_scores)
best_alpha = alphas[best_index]
self.alpha_ = best_alpha
self.cv_alphas_ = alphas
# Finally fit the model with the selected alpha
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=best_alpha, mode=self.mode, tol=self.tol,
enet_tol=self.enet_tol, max_iter=self.max_iter,
verbose=inner_verbose, return_n_iter=True)
return self
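# Usage sketch, illustration only: cross-validated choice of alpha on random data.
# Wrapped in a function so nothing runs at import time; the data shape and the
# small grid/refinement settings are arbitrary assumptions.
def _example_graph_lasso_cv(n_samples=60, n_features=5, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(n_samples, n_features)
    model = GraphLassoCV(alphas=4, n_refinements=2)
    model.fit(X)
    return model.alpha_, model.cv_alphas_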
| bsd-3-clause |
amueller/scipy-2016-sklearn | notebooks/figures/plot_rbf_svm_parameters.py | 19 | 2018 | import matplotlib.pyplot as plt
import numpy as np
from sklearn.svm import SVC
from sklearn.datasets import make_blobs
from .plot_2d_separator import plot_2d_separator
def make_handcrafted_dataset():
# a carefully hand-designed dataset lol
X, y = make_blobs(centers=2, random_state=4, n_samples=30)
y[np.array([7, 27])] = 0
mask = np.ones(len(X), dtype=np.bool)
mask[np.array([0, 1, 5, 26])] = 0
X, y = X[mask], y[mask]
return X, y
def plot_rbf_svm_parameters():
X, y = make_handcrafted_dataset()
    fig, axes = plt.subplots(1, 4, figsize=(15, 3))
for ax, C in zip(axes, [1e0, 5, 10, 100]):
ax.scatter(X[:, 0], X[:, 1], s=150, c=np.array(['red', 'blue'])[y])
svm = SVC(kernel='rbf', C=C).fit(X, y)
plot_2d_separator(svm, X, ax=ax, eps=.5)
ax.set_title("C = %f" % C)
fig, axes = plt.subplots(1, 4, figsize=(15, 3))
for ax, gamma in zip(axes, [0.1, .5, 1, 10]):
ax.scatter(X[:, 0], X[:, 1], s=150, c=np.array(['red', 'blue'])[y])
svm = SVC(gamma=gamma, kernel='rbf', C=1).fit(X, y)
plot_2d_separator(svm, X, ax=ax, eps=.5)
ax.set_title("gamma = %f" % gamma)
def plot_svm(log_C, log_gamma):
X, y = make_handcrafted_dataset()
C = 10. ** log_C
gamma = 10. ** log_gamma
svm = SVC(kernel='rbf', C=C, gamma=gamma).fit(X, y)
ax = plt.gca()
plot_2d_separator(svm, X, ax=ax, eps=.5)
# plot data
ax.scatter(X[:, 0], X[:, 1], s=150, c=np.array(['red', 'blue'])[y])
# plot support vectors
sv = svm.support_vectors_
ax.scatter(sv[:, 0], sv[:, 1], s=230, facecolors='none', zorder=10, linewidth=3)
ax.set_title("C = %.4f gamma = %.4f" % (C, gamma))
def plot_svm_interactive():
from IPython.html.widgets import interactive, FloatSlider
C_slider = FloatSlider(min=-3, max=3, step=.1, value=0, readout=False)
gamma_slider = FloatSlider(min=-2, max=2, step=.1, value=0, readout=False)
return interactive(plot_svm, log_C=C_slider, log_gamma=gamma_slider)
| cc0-1.0 |
rubikloud/scikit-learn | examples/gaussian_process/gp_diabetes_dataset.py | 223 | 1976 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
========================================================================
Gaussian Processes regression: goodness-of-fit on the 'diabetes' dataset
========================================================================
In this example, we fit a Gaussian Process model onto the diabetes
dataset.
We determine the correlation parameters with maximum likelihood
estimation (MLE). We use an anisotropic squared exponential
correlation model with a constant regression model. We also use a
nugget of 1e-2 to account for the (strong) noise in the targets.
We compute a cross-validation estimate of the coefficient of
determination (R2) without reperforming MLE, using the set of correlation
parameters found on the whole dataset.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import cross_val_score, KFold
# Load the dataset from scikit's data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# Instantiate a GP model
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')
# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)
# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta_ # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None # None bounds deactivate MLE
# Perform a cross-validation estimate of the coefficient of determination using
# the cross_validation module using all CPUs available on the machine
K = 20 # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
% (K, R2))
| bsd-3-clause |
nrhine1/scikit-learn | sklearn/tree/export.py | 53 | 15772 | """
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Trevor Stephens <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from ..externals import six
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
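# Illustration only (helper name assumed): converting the brewed (R, G, B) triples
# into '#RRGGBB' hex strings, e.g. for quickly inspecting the palette.
def _example_color_brew_hex(n=3):
    """Return the n brewed colors as '#RRGGBB' strings."""
    return ['#%02x%02x%02x' % tuple(rgb) for rgb in _color_brew(n)]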
def export_graphviz(decision_tree, out_file="tree.dot", max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
alpha = int(255 * (sorted_values[0] - sorted_values[1]) /
(1 - sorted_values[1]))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] - colors['bounds'][0])))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
characters = ['#', '<SUB>', '</SUB>', '≤', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id], 4),
characters[4])
# Write impurity
if impurity:
if isinstance(criterion, _tree.FriedmanMSE):
criterion = "friedman_mse"
elif not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], 4)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, 4)
elif proportion:
# Classification
value_text = np.around(value, 2)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, 4)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif tree.n_classes[0] == 1:
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
            out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
finally:
if own_file:
out_file.close()
| bsd-3-clause |
davharris/leafpuppy | train_dots.py | 1 | 2641 | #!/bin/python
import pylab as pl
import cPickle
import matplotlib.pyplot as plt
from sklearn import svm, metrics
import numpy as np
import sys
square = 13
imgloc = '../images/v012-penn.10-1hA5D1-cropb.png'
resd={'dot':0,'noise':1,'vein':2}
currimg=plt.imread(imgloc)
pkl_file=open('dots.pkl', 'r')
dots = cPickle.load(pkl_file)
pkl_file.close()
pkl_file=open('noise.pkl', 'r')
noise = cPickle.load(pkl_file)
pkl_file.close()
pkl_file=open('veins.pkl','r')
veins = cPickle.load(pkl_file)
pkl_file.close()
#dots = zip(dots, [0 for i in range(len(dots))])
#noise = zip(noise, [1 for i in range(len(noise))])
#veins = zip(veins, [2 for i in range(len(veins))])
print np.shape(np.asarray(dots))
print np.shape(np.asarray(noise))
print np.shape(np.asarray(veins))
dots_data = np.asarray(dots).reshape((len(dots),-1))
noise_data= np.asarray(noise).reshape((len(noise),-1))
veins_data= np.asarray(veins).reshape((len(veins),-1))
data = np.concatenate((np.concatenate((dots_data,noise_data)),veins_data))
print len(data)
target = [resd['dot'] for i in range(len(dots_data))] + [resd['noise'] for i in range(len(noise_data))] + [resd['vein'] for i in range(len(veins_data))]
print len(target)
classifier = svm.SVC(gamma=0.001)
classifier.fit(data, target)
tmpx, tmpy = len(currimg[0][:]), len(currimg[:][0])
final_image=np.ones((tmpy,tmpx))
blocks=[]
print 'Going through the blocks...'
sys.stdout.flush()
for i in [i+square/2 for i in xrange(tmpy-square)]:
for j in [j+square/2 for j in xrange(tmpx-square)]:
currblock=currimg[i-square/2:i+square/2+1,j-square/2:j+square/2+1]
blocks.append(currblock)
blocks=np.asarray(blocks)
print np.shape(blocks)
blocks = np.asarray(blocks).reshape(len(blocks),-1)
print np.shape(blocks)
print 'About to make predictions...'
sys.stdout.flush()
predicted = classifier.predict(blocks)
voting = np.zeros((tmpy, tmpx, 3))
print 'About to count votes...'
sys.stdout.flush()
for p in xrange(len(predicted)):
j=p%(tmpx-square)+square/2
i=(p-j+square/2)/(tmpx-square)+square/2
    # [i, j] are the coordinates of the center of that box,
    # since p = (i - s/2)*(X - s) + (j - s/2); see the block_center helper below
for y in range(i-square/2,i+square/2):
for x in range(j-square/2,j+square/2):
voting[y,x][predicted[p]]+=1
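# Illustration only (function name 'block_center' is assumed): the index arithmetic
# above, factored into a helper that inverts p = (i - s/2)*(X - s) + (j - s/2) to
# recover the block-centre coordinates. s/2 keeps the script's Python 2
# integer-division behaviour.
def block_center(p, X, s):
    j = p % (X - s) + s / 2
    i = (p - j + s / 2) / (X - s) + s / 2
    return i, j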
for i in xrange(tmpy):
for j in xrange(tmpx):
if voting[i,j].argmax()==resd['vein']:
final_image[i,j]=0
plt.imshow(final_image, cmap=plt.cm.gray)
plt.show()
#for i in [i+square/2 for i in xrange(tmpx-square)]:
# for j in [j+square/2 for j in xrange(tmpy-square)]:
# for k in range(i-square/2,i+square/2+1):
# for
| bsd-3-clause |
MycChiu/tensorflow | tensorflow/examples/learn/iris.py | 5 | 1649 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import model_selection
from sklearn import metrics
import tensorflow as tf
def main(unused_argv):
# Load dataset.
iris = tf.contrib.learn.datasets.load_dataset('iris')
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
x_train)
classifier = tf.contrib.learn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
predictions = list(classifier.predict(x_test, as_iterable=True))
score = metrics.accuracy_score(y_test, predictions)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
hijinks/python-bcet | bcet.py | 1 | 3886 | #!/usr/bin/env python
# BCET Workflow
__author__ = 'Sam Brooke'
__date__ = 'September 2017'
__copyright__ = '(C) 2017, Sam Brooke'
__email__ = "[email protected]"
import os
import gdal
from geopandas import GeoDataFrame
from shapely.geometry import Polygon
import georasters as gr
import matplotlib.pyplot as plt
import numpy as np
from optparse import OptionParser
import json
import re
parser = OptionParser()
parser.add_option("-c", "--cloud", dest="cloud", help="Location of cloud raster", metavar="CLOUD")
(options, args) = parser.parse_args()
cloud_tif = False
if options.cloud:
if os.path.isfile(options.cloud):
cloud_tif = options.cloud
# args[0] for config file
# args[1] for LANDSAT TIF
# args[2] for output directory
config_file = False
landsat_raster = False
file_prefix = ''
if len(args) > 2:
if os.path.isfile(args[0]):
config_file = args[0]
if os.path.isfile(args[1]):
landsat_raster = args[1]
output_dir = args[2]
m = re.search(r"B[0-9]+",landsat_raster)
band_name = m.group()
config_data = False
if config_file:
with open(config_file) as data_file:
config_data = json.load(data_file)
keys = config_data.keys()
# ROI (Region Of Interest) coordinates
roi = False
run_title = 'untitled'
if 'roi' in keys:
top_left = config_data['roi']['top_left']
bottom_right = config_data['roi']['bottom_right']
roi = True
if 'name' in keys:
run_title = config_data['name']
if roi:
# Create Polygon of ROI
roi_poly = Polygon([(top_left[0], top_left[1]), (bottom_right[0], top_left[1]), (bottom_right[0], bottom_right[1]), (top_left[0], bottom_right[1])])
#
# Load and Process Rasters
#
# Load Raster
raster = os.path.join(landsat_raster)
ndv, xsize, ysize, geot, projection, datatype = gr.get_geo_info(raster) # Raster information
# ndv = no data value
data = gr.from_file(raster) # Create GeoRaster object
crs = projection.ExportToProj4() # Create a projection string in proj4 format
if roi:
print('Clipping ROI')
# Create GeoDataFrame of ROI polygon with correction projection
clip_df = GeoDataFrame(crs=crs, geometry=[roi_poly])
raster_clip = data.clip(clip_df)
raster_data = raster_clip[0].raster
else:
raster_data = data.raster
if cloud_tif:
print('Preparing cloud mask')
# Cloud mask layer
# See
cloud_mask = os.path.join(cloud_tif)
cloud_mask_gr = gr.from_file(cloud_mask)
cloud_clip = cloud_mask_gr.clip(clip_df)
cloud_data = cloud_clip[0].raster
# Mask invalid data
raster_masked = np.ma.masked_invalid(raster_data, copy=True)
if cloud_tif:
print('Running cloud mask')
# Mask clouds, snow and shadows
raster_processed = np.where(cloud_data == 1, raster_masked, ndv)
else:
raster_processed = raster_masked
#
# BCET algebra
#
print('BCETing')
s = np.mean(np.power(raster_processed,2)) # mean squared
e = np.mean(raster_processed)
l = np.min(raster_processed)
h = np.max(raster_processed)
L = 0 # output minimum
H = 255 # output maximum
E = 110 # output mean
# Find b
b_nom = ((h**2)*(E-L))-(s*(H-L))+((l**2)*(H-E))
b_den = 2*((h*(E-L))-(e*(H-L))+(l*(H-E)))
b = b_nom/b_den
# Find a
a1 = H-L
a2 = h-l
a3 = h+l-(2*b)
a = a1/(a2*a3)
# Find c
c = L-(a*(l-b)**2)
# Process raster
bcet_raster = a*((raster_processed - b)**2) + c
print('New average value:')
print(bcet_raster.mean()) # should be 110!
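# Illustration only (function name 'bcet_stretch' is assumed): the BCET parabolic
# stretch computed step by step above, wrapped in a reusable function. Defaults
# mirror the constants used in this script (L=0, H=255, E=110); like the code
# above, it assumes the input array contains only valid data values.
def bcet_stretch(x, out_min=0.0, out_max=255.0, out_mean=110.0):
    """Apply y = a*(x - b)**2 + c so the output min/max/mean hit the targets."""
    s = np.mean(np.power(x, 2))  # mean square of the input
    e = np.mean(x)               # input mean
    l = np.min(x)                # input minimum
    h = np.max(x)                # input maximum
    b_nom = ((h ** 2) * (out_mean - out_min)) - (s * (out_max - out_min)) + ((l ** 2) * (out_max - out_mean))
    b_den = 2 * ((h * (out_mean - out_min)) - (e * (out_max - out_min)) + (l * (out_max - out_mean)))
    b = b_nom / b_den
    a = (out_max - out_min) / ((h - l) * (h + l - (2 * b)))
    c = out_min - (a * (l - b) ** 2)
    return a * ((x - b) ** 2) + c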
#
# Output
#
output_gr = gr.GeoRaster(bcet_raster,
raster_clip[0].geot,
nodata_value=ndv,
projection=raster_clip[0].projection,
datatype=raster_clip[0].datatype)
output_dir_full = os.path.join(output_dir,run_title)
if not os.path.exists(output_dir_full):
os.makedirs(output_dir_full)
new_path = os.path.join(output_dir_full,run_title+'_'+band_name)
print('Outputing '+new_path+' ...')
# Make Geotiff
output_gr.to_tiff(new_path)
# Add some metadata
ds = gdal.Open(new_path+'.tif', gdal.GA_Update)
ds.SetMetadataItem('ORIGINAL_LANDSAT', os.path.basename(landsat_raster))
ds.FlushCache()
| mit |
mxjl620/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
SmokinCaterpillar/pypet | pypet/tests/unittests/shared_data_test.py | 1 | 37080 | __author__ = ('Robert Meyer', 'Mehmet Nevvaf Timur')
import sys
import unittest
import os
import platform
import numpy as np
import pandas as pd
import tables as pt
from pypet import SharedPandasFrame, ObjectTable, make_ordinary_result, Result, \
make_shared_result, compact_hdf5_file, SharedCArray, SharedEArray, \
SharedVLArray
from pypet.tests.testutils.ioutils import get_root_logger, parse_args, run_suite
from pypet.tests.testutils.ioutils import make_temp_dir, make_trajectory_name, unittest
from pypet.tests.testutils.data import TrajectoryComparator
from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager
class MyTable(pt.IsDescription):
id = pt.Int32Col()
name = pt.StringCol(15)
surname = pt.StringCol(15)
weight = pt.FloatCol()
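# Illustration only (helper name and file argument are assumed): `MyTable` above is
# a plain PyTables IsDescription, so the same schema can also be used with the
# tables API directly, outside of pypet's SharedTable wrapper.
def _example_create_plain_pytables_table(filename):
    with pt.open_file(filename, mode='w') as h5:
        table = h5.create_table('/', 'people', MyTable)
        row = table.row
        row['id'] = 0
        row['name'] = 'example'
        row['surname'] = 'example'
        row['weight'] = 70.0
        row.append()
        table.flush()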
class StorageDataTrajectoryTests(TrajectoryComparator):
tags = 'unittest', 'trajectory', 'shared', 'hdf5'
def test_conversions(self):
filename = make_temp_dir('hdf5manipulation.hdf5')
traj = Trajectory(name=make_trajectory_name(self), filename=filename)
trajname = traj.v_name
traj.v_standard_result = SharedResult
traj.f_store(only_init=True)
traj.f_add_result('shared_data')
thedata = np.zeros((1000, 1000))
myarray = SharedArray('array', traj.shared_data, trajectory=traj)
traj.shared_data['array'] = myarray
mytable = SharedTable('t1', traj.shared_data, trajectory=traj)
traj.shared_data['t1'] = mytable
dadict = {'hi': [1, 2, 3, 4, 5], 'shu': ['bi', 'du', 'da', 'ha', 'hui']}
dadict2 = {'answer': [42]}
res = traj.f_add_result('shared.dfs')
res['df'] = SharedPandasFrame()
res['df'].create_shared_data(data=pd.DataFrame(dadict), trajectory=traj)
frame = SharedPandasFrame('df1', traj.f_get('shared.dfs'), trajectory=traj,
add_to_parent=True)
frame.create_shared_data(data=pd.DataFrame(dadict2),)
res['df1'] = frame
traj.f_add_result('mylist', [1, 2, 3])
traj.f_add_result('my.mytuple', k=(1, 2, 3), wa=42)
traj.f_add_result('my.myarray', np.zeros((50, 50)))
traj.f_add_result('my.myframe', data=pd.DataFrame(dadict2))
traj.f_add_result('my.mytable', ObjectTable(data=dadict2))
myarray.create_shared_data(data=thedata)
mytable.create_shared_data(first_row={'hi': 'hi'.encode('utf-8'), 'huhu': np.ones(3)})
traj.f_store()
data = myarray.read()
myarray.get_data_node()
self.assertTrue(np.all(data == thedata))
with StorageContextManager(traj):
myarray[2, 2] = 10
data = myarray.read()
self.assertTrue(data[2, 2] == 10)
self.assertTrue(data[2, 2] == 10)
self.assertFalse(traj.v_storage_service.is_open)
traj = load_trajectory(name=trajname, filename=filename, load_all=2,
dynamic_imports=SharedResult)
make_ordinary_result(traj.shared_data, 'array', trajectory=traj)
array = traj.shared_data.array
self.assertTrue(isinstance(array, np.ndarray))
thedata[2, 2] = 10
self.assertTrue(np.all(array == thedata))
make_ordinary_result(traj.shared_data, 't1', trajectory=traj,)
t1 = traj.shared_data.t1
self.assertTrue(isinstance(t1, ObjectTable))
self.assertTrue(np.all(t1['huhu'][0] == np.ones(3)))
dfs = traj.shared.dfs
make_ordinary_result(traj.shared.dfs, 'df', trajectory=traj)
theframe = dfs.f_get('df')
self.assertTrue(isinstance(dfs, Result))
self.assertTrue(isinstance(theframe, pd.DataFrame))
self.assertTrue(theframe['hi'][0] == 1)
listres = traj.f_get('mylist')
listres = make_shared_result(listres, 0, trajectory=traj)
with StorageContextManager(traj):
self.assertTrue(listres[0][2] == 3)
listres[0][0] = 4
self.assertTrue(listres[0][0] == 4)
listres = make_ordinary_result(listres, 0, trajectory=traj)
traj = load_trajectory(name=trajname, filename=filename, load_all=2,
dynamic_imports=SharedResult)
mylist = traj.mylist
self.assertTrue(isinstance(listres, Result))
self.assertTrue(mylist[0] == 4)
self.assertTrue(isinstance(mylist, list))
mytuple = traj.mytuple
with self.assertRaises(AttributeError):
mytuple = make_shared_result(mytuple, 'mylist', traj, new_class=SharedArray)
mytuple = make_shared_result(mytuple, 'k', traj, new_class=SharedArray)
self.assertTrue(mytuple.k[1] == 2)
mytuple = make_ordinary_result(mytuple, 'k', trajectory=traj)
self.assertTrue(isinstance(mytuple.k, tuple))
self.assertTrue(mytuple.k[2] == 3)
myframe = traj.myframe
myframe = make_shared_result(myframe, 'data', traj)
theframe = myframe.data.read()
self.assertTrue(theframe['answer'][0] == 42)
myframe = make_ordinary_result(myframe, 'data', trajectory=traj)
traj.f_load_item(myframe)
self.assertTrue(myframe.data['answer'][0] == 42)
mytable = traj.f_get('mytable')
mytable = make_shared_result(mytable, 0, traj)
self.assertTrue(isinstance(mytable[0], SharedTable))
rows = mytable.mytable.read()
self.assertTrue(rows[0][0] == 42)
mytable = make_ordinary_result(mytable, 0, trajectory=traj)
self.assertTrue(isinstance(mytable, Result))
self.assertTrue(mytable[0]['answer'][0] == 42)
def test_storing_and_manipulating(self):
filename = make_temp_dir('hdf5manipulation.hdf5')
traj = Trajectory(name=make_trajectory_name(self), filename=filename)
trajname = traj.v_name
thedata = np.zeros((1000, 1000))
res = traj.f_add_result(SharedResult, 'shared')
myarray = SharedArray('array', res, trajectory=traj, add_to_parent=True)
mytable = SharedTable('t1', res, trajectory=traj, add_to_parent=True)
mytable2 = SharedTable('t2', res, trajectory=traj, add_to_parent=True)
mytable3 = SharedTable('t3', res, trajectory=traj, add_to_parent=True)
traj.f_store(only_init=True)
myarray.create_shared_data(data=thedata)
mytable.create_shared_data(first_row={'hi': 'hi'.encode('utf-8'), 'huhu': np.ones(3)})
mytable2.create_shared_data(description={'ha': pt.StringCol(2, pos=0), 'haha': pt.FloatCol(pos=1)})
mytable3.create_shared_data(description={'ha': pt.StringCol(2, pos=0), 'haha': pt.FloatCol(pos=1)})
traj.f_store()
newrow = {'ha': 'hu', 'haha': 4.0}
with self.assertRaises(TypeError):
traj.shared.t2.row
with StorageContextManager(traj) as cm:
row = traj.shared.t2.row
for irun in range(11):
for key, val in newrow.items():
row[key] = val
row.append()
traj.shared.t3.flush()
data = myarray.read()
myarray.get_data_node()
self.assertTrue(np.all(data == thedata))
with StorageContextManager(traj):
myarray[2, 2] = 10
data = myarray.read()
self.assertTrue(data[2, 2] == 10)
self.assertTrue(data[2, 2] == 10)
self.assertFalse(traj.v_storage_service.is_open)
traj = load_trajectory(name=trajname, filename=filename)
traj.f_load(load_data=2)
traj.shared.t2.traj = traj
traj.shared.t1.traj = traj
traj.shared.array.traj = traj
self.assertTrue(traj.shared.t2.nrows == 11, '%s != 11' % str(traj.shared.t2.nrows))
self.assertTrue(traj.shared.t2[0]['ha'] == 'hu'.encode('utf-8'), traj.shared.t2[0]['ha'])
self.assertTrue(traj.shared.t2[1]['ha'] == 'hu'.encode('utf-8'), traj.shared.t2[1]['ha'])
self.assertTrue('huhu' in traj.shared.t1.colnames)
self.assertTrue(traj.shared.array[2, 2] == 10)
@unittest.skipIf(platform.system() == 'Windows', 'Not supported under Windows')
def test_compacting(self):
filename = make_temp_dir('hdf5compacting.hdf5')
traj = Trajectory(name=make_trajectory_name(self), filename=filename)
trajname = traj.v_name
traj.v_storage_service.complevel = 7
first_row = {'ha': 'hi'.encode('utf-8'), 'haha': np.zeros((3, 3))}
traj.f_store(only_init=True)
traj.f_add_result('My.Tree.Will.Be.Deleted', 42)
traj.f_add_result('Mine.Too.HomeBoy', 42, comment='Don`t cry for me!')
res = traj.f_add_result(SharedResult, 'myres')
res['myres'] = SharedTable()
res['myres'].create_shared_data(first_row=first_row)
with StorageContextManager(traj):
traj.myres
for irun in range(10000):
row = traj.myres.row
for key in first_row:
row[key] = first_row[key]
row.append()
traj.f_store()
del traj
traj = load_trajectory(name=trajname, filename=filename, load_all=2)
with StorageContextManager(traj) as cm:
tb = traj.myres.get_data_node()
tb.remove_rows(1000, 10000)
cm.flush_store()
self.assertTrue(traj.myres.nrows == 1001)
traj.f_delete_item(traj.My, recursive=True)
traj.f_delete_item(traj.Mine, recursive=True)
size = os.path.getsize(filename)
get_root_logger().info('Filesize is %s' % str(size))
name_wo_ext, ext = os.path.splitext(filename)
backup_file_name = name_wo_ext + '_backup' + ext
code = compact_hdf5_file(filename, keep_backup=True)
if code != 0:
raise RuntimeError('ptrepack fail')
backup_size = os.path.getsize(backup_file_name)
self.assertTrue(backup_size == size)
new_size = os.path.getsize(filename)
get_root_logger().info('New filesize is %s' % str(new_size))
self.assertTrue(new_size < size, "%s > %s" % (str(new_size), str(size)))
def test_all_arrays(self):
filename = make_temp_dir('hdf5arrays.hdf5')
traj = Trajectory(name=make_trajectory_name(self), filename=filename)
trajname = traj.v_name
npearray = np.ones((2, 10, 3), dtype=np.float)
thevlarray = np.array(['j'.encode('utf-8'), 22.2, 'gutter'.encode('utf-8')])
traj.f_store(only_init=True)
res = traj.f_add_result(SharedResult, 'arrays')
res['carray'] = SharedCArray()
res['carray'].create_shared_data(shape=(10, 10), atom=pt.atom.FloatAtom())
res['earray'] = SharedEArray()
res['earray'].create_shared_data(obj=npearray)
res['vlarray'] = SharedVLArray()
res['vlarray'].create_shared_data(obj=thevlarray)
res['array'] = SharedArray()
res['array'].create_shared_data(data=npearray)
traj.f_store()
traj = load_trajectory(name=trajname, filename=filename, load_all=2,
dynamic_imports=SharedResult)
toappned = [44, 'k'.encode('utf-8')]
with StorageContextManager(traj):
a1 = traj.arrays.array
a1[0, 0, 0] = 4.0
a2 = traj.arrays.carray
a2[0, 1] = 4
a4 = traj.arrays.vlarray
a4.append(toappned)
a3 = traj.arrays.earray
a3.append(np.zeros((1, 10, 3)))
traj = load_trajectory(name=trajname, filename=filename, load_all=2,
dynamic_imports=SharedResult)
with StorageContextManager(traj):
a1 = traj.arrays.array
self.assertTrue(a1[0, 0, 0] == 4.0)
a2 = traj.arrays.carray
self.assertTrue(a2[0, 1] == 4)
a3 = traj.arrays.earray
self.assertTrue(a3.read().shape == (3, 10, 3))
a4 = traj.arrays.vlarray
for idx, x in enumerate(a4):
if idx == 0:
self.assertTrue(np.all(x == np.array(thevlarray)))
elif idx == 1:
self.assertTrue(np.all(x == np.array(toappned)))
else:
raise RuntimeError()
def test_df(self):
filename = make_temp_dir('hdf5errors.hdf5')
traj = Trajectory(name=make_trajectory_name(self), filename=filename)
traj.f_store()
dadict = {'hi': [1, 2, 3, 4, 5], 'shu': ['bi', 'du', 'da', 'ha', 'hui']}
dadict2 = {'answer': [42]}
traj.f_add_result(SharedResult, 'dfs.df', SharedPandasFrame()).create_shared_data(data=pd.DataFrame(dadict))
traj.f_add_result(SharedResult, 'dfs.df1', SharedPandasFrame()).create_shared_data(data=pd.DataFrame(dadict2))
traj.f_add_result(SharedResult, 'dfs.df3', SharedPandasFrame())
for irun in range(10):
traj.df3.append(traj.df1.read())
dframe = traj.df3.read()
self.assertTrue(len(dframe) == 10)
what = traj.df.select(where='index == 2')
self.assertTrue(len(what) == 1)
def test_errors(self):
filename = make_temp_dir('hdf5errors.hdf5')
traj = Trajectory(name=make_trajectory_name(self), filename=filename)
npearray = np.ones((2, 10, 3), dtype=np.float)
thevlarray = np.array(['j'.encode('utf-8'), 22.2, 'gutter'.encode('utf-8')])
with self.assertRaises(TypeError):
traj.f_add_result(SharedResult, 'arrays.vlarray', SharedVLArray()).create_shared_data(obj=thevlarray)
traj.f_store()
traj.arrays.vlarray.create_shared_data(obj=thevlarray)
traj.f_add_result(SharedResult, 'arrays.array', SharedArray()).create_shared_data(data=npearray)
traj.arrays.f_add_result(SharedResult, 'super.carray', SharedCArray(),
comment='carray').create_shared_data(shape=(10, 10), atom=pt.atom.FloatAtom())
traj.arrays.f_add_result(SharedResult, 'earray', SharedEArray()).create_shared_data('earray',
obj=npearray)
traj.f_store()
with self.assertRaises(TypeError):
traj.arrays.array.iterrows()
with StorageContextManager(traj):
with self.assertRaises(RuntimeError):
with StorageContextManager(traj):
pass
self.assertTrue(traj.v_storage_service.is_open)
with self.assertRaises(RuntimeError):
StorageContextManager(traj).open_store()
self.assertFalse(traj.v_storage_service.is_open)
class SharedTableTest(TrajectoryComparator):
tags = 'unittest', 'trajectory', 'shared', 'hdf5', 'table', 'mehmet'
def setUp(self):
self.filename = make_temp_dir('shared_table_test.hdf5')
self.traj = Trajectory(name=make_trajectory_name(self), filename=self.filename)
self.traj.v_standard_result = SharedResult
self.traj.f_store(only_init=True)
self.traj.f_add_result('shared_data')
self.shared_table = SharedTable(name='table',
parent=self.traj.shared_data,
trajectory=self.traj,
add_to_parent=True)
def test_table_read(self):
the_reading_table = self.traj.results.shared_data.table
self.assertTrue(the_reading_table is self.shared_table)
the_reading_table.create_shared_data(description=MyTable)
with StorageContextManager(self.traj):
row = the_reading_table.row
for i in range(10):
row['id'] = i
row['name'] = 'mehmet %d' % i
row['surname'] = 'Timur'
row['weight'] = 65.5 + i * 1.5
row.append()
the_reading_table.flush()
for idx, row in enumerate(the_reading_table.iterrows()):
self.assertEqual(row['id'], idx)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_reading_table = traj2.results.shared_data.table
self.assertTrue(np.all(the_reading_table.read() == second_reading_table.read()))
second_reading_table.append([(21, 'aaa', 'bbb', 100)])
self.assertTrue(np.all(the_reading_table.read() == second_reading_table.read()))
traj3 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
third_reading_table = traj3.results.shared_data.table
self.assertTrue(np.all(the_reading_table.read() == third_reading_table.read()))
def test_table_append(self):
the_append_table = self.traj.results.shared_data.table
self.assertTrue(the_append_table is self.shared_table)
the_append_table.create_shared_data(description=MyTable)
with StorageContextManager(self.traj):
row = the_append_table.row
for i in range(15):
row['id'] = i * 2
row['name'] = 'name %d' % i
row['surname'] = '%d surname' % i
row['weight'] = (i*0.5 + 50.0)
row.append()
the_append_table.flush()
for idx, row in enumerate(the_append_table.iterrows()):
self.assertEqual(row['id'], idx * 2)
self.assertEqual(row['name'], ('name %d' % idx).encode('utf-8'))
self.assertEqual(row['surname'], ('%d surname' % idx).encode('utf-8'))
self.assertEqual(row['weight'], idx*0.5+50.0)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_append_table = traj2.results.shared_data.table
with StorageContextManager(traj2):
for idx, row in enumerate(second_append_table.iterrows()):
self.assertEqual(row['id'], idx * 2)
self.assertEqual(row['name'], ('name %d' % idx).encode('utf-8'))
self.assertEqual(row['surname'], ('%d surname' % idx).encode('utf-8'))
self.assertEqual(row['weight'], idx*0.5+50.0)
second_append_table.append([(30, 'mehmet', 'timur', 65.5)])
self.assertEqual(second_append_table.read(field='id')[-1], 30)
self.assertEqual(second_append_table.read(field='name')[-1], 'mehmet'.encode('utf-8'))
self.assertEqual(second_append_table.read(field='surname')[-1], 'timur'.encode('utf-8'))
self.assertEqual(second_append_table.read(field='weight')[-1], 65.5)
traj2.f_store()
traj3 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
third_append_table = traj3.results.shared_data.table
self.assertEqual((third_append_table.read(field='id')[-1]), 30)
self.assertEqual((third_append_table.read(field='name')[-1]), 'mehmet'.encode('utf-8'))
self.assertEqual((third_append_table.read(field='surname')[-1]), 'timur'.encode('utf-8'))
self.assertEqual((third_append_table.read(field='weight')[-1]), 65.5)
third_append_table.append([(33, 'Harrison', 'Ford', 95.5)])
self.assertEqual((third_append_table.read(field='id')[-1]), 33)
self.assertEqual((third_append_table.read(field='name')[-1]), 'Harrison'.encode('utf-8'))
self.assertEqual((third_append_table.read(field='surname')[-1]), 'Ford'.encode('utf-8'))
self.assertEqual((third_append_table.read(field='weight')[-1]), 95.5)
def test_table_iterrows(self):
the_iterrows_table = self.traj.results.shared_data.table
self.assertTrue(the_iterrows_table is self.shared_table)
the_iterrows_table.create_shared_data(description=MyTable)
with StorageContextManager(self.traj):
row = the_iterrows_table.row
for i in range(10):
row['id'] = i
row['name'] = 'mehmet %d' % i
row['surname'] = 'Timur'
row['weight'] = 65.5 + i * 1.5
row.append()
the_iterrows_table.flush()
for idx, row in enumerate(the_iterrows_table.iterrows()):
self.assertEqual(row['id'], idx)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_iterrows_table = traj2.results.shared_data.table
with StorageContextManager(traj2):
for idx, row in enumerate(second_iterrows_table.iterrows()):
self.assertEqual(row['id'], idx)
def test_table_col(self):
the_col_table = self.traj.results.shared_data.table
self.assertTrue(the_col_table is self.shared_table)
the_col_table.create_shared_data(description=MyTable)
with StorageContextManager(self.traj):
row = the_col_table.row
for i in range(10):
row['id'] = i
row['name'] = 'mehmet %d' % i
row['surname'] = 'Timur'
row['weight'] = 65.5 + i * 1.5
row.append()
the_col_table.flush()
for idx, row in enumerate(the_col_table.iterrows()):
self.assertEqual(row['id'], idx)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_col_table = traj2.results.shared_data.table
with StorageContextManager(traj2):
for idx, row in enumerate(second_col_table.iterrows()):
self.assertEqual(row['id'], idx)
self.assertTrue(np.all(second_col_table.read(field='id') == second_col_table.col('id')))
self.assertTrue(np.all(second_col_table.read(field='name') == second_col_table.col('name')))
self.assertTrue(np.all(second_col_table.read(field='surname') == second_col_table.col('surname')))
self.assertTrue(np.all(second_col_table.read(field='weight') == second_col_table.col('weight')))
# def test_table_itersequence(self):
# pass
#
# def test_table_itersorted(self):
# pass
#
# def test_table_read_coordinates(self):
# pass
#
# def test_table_read_sorted(self):
# pass
def test_table_getitem(self):
the_getitem_table = self.traj.results.shared_data.table
self.assertTrue(the_getitem_table is self.shared_table)
the_getitem_table.create_shared_data(description=MyTable)
with StorageContextManager(self.traj):
row = the_getitem_table.row
for i in range(10):
row['id'] = i
row['name'] = 'mehmet %d' % i
row['surname'] = 'Timur'
row['weight'] = 65.5 + i * 1.5
row.append()
the_getitem_table.flush()
for idx, row in enumerate(the_getitem_table.iterrows()):
self.assertEqual(row['id'], idx)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_getitem_table = traj2.results.shared_data.table
with StorageContextManager(traj2):
for idx, row in enumerate(second_getitem_table.iterrows()):
self.assertTrue(np.all(second_getitem_table.read()[idx] == second_getitem_table[idx]))
second_getitem_table.append([(30, 'mehmet nevvaf', 'timur', 65.5)])
for idx, row in enumerate(second_getitem_table.iterrows(-1)):
self.assertEqual(row['id'], 30)
self.assertEqual(row['name'], 'mehmet nevvaf'.encode('utf-8'))
self.assertEqual(row['surname'], 'timur'.encode('utf-8'))
self.assertEqual(row['weight'], 65.5)
traj2.f_store()
traj3 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
third_getitem_table = traj3.results.shared_data.table
with StorageContextManager(traj3):
for idx, row in enumerate(third_getitem_table.iterrows()):
self.assertTrue(np.all(third_getitem_table.read()[idx] == third_getitem_table[idx]))
# def test_table_iter(self):
# pass
#
# def test_table_modify_column(self):
# pass
#
# def test_table_modify_columns(self):
# pass
#
# def test_table_modify_coordinates(self):
# pass
#
# def test_table_modify_rows(self):
# pass
#
# def test_table_remove_rows(self):
# pass
#
# def test_table_remove_row(self):
# pass
def test_table_setitem(self):
the_setitem_table = self.traj.results.shared_data.table
self.assertTrue(the_setitem_table is self.shared_table)
the_setitem_table.create_shared_data(description=MyTable)
with StorageContextManager(self.traj):
row = the_setitem_table.row
for i in range(10):
row['id'] = i
row['name'] = 'mehmet %d' % i
row['surname'] = 'Timur'
row['weight'] = 65.5 + i * 1.5
row.append()
the_setitem_table.flush()
for idx, row in enumerate(the_setitem_table.iterrows()):
self.assertEqual(row['id'], idx)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_setitem_table = traj2.results.shared_data.table
second_setitem_table[0] = [(100, 'Mehmet Nevvaf', 'TIMUR', 75.5)]
self.assertEqual(second_setitem_table.read(field='id')[0], 100)
self.assertEqual(second_setitem_table.read(field='name')[0], 'Mehmet Nevvaf'.encode('utf-8'))
self.assertEqual(second_setitem_table.read(field='surname')[0], 'TIMUR'.encode('utf-8'))
self.assertEqual(second_setitem_table.read(field='weight')[0], 75.5)
traj2.f_store()
traj3 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
third_setitem_table = traj3.results.shared_data.table
self.assertEqual(third_setitem_table.read(field='id')[0], 100)
self.assertEqual(third_setitem_table.read(field='name')[0], 'Mehmet Nevvaf'.encode('utf-8'))
self.assertEqual(third_setitem_table.read(field='surname')[0], 'TIMUR'.encode('utf-8'))
self.assertEqual(third_setitem_table.read(field='weight')[0], 75.5)
# def test_table_get_where_list(self):
# pass
#
# def test_table_read_where(self):
# pass
def test_table_where(self):
the_where_table = self.traj.results.shared_data.table
self.assertTrue(the_where_table is self.shared_table)
the_where_table.create_shared_data(description=MyTable)
with StorageContextManager(self.traj):
row = the_where_table.row
for i in range(10):
row['id'] = i
row['name'] = 'mehmet %d' % i
row['surname'] = 'Timur'
row['weight'] = 65.5 + i
row.append()
the_where_table.flush()
for idx, row in enumerate(the_where_table.iterrows()):
self.assertEqual(row['id'], idx)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_where_table = traj2.results.shared_data.table
with StorageContextManager(traj2):
result = second_where_table.where('(id == 2)&(name == b"mehmet 2")&(surname ==b"Timur")&(weight == 67.5)')
there = False
for row in result:
there = True
self.assertTrue(there)
# def test_table_append_where(self):
# pass
#
# def test_table_will_query_use_indexing(self):
# pass
#
# def test_table_copy(self):
# pass
#
# def test_table_flush_rows_to_index(self):
# pass
#
# def test_table_get_enum(self):
# pass
#
# def test_table_reindex(self):
# pass
#
# def test_table_reindex_dirty(self):
# pass
#
# def test_table_remove_index(self):
# pass
#
# def test_table_create_index(self):
# pass
#
# def test_table_create_cindex(self):
# pass
#
# def test_table_colindexes(self):
# pass
#
# def test_table_cols(self):
# pass
#
# def test_table_row(self):
# pass
def test_table_flush(self):
the_flush_table = self.traj.results.shared_data.table
self.assertTrue(the_flush_table is self.shared_table)
the_flush_table.create_shared_data(description=MyTable)
with StorageContextManager(self.traj):
row = the_flush_table.row
for i in range(10):
row['id'] = i
row['name'] = 'mehmet %d' % i
row['surname'] = 'Timur'
row['weight'] = 65.5 + i
row.append()
the_flush_table.flush()
for idx, row in enumerate(the_flush_table.iterrows()):
self.assertEqual(row['id'], idx)
self.assertEqual(row['name'], ('mehmet %d' % idx).encode('utf-8'))
self.assertEqual(row['surname'], 'Timur'.encode('utf-8'))
self.assertEqual(row['weight'], 65.5+idx)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_flush_table = traj2.results.shared_data.table
with StorageContextManager(traj2):
for idx, row in enumerate(second_flush_table.iterrows()):
self.assertEqual(row['id'], idx)
self.assertEqual(row['name'], ('mehmet %d' % idx).encode('utf-8'))
self.assertEqual(row['surname'], 'Timur'.encode('utf-8'))
self.assertEqual(row['weight'], 65.5+idx)
row = second_flush_table.row
for i in range(10, 11):
row['id'] = i
row['name'] = 'mehmet %d' % i
row['surname'] = 'Timur'
row['weight'] = 65.5 + i
row.append()
second_flush_table.flush()
for idx, row in enumerate(second_flush_table.iterrows()):
self.assertEqual(row['id'], idx)
self.assertEqual(row['name'], ('mehmet %d' % idx).encode('utf-8'))
self.assertEqual(row['surname'], 'Timur'.encode('utf-8'))
self.assertEqual(row['weight'], 65.5+idx)
class SharedArrayTest(TrajectoryComparator):
tags = 'unittest', 'trajectory', 'shared', 'hdf5', 'array', 'mehmet'
def setUp(self):
self.filename = make_temp_dir('shared_table_test.hdf5')
self.traj = Trajectory(name=make_trajectory_name(self), filename=self.filename)
self.traj.v_standard_result = SharedResult
self.traj.f_store(only_init=True)
self.traj.f_add_result('shared_data')
self.shared_array = SharedArray(name='array',
parent=self.traj.shared_data,
trajectory=self.traj,
add_to_parent=True)
def test_array_read(self):
the_reading_array = np.ones((100, 100)) * 4
first_reading_array = self.traj.results.shared_data.array
self.assertTrue(first_reading_array is self.shared_array)
first_reading_array.create_shared_data(obj=the_reading_array)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_reading_array = traj2.shared_data.array.read()
self.assertTrue(np.all(the_reading_array == second_reading_array),
'%s != %s' % (str(the_reading_array), str(second_reading_array)))
def test_array_getitem(self):
the_getitem_array = np.array(range(100))
first_getitem_array = self.traj.results.shared_data.array
first_getitem_array.create_shared_data(obj=the_getitem_array)
for k in range(len(the_getitem_array)):
self.assertEqual(the_getitem_array[k], first_getitem_array[k])
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
for j in range(len(the_getitem_array)):
self.assertEqual(the_getitem_array[j], traj2.results.shared_data.array[j])
def test_array_getenum(self):
the_getenum_array = np.array(range(100))
first_getenum_array = self.traj.results.shared_data.array
first_getenum_array.create_shared_data(obj=the_getenum_array)
with self.assertRaises(TypeError):
first_getenum_array.get_enum()
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_enum_array = traj2.results.shared_data.array
with self.assertRaises(TypeError):
second_enum_array.get_enum()
def test_array_iterrows(self):
the_iterrows_array = np.random.randint(0, 100, (100, 100))
first_iterrows_array = self.traj.results.shared_data.array
first_iterrows_array.create_shared_data(obj=the_iterrows_array)
with StorageContextManager(self.traj):
for idx, row in enumerate(first_iterrows_array.iterrows()):
self.assertTrue(np.all(row == the_iterrows_array[idx, :]))
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_iterrows_array = traj2.results.shared_data.array
with StorageContextManager(traj2):
for idx, row in enumerate(second_iterrows_array.iterrows()):
self.assertTrue(np.all(row == the_iterrows_array[idx, :]))
def test_array_setitem(self):
the_setitem_array = np.zeros((50, 50))
first_setitem_array = self.traj.results.shared_data.array
first_setitem_array.create_shared_data(obj=the_setitem_array)
first_setitem_array[2, 2] = 10
self.assertEqual(first_setitem_array[2, 2], 10)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_setitem_array = traj2.results.shared_data.array
self.assertEqual(second_setitem_array[2, 2], 10)
second_setitem_array[3, 3] = 17
self.assertEqual(second_setitem_array[3, 3], 17)
def test_array_iter(self):
the_iterrows_array = np.random.randint(0, 100, (100, 100))
first_iterrows_array = self.traj.results.shared_data.array
first_iterrows_array.create_shared_data(obj=the_iterrows_array)
with StorageContextManager(self.traj):
for idx, row in enumerate(first_iterrows_array):
self.assertTrue(np.all(row == the_iterrows_array[idx, :]))
self.assertTrue(np.all(the_iterrows_array == first_iterrows_array.read()))
for idx, row in enumerate(the_iterrows_array):
self.assertTrue(np.all(row == the_iterrows_array[idx, :]))
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_iterrows_array = traj2.results.shared_data.array
with StorageContextManager(traj2):
for idx, row in enumerate(second_iterrows_array):
self.assertTrue(np.all(row == the_iterrows_array[idx, :]))
self.assertTrue(np.all(the_iterrows_array == second_iterrows_array.read()))
for idx, row in enumerate(second_iterrows_array):
self.assertTrue(np.all(row == the_iterrows_array[idx, :]))
def test_array_len(self):
the_len_array = np.ones((100, 100))
first_len_array = self.traj.results.shared_data.array
self.assertTrue(first_len_array is self.shared_array)
first_len_array.create_shared_data(obj=the_len_array)
self.assertEqual(len(first_len_array), 100)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_len_array = traj2.results.shared_data.array
self.assertEqual(len(second_len_array), 100)
if __name__ == '__main__':
opt_args = parse_args()
run_suite(**opt_args) | bsd-3-clause |
peterwilletts24/Python-Scripts | plot_scripts/Rain/Diurnal/sea_diurnal_rain_plot_domain_constrain_8and12km_southern_eastern_indian_ocean.py | 1 | 12866 | """
Load saved numpy diurnal-average (x, y) arrays, plot and save.
"""
import os, sys
import matplotlib
#matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import matplotlib.cm as mpl_cm
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from matplotlib import cm
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rc('font', family = 'serif', serif = 'cmr10')
import numpy as np
from datetime import timedelta
import datetime
import imp
import re
from textwrap import wrap
model_name_convert_legend = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/model_name_convert_legend.py')
#unrotate = imp.load_source('util', '/home/pwille/python_scripts/modules/unrotate_pole.py')
###############
# Things to change
top_dir='/nfs/a90/eepdw/Data/Rain_Land_Sea_Diurnal'
pp_file = 'avg.5216'
lon_max = 101.866
lon_min = 80
lat_max= 5
lat_min=-10
trmm_dir = '/nfs/a90/eepdw/Data/Observations/Satellite/TRMM/Diurnal/'
trmm_file = "trmm_diurnal_average_lat_%s_%s_lon_%s_%s_southern_eastern_indian_ocean.npz" % (lat_min,lat_max, lon_min, lon_max)
# Make own time x-axis
utc_to_local=datetime.timedelta(hours=5, minutes=30)
d = matplotlib.dates.drange(datetime.datetime(2011, 8, 21, 6,30)+utc_to_local, datetime.datetime(2011, 8, 22, 6, 30)+utc_to_local, timedelta(hours=1))
#d = matplotlib.dates.drange(datetime.datetime(2011, 8, 21, 6,30), datetime.datetime(2011, 8, 22, 6, 30), timedelta(hours=1))
#############
formatter = matplotlib.dates.DateFormatter('%H:%M')
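# Note on the x-axis built above: the diurnal cycle runs from 06:30 UTC on
# 21 Aug 2011 to 06:30 UTC on 22 Aug 2011 in hourly steps, shifted by the
# +05:30 UTC-to-local offset, and tick labels are rendered as HH:MM by
# `formatter`.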
def main():
#experiment_ids = ['djznw', 'djzny', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ] # All 12
experiment_ids_p = [ 'dkmbq', 'dklzq' ] # Most of Params
experiment_ids_e = ['dklyu', 'dklwu'] # Most of Explicit
#experiment_ids = ['djzny', 'djznq', 'djzns', 'djznw', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq' ]
#plt.ion()
NUM_COLOURS = 15
cmap=cm.get_cmap(cm.Set1, NUM_COLOURS)
#cgen = (cmap(1.*i/NUM_COLORS) for i in range(NUM_COLORS))
for ls in ['sea']:
fig = plt.figure(figsize=(12,6))
ax = fig.add_subplot(111)
legendEntries=[]
legendtext=[]
plot_trmm = np.load('%s%s_%s' % (trmm_dir, ls, trmm_file))
dates_trmm=[]
p=[]
for dp in plot_trmm['hour']:
print dp
if ((int(dp)<23) & (int(dp)>=6)):
dates_trmm.append(datetime.datetime(2011, 8, 21, int(dp), 0))
p.append(plot_trmm['mean'][plot_trmm['hour']==dp])
if ((int(dp)>=0) & (int(dp)<=6)):
dates_trmm.append(datetime.datetime(2011, 8, 22, int(dp), 0))
p.append(plot_trmm['mean'][plot_trmm['hour']==dp])
#print dates_trmm
a = np.argsort(dates_trmm,axis=0)
d_trmm = np.array(dates_trmm)[a]
pl = (np.array(p)[a])
#pl=np.sort(pl,axis=1)
l, = plt.plot_date(d_trmm+utc_to_local, pl, label='TRMM', linewidth=2, linestyle='-', marker='', markersize=2, fmt='', color='#262626')
legendEntries.append(l)
legendtext.append('TRMM')
cmorph_dir = '/nfs/a90/eepdw/Data/Observations/Satellite/CMORPH/Diurnal/'
cmorph_file = "cmorph_diurnal_average_southern_eastern_indian_ocean_polygon.npz"
plot_cmorph = np.load('%s%s_%s' % (cmorph_dir, ls, cmorph_file))
dates_cmorph=[]
p=[]
for dp in plot_cmorph['hour']:
print dp
if ((int(dp)<23) & (int(dp)>=6)):
dates_cmorph.append(datetime.datetime(2011, 8, 21, int(dp), 0))
p.append(plot_cmorph['mean'][plot_cmorph['hour']==dp])
if ((int(dp)>=0) & (int(dp)<=6)):
dates_cmorph.append(datetime.datetime(2011, 8, 22, int(dp), 0))
p.append(plot_cmorph['mean'][plot_cmorph['hour']==dp])
#print dates_trmm
a = np.argsort(dates_cmorph,axis=0)
d_cmorph = np.array(dates_cmorph)[a]
pl = (np.array(p)[a])
#pl=np.sort(pl,axis=1)
l, = plt.plot_date(d_cmorph+utc_to_local, pl, label='CMORPH', linewidth=2, linestyle=':', marker='', markersize=2, fmt='', color='black')
legendEntries.append(l)
legendtext.append('CMORPH')
#l0=plt.legend(legendEntries, legendtext,title='', frameon=False, loc=9, bbox_to_anchor=(0, 0,1, 1))
l0=plt.legend(legendEntries, legendtext,title='', frameon=False, loc=9, bbox_to_anchor=(0.31, 0,1, 1))
gsmap_dir = '/nfs/a90/eepdw/Data/Observations/Satellite/GSMAP_Aug_Sep_2011/Diurnal/'
gsmap_file = "gsmap_diurnal_average_southern_eastern_indian_ocean_polygon.npz"
plot_gsmap = np.load('%s%s_%s' % (gsmap_dir, ls, gsmap_file))
dates_gsmap=[]
p=[]
for dp in plot_gsmap['hour']:
print dp
if ((int(dp)<23) & (int(dp)>=6)):
dates_gsmap.append(datetime.datetime(2011, 8, 21, int(dp), 0))
p.append(plot_gsmap['mean'][plot_gsmap['hour']==dp])
if ((int(dp)>=0) & (int(dp)<=6)):
dates_gsmap.append(datetime.datetime(2011, 8, 22, int(dp), 0))
p.append(plot_gsmap['mean'][plot_gsmap['hour']==dp])
#print dates_trmm
a = np.argsort(dates_gsmap,axis=0)
d_gsmap = np.array(dates_gsmap)[a]
pl = (np.array(p)[a])
#pl=np.sort(pl,axis=1)
l, = plt.plot_date(d_gsmap+utc_to_local, pl, label='GSMAP', linewidth=2, linestyle='--', marker='', markersize=2, fmt='', color='black')
legendEntries.append(l)
legendtext.append('GSMAP')
l0=plt.legend(legendEntries, legendtext,title='', frameon=False, prop={'size':8}, loc=9, bbox_to_anchor=(0.21, 0,1, 1))
# Change the legend label colors to almost black
texts = l0.texts
for t in texts:
t.set_color('#262626')
legendEntries=[]
legendtext=[]
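        # The if-chains in the two experiment loops below map each experiment
        # id to a fixed colour, linewidth and linestyle; the same mapping is
        # repeated for the parametrised and explicit model lists.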
for c, experiment_id in enumerate(experiment_ids_p):
expmin1 = experiment_id[:-1]
if (experiment_id=='djznw'):
print experiment_id
colour = cmap(1.*1/NUM_COLOURS)
linewidth=0.2
linestylez='--'
if (experiment_id=='djzny'):
print experiment_id
colour = cmap(1.*3/NUM_COLOURS)
linewidth=0.5
linestylez='--'
if ((experiment_id=='djznq') or (experiment_id=='dkjxq')):
print experiment_id
colour = cmap(1.*5/NUM_COLOURS)
linewidth=0.8
if (experiment_id=='djznq'):
linestylez='--'
if (experiment_id=='dkjxq'):
linestylez=':'
if ((experiment_id=='dklzq') or (experiment_id=='dklwu')):
print experiment_id
colour = cmap(1.*7/NUM_COLOURS)
linewidth=1
if (experiment_id=='dklzq'):
linestylez='--'
if (experiment_id=='dklwu'):
linestylez='-'
if ((experiment_id=='dklyu') or (experiment_id=='dkmbq')):
print experiment_id
colour = cmap(1.*9/NUM_COLOURS)
linewidth=1.3
if (experiment_id=='dkmbq'):
linestylez='--'
if (experiment_id=='dklyu'):
linestylez='-'
if (experiment_id=='djzns'):
print experiment_id
colour = cmap(1.*11/NUM_COLOURS)
linewidth=1.6
linestylez='-'
if ((experiment_id=='dkbhu')or (experiment_id=='dkhgu')):
print experiment_id
colour = cmap(1.*13/NUM_COLOURS)
linewidth=1.9
if (experiment_id=='dkbhu'):
linestylez='-'
if (experiment_id=='dkhgu'):
linestylez=':'
if (experiment_id=='djznu'):
print experiment_id
colour = cmap(1.*15/NUM_COLOURS)
linewidth=2.
linestylez='-'
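            # The stored diurnal means appear to be per-second rain rates;
            # the factor of 3600 below converts them to mm/h to match the
            # y-axis label (assumption based on the plotted units).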
try:
plotnp = np.load('%s/%s/%s/%s_%s_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s.npy' % (top_dir, expmin1, experiment_id, pp_file, ls, lat_min, lat_max, lon_min, lon_max))
l, = plt.plot_date(d, plotnp[0]*3600, label='%s' % (model_name_convert_legend.main(experiment_id)), linewidth=linewidth, linestyle=linestylez, marker='', markersize=2, fmt='', color=colour)
legendEntries.append(l)
legendtext.append('%s' % (model_name_convert_legend.main(experiment_id)))
except Exception, e:
print e
pass
l1=plt.legend(legendEntries, legendtext, title='Parametrised', loc=9, frameon=False, prop={'size':8}, bbox_to_anchor=(0, 0,1, 1))
# Change the legend label colors to almost black
texts = l1.texts
for t in texts:
t.set_color('#262626')
legendEntries=[]
legendtext=[]
c1=0
for c, experiment_id in enumerate(experiment_ids_e):
if (experiment_id=='djznw'):
print experiment_id
colour = cmap(1.*1/NUM_COLOURS)
linewidth=0.2
linestylez='--'
if (experiment_id=='djzny'):
print experiment_id
colour = cmap(1.*3/NUM_COLOURS)
linewidth=0.5
linestylez='--'
if ((experiment_id=='djznq') or (experiment_id=='dkjxq')):
print experiment_id
colour = cmap(1.*5/NUM_COLOURS)
linewidth=0.8
if (experiment_id=='djznq'):
linestylez='--'
if (experiment_id=='dkjxq'):
linestylez=':'
if ((experiment_id=='dklzq') or (experiment_id=='dklwu')):
print experiment_id
colour = cmap(1.*7/NUM_COLOURS)
linewidth=1
if (experiment_id=='dklzq'):
linestylez='--'
if (experiment_id=='dklwu'):
linestylez='-'
if ((experiment_id=='dklyu') or (experiment_id=='dkmbq')):
print experiment_id
colour = cmap(1.*9/NUM_COLOURS)
linewidth=1.3
if (experiment_id=='dkmbq'):
linestylez='--'
if (experiment_id=='dklyu'):
linestylez='-'
if (experiment_id=='djzns'):
print experiment_id
colour = cmap(1.*11/NUM_COLOURS)
linewidth=1.6
linestylez='-'
if ((experiment_id=='dkbhu')or (experiment_id=='dkhgu')):
print experiment_id
colour = cmap(1.*13/NUM_COLOURS)
linewidth=1.9
if (experiment_id=='dkbhu'):
linestylez='-'
if (experiment_id=='dkhgu'):
linestylez=':'
if (experiment_id=='djznu'):
print experiment_id
colour = cmap(1.*15/NUM_COLOURS)
linewidth=2.
linestylez='-'
expmin1 = experiment_id[:-1]
try:
plotnp = np.load('%s/%s/%s/%s_%s_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s.npy' % (top_dir, expmin1, experiment_id, pp_file, ls, lat_min, lat_max, lon_min, lon_max))
l, = plt.plot_date(d, plotnp[0]*3600, label='%s' % (model_name_convert_legend.main(experiment_id)), linewidth=linewidth, linestyle=linestylez, marker='', markersize=2, fmt='', color=colour)
legendEntries.append(l)
legendtext.append('%s' % (model_name_convert_legend.main(experiment_id)))
except Exception, e:
print e
pass
l2=plt.legend(legendEntries, legendtext, title='Explicit', loc=9, frameon=False, bbox_to_anchor=(0.11, 0,1, 1), prop={'size':8})
plt.gca().add_artist(l1)
plt.gca().add_artist(l0)
plt.gca().xaxis.set_major_formatter(formatter)
# Change the legend label colors to almost black
texts = l2.texts
for t in texts:
t.set_color('#262626')
plt.xlabel('Time (UTC)')
plt.ylabel('mm/h')
title="Domain Averaged Rainfall - %s" % ls
t=re.sub('(.{68} )', '\\1\n', str(title), 0, re.DOTALL)
t = re.sub(r'[(\']', ' ', t)
t = re.sub(r'[\',)]', ' ', t)
pp_filenodot= pp_file.replace(".", "")
# Bit of formatting
# Set colour of axis lines
spines_to_keep = ['bottom', 'left']
for spine in spines_to_keep:
ax.spines[spine].set_linewidth(0.5)
ax.spines[spine].set_color('#262626')
# Remove top and right axes lines ("spines")
spines_to_remove = ['top', 'right']
for spine in spines_to_remove:
ax.spines[spine].set_visible(False)
        # Get rid of tick marks; the positions of the tick labels convey the values well enough.
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
# Change the labels to the off-black
ax.xaxis.label.set_color('#262626')
ax.yaxis.label.set_color('#262626')
if not os.path.exists('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/'): os.makedirs('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/')
#plt.savefig('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/%s_%s_latlon_southern_eastern_indian_ocean_notitle_8and12kmonly.png' % (pp_filenodot, ls), format='png', bbox_inches='tight')
plt.title('\n'.join(wrap('%s' % (t.title()), 1000,replace_whitespace=False)), fontsize=16)
plt.show()
#plt.savefig('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/%s_%s_latlon_southern_eastern_indian_ocean_8and12kmonly.png' % (pp_filenodot, ls), format='png', bbox_inches='tight')
plt.close()
if __name__ == '__main__':
main()
| mit |
chemelnucfin/tensorflow | tensorflow/contrib/timeseries/examples/lstm.py | 1 | 13913 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A more advanced example, of building an RNN-based time series model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from os import path
import tempfile
import numpy
import tensorflow as tf
from tensorflow.contrib.timeseries.python.timeseries import estimators as ts_estimators
from tensorflow.contrib.timeseries.python.timeseries import model as ts_model
from tensorflow.contrib.timeseries.python.timeseries import state_management
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/multivariate_periods.csv")
class _LSTMModel(ts_model.SequentialTimeSeriesModel):
"""A time series model-building example using an RNNCell."""
def __init__(self, num_units, num_features, exogenous_feature_columns=None,
dtype=tf.float32):
"""Initialize/configure the model object.
Note that we do not start graph building here. Rather, this object is a
configurable factory for TensorFlow graphs which are run by an Estimator.
Args:
num_units: The number of units in the model's LSTMCell.
num_features: The dimensionality of the time series (features per
timestep).
exogenous_feature_columns: A list of `tf.feature_column`s representing
features which are inputs to the model but are not predicted by
it. These must then be present for training, evaluation, and
prediction.
dtype: The floating point data type to use.
"""
super(_LSTMModel, self).__init__(
# Pre-register the metrics we'll be outputting (just a mean here).
train_output_names=["mean"],
predict_output_names=["mean"],
num_features=num_features,
exogenous_feature_columns=exogenous_feature_columns,
dtype=dtype)
self._num_units = num_units
# Filled in by initialize_graph()
self._lstm_cell = None
self._lstm_cell_run = None
self._predict_from_lstm_output = None
def initialize_graph(self, input_statistics=None):
"""Save templates for components, which can then be used repeatedly.
This method is called every time a new graph is created. It's safe to start
adding ops to the current default graph here, but the graph should be
constructed from scratch.
Args:
input_statistics: A math_utils.InputStatistics object.
"""
super(_LSTMModel, self).initialize_graph(input_statistics=input_statistics)
with tf.variable_scope("", use_resource=True):
# Use ResourceVariables to avoid race conditions.
self._lstm_cell = tf.nn.rnn_cell.LSTMCell(num_units=self._num_units)
# Create templates so we don't have to worry about variable reuse.
self._lstm_cell_run = tf.make_template(
name_="lstm_cell",
func_=self._lstm_cell,
create_scope_now_=True)
# Transforms LSTM output into mean predictions.
self._predict_from_lstm_output = tf.make_template(
name_="predict_from_lstm_output",
func_=functools.partial(tf.layers.dense, units=self.num_features),
create_scope_now_=True)
def get_start_state(self):
"""Return initial state for the time series model."""
return (
# Keeps track of the time associated with this state for error checking.
tf.zeros([], dtype=tf.int64),
# The previous observation or prediction.
tf.zeros([self.num_features], dtype=self.dtype),
# The most recently seen exogenous features.
tf.zeros(self._get_exogenous_embedding_shape(), dtype=self.dtype),
# The state of the RNNCell (batch dimension removed since this parent
# class will broadcast).
[tf.squeeze(state_element, axis=0)
for state_element
in self._lstm_cell.zero_state(batch_size=1, dtype=self.dtype)])
def _filtering_step(self, current_times, current_values, state, predictions):
"""Update model state based on observations.
Note that we don't do much here aside from computing a loss. In this case
it's easier to update the RNN state in _prediction_step, since that covers
running the RNN both on observations (from this method) and our own
predictions. This distinction can be important for probabilistic models,
where repeatedly predicting without filtering should lead to low-confidence
predictions.
Args:
current_times: A [batch size] integer Tensor.
current_values: A [batch size, self.num_features] floating point Tensor
with new observations.
state: The model's state tuple.
predictions: The output of the previous `_prediction_step`.
Returns:
A tuple of new state and a predictions dictionary updated to include a
loss (note that we could also return other measures of goodness of fit,
although only "loss" will be optimized).
"""
state_from_time, prediction, exogenous, lstm_state = state
with tf.control_dependencies(
[tf.assert_equal(current_times, state_from_time)]):
# Subtract the mean and divide by the variance of the series. Slightly
# more efficient if done for a whole window (using the normalize_features
# argument to SequentialTimeSeriesModel).
transformed_values = self._scale_data(current_values)
# Use mean squared error across features for the loss.
predictions["loss"] = tf.reduce_mean(
(prediction - transformed_values) ** 2, axis=-1)
# Keep track of the new observation in model state. It won't be run
# through the LSTM until the next _imputation_step.
new_state_tuple = (current_times, transformed_values,
exogenous, lstm_state)
return (new_state_tuple, predictions)
def _prediction_step(self, current_times, state):
"""Advance the RNN state using a previous observation or prediction."""
_, previous_observation_or_prediction, exogenous, lstm_state = state
# Update LSTM state based on the most recent exogenous and endogenous
# features.
inputs = tf.concat([previous_observation_or_prediction, exogenous],
axis=-1)
lstm_output, new_lstm_state = self._lstm_cell_run(
inputs=inputs, state=lstm_state)
next_prediction = self._predict_from_lstm_output(lstm_output)
new_state_tuple = (current_times, next_prediction,
exogenous, new_lstm_state)
return new_state_tuple, {"mean": self._scale_back_data(next_prediction)}
def _imputation_step(self, current_times, state):
"""Advance model state across a gap."""
# Does not do anything special if we're jumping across a gap. More advanced
# models, especially probabilistic ones, would want a special case that
# depends on the gap size.
return state
def _exogenous_input_step(
self, current_times, current_exogenous_regressors, state):
"""Save exogenous regressors in model state for use in _prediction_step."""
state_from_time, prediction, _, lstm_state = state
return (state_from_time, prediction,
current_exogenous_regressors, lstm_state)
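# For reference, the model state threaded through the methods above is a
# 4-tuple: (time of the state, previous observation or prediction, most
# recently seen exogenous features, LSTM cell state). _filtering_step records
# new observations and computes the loss, _prediction_step advances the RNN,
# and _exogenous_input_step only swaps in fresh exogenous regressors.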
def train_and_predict(
csv_file_name=_DATA_FILE, training_steps=200, estimator_config=None,
export_directory=None):
"""Train and predict using a custom time series model."""
# Construct an Estimator from our LSTM model.
categorical_column = tf.feature_column.categorical_column_with_hash_bucket(
key="categorical_exogenous_feature", hash_bucket_size=16)
exogenous_feature_columns = [
# Exogenous features are not part of the loss, but can inform
# predictions. In this example the features have no extra information, but
# are included as an API example.
tf.feature_column.numeric_column(
"2d_exogenous_feature", shape=(2,)),
tf.feature_column.embedding_column(
categorical_column=categorical_column, dimension=10)]
estimator = ts_estimators.TimeSeriesRegressor(
model=_LSTMModel(
num_features=5,
num_units=128,
exogenous_feature_columns=exogenous_feature_columns),
optimizer=tf.compat.v1.train.AdamOptimizer(0.001),
config=estimator_config,
# Set state to be saved across windows.
state_manager=state_management.ChainingStateManager())
reader = tf.contrib.timeseries.CSVReader(
csv_file_name,
column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)
+ (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5
+ ("2d_exogenous_feature",) * 2
+ ("categorical_exogenous_feature",)),
# Data types other than for `times` need to be specified if they aren't
# float32. In this case one of our exogenous features has string dtype.
column_dtypes=((tf.int64,) + (tf.float32,) * 7 + (tf.string,)))
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=4, window_size=32)
estimator.train(input_fn=train_input_fn, steps=training_steps)
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Predict starting after the evaluation
predict_exogenous_features = {
"2d_exogenous_feature": numpy.concatenate(
[numpy.ones([1, 100, 1]), numpy.zeros([1, 100, 1])],
axis=-1),
"categorical_exogenous_feature": numpy.array(
["strkey"] * 100)[None, :, None]}
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=100,
exogenous_features=predict_exogenous_features)))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, :]
predicted_mean = numpy.squeeze(numpy.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
all_times = numpy.concatenate([times, predictions["times"]], axis=0)
# Export the model in SavedModel format. We include a bit of extra boilerplate
# for "cold starting" as if we didn't have any state from the Estimator, which
# is the case when serving from a SavedModel. If Estimator output is
# available, the result of "Estimator.evaluate" can be passed directly to
# `tf.contrib.timeseries.saved_model_utils.predict_continuation` as the
# `continue_from` argument.
with tf.Graph().as_default():
filter_feature_tensors, _ = evaluation_input_fn()
with tf.train.MonitoredSession() as session:
# Fetch the series to "warm up" our state, which will allow us to make
# predictions for its future values. This is just a dictionary of times,
# values, and exogenous features mapping to numpy arrays. The use of an
# input_fn is just a convenience for the example; they can also be
# specified manually.
filter_features = session.run(filter_feature_tensors)
if export_directory is None:
export_directory = tempfile.mkdtemp()
input_receiver_fn = estimator.build_raw_serving_input_receiver_fn()
export_location = estimator.export_saved_model(export_directory,
input_receiver_fn)
# Warm up and predict using the SavedModel
with tf.Graph().as_default():
with tf.compat.v1.Session() as session:
signatures = tf.saved_model.loader.load(
session, [tf.saved_model.tag_constants.SERVING], export_location)
state = tf.contrib.timeseries.saved_model_utils.cold_start_filter(
signatures=signatures, session=session, features=filter_features)
saved_model_output = (
tf.contrib.timeseries.saved_model_utils.predict_continuation(
continue_from=state, signatures=signatures,
session=session, steps=100,
exogenous_features=predict_exogenous_features))
# The exported model gives the same results as the Estimator.predict()
# call above.
numpy.testing.assert_allclose(
predictions["mean"],
numpy.squeeze(saved_model_output["mean"], axis=0))
return times, observed, all_times, predicted_mean
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
(observed_times, observations,
all_times, predictions) = train_and_predict()
pyplot.axvline(99, linestyle="dotted")
observed_lines = pyplot.plot(
observed_times, observations, label="Observed", color="k")
predicted_lines = pyplot.plot(
all_times, predictions, label="Predicted", color="b")
pyplot.legend(handles=[observed_lines[0], predicted_lines[0]],
loc="upper left")
pyplot.show()
if __name__ == "__main__":
tf.compat.v1.app.run(main=main)
| apache-2.0 |
dhuang/incubator-airflow | setup.py | 1 | 10423 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages, Command
from setuptools.command.test import test as TestCommand
import imp
import logging
import os
import pip
import sys
logger = logging.getLogger(__name__)
# Kept manually in sync with airflow.__version__
version = imp.load_source(
'airflow.version', os.path.join('airflow', 'version.py')).version
PY3 = sys.version_info[0] == 3
class Tox(TestCommand):
user_options = [('tox-args=', None, "Arguments to pass to tox")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = ''
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import tox
errno = tox.cmdline(args=self.tox_args.split())
sys.exit(errno)
class CleanCommand(Command):
"""Custom clean command to tidy up the project root."""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')
def git_version(version):
"""
Return a version to identify the state of the underlying git repo. The version will
indicate whether the head of the current git-backed working directory is tied to a
    release tag or not: it will indicate the former with a 'release:{version}' prefix
and the latter with a 'dev0' prefix. Following the prefix will be a sha of the current
branch head. Finally, a "dirty" suffix is appended to indicate that uncommitted changes
are present.
"""
repo = None
try:
import git
repo = git.Repo('.git')
except ImportError:
logger.warning('gitpython not found: Cannot compute the git version.')
return ''
except Exception as e:
logger.warning('Cannot compute the git version. {}'.format(e))
return ''
if repo:
sha = repo.head.commit.hexsha
if repo.is_dirty():
return '.dev0+{sha}.dirty'.format(sha=sha)
# commit is clean
return '.release:{version}+{sha}'.format(version=version, sha=sha)
else:
return 'no_git_version'
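# Illustrative (hypothetical) return values of git_version(version):
#   '.dev0+<sha>.dirty'        - working tree has uncommitted changes
#   '.release:{version}+<sha>' - clean checkout tied to this version
#   ''                         - gitpython missing or the repo is unreadable
#   'no_git_version'           - no git repository object was obtained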
def write_version(filename=os.path.join(*['airflow',
'git_version'])):
text = "{}".format(git_version(version))
with open(filename, 'w') as a:
a.write(text)
async = [
'greenlet>=0.4.9',
'eventlet>= 0.9.7',
'gevent>=0.13'
]
azure = ['azure-storage>=0.34.0']
sendgrid = ['sendgrid>=5.2.0']
celery = [
'celery>=4.0.2',
'flower>=0.7.3'
]
cgroups = [
'cgroupspy>=0.1.4',
]
crypto = ['cryptography>=0.9.3']
dask = [
'distributed>=1.15.2, <2'
]
databricks = ['requests>=2.5.1, <3']
datadog = ['datadog>=0.14.0']
doc = [
'sphinx>=1.2.3',
'sphinx-argparse>=0.1.13',
'sphinx-rtd-theme>=0.1.6',
'Sphinx-PyPI-upload>=0.2.1'
]
docker = ['docker-py>=1.6.0']
druid = ['pydruid>=0.4.1']
emr = ['boto3>=1.0.0']
gcp_api = [
'httplib2',
'google-api-python-client>=1.5.0, <1.6.0',
'oauth2client>=2.0.2, <2.1.0',
'PyOpenSSL',
'google-cloud-dataflow>=2.2.0',
'pandas-gbq'
]
hdfs = ['snakebite>=2.7.8']
webhdfs = ['hdfs[dataframe,avro,kerberos]>=2.0.4']
jenkins = ['python-jenkins>=0.4.15']
jira = ['JIRA>1.0.7']
hive = [
'hive-thrift-py>=0.0.1',
'pyhive>=0.1.3',
'impyla>=0.13.3',
'unicodecsv>=0.14.1'
]
jdbc = ['jaydebeapi>=1.1.1']
mssql = ['pymssql>=2.1.1', 'unicodecsv>=0.14.1']
mysql = ['mysqlclient>=1.3.6']
rabbitmq = ['librabbitmq>=1.6.1']
oracle = ['cx_Oracle>=5.1.2']
postgres = ['psycopg2-binary>=2.7.4']
ssh = ['paramiko>=2.1.1', 'pysftp>=0.2.9']
salesforce = ['simple-salesforce>=0.72']
s3 = ['boto3>=1.0.0']
samba = ['pysmbclient>=0.1.3']
slack = ['slackclient>=1.0.0']
statsd = ['statsd>=3.0.1, <4.0']
vertica = ['vertica-python>=0.5.1']
ldap = ['ldap3>=0.9.9.1']
kerberos = ['pykerberos>=1.1.13',
'requests_kerberos>=0.10.0',
'thrift_sasl>=0.2.0',
'snakebite[kerberos]>=2.7.8']
password = [
'bcrypt>=2.0.0',
'flask-bcrypt>=0.7.1',
]
github_enterprise = ['Flask-OAuthlib>=0.9.1']
qds = ['qds-sdk>=1.9.6']
cloudant = ['cloudant>=0.5.9,<2.0'] # major update coming soon, clamp to 0.x
redis = ['redis>=2.10.5']
kubernetes = ['kubernetes>=3.0.0',
'cryptography>=2.0.0']
snowflake = ['snowflake-connector-python>=1.5.2',
'snowflake-sqlalchemy>=1.1.0']
zendesk = ['zdesk']
all_dbs = postgres + mysql + hive + mssql + hdfs + vertica + cloudant + druid
devel = [
'click',
'freezegun',
'jira',
'lxml>=3.3.4',
'mock',
'moto==1.1.19',
'nose',
'nose-ignore-docstring==0.2',
'nose-timer',
'parameterized',
'qds-sdk>=1.9.6',
'rednose',
'paramiko',
'pysftp',
'requests_mock'
]
devel_minreq = devel + kubernetes + mysql + doc + password + s3 + cgroups
devel_hadoop = devel_minreq + hive + hdfs + webhdfs + kerberos
devel_all = (sendgrid + devel + all_dbs + doc + samba + s3 + slack + crypto + oracle +
docker + ssh + kubernetes + celery + azure + redis + gcp_api + datadog +
zendesk + jdbc + ldap + kerberos + password + webhdfs + jenkins +
druid + snowflake)
# Snakebite & Google Cloud Dataflow are not Python 3 compatible :'(
if PY3:
devel_ci = [package for package in devel_all if package not in
['snakebite>=2.7.8', 'snakebite[kerberos]>=2.7.8',
'google-cloud-dataflow>=2.2.0']]
else:
devel_ci = devel_all
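# Illustrative use of the extras defined above (standard setuptools extras
# syntax; the exact set to install depends on the deployment):
#   pip install 'apache-airflow[celery,postgres,s3]'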
def do_setup():
write_version()
setup(
name='apache-airflow',
description='Programmatically author, schedule and monitor data pipelines',
license='Apache License 2.0',
version=version,
packages=find_packages(exclude=['tests*']),
package_data={'': ['airflow/alembic.ini', "airflow/git_version"]},
include_package_data=True,
zip_safe=False,
scripts=['airflow/bin/airflow'],
install_requires=[
'alembic>=0.8.3, <0.9',
'bleach==2.1.2',
'configparser>=3.5.0, <3.6.0',
'croniter>=0.3.17, <0.4',
'dill>=0.2.2, <0.3',
'flask>=0.12, <0.13',
'flask-appbuilder>=1.9.6, <2.0.0',
'flask-admin==1.4.1',
'flask-caching>=1.3.3, <1.4.0',
'flask-login==0.2.11',
'flask-swagger==0.2.13',
'flask-wtf>=0.14, <0.15',
'funcsigs==1.0.0',
'future>=0.16.0, <0.17',
'gitpython>=2.0.2',
'gunicorn>=19.4.0, <20.0',
'iso8601>=0.1.12',
'jinja2>=2.7.3, <2.9.0',
'lxml>=3.6.0, <4.0',
'markdown>=2.5.2, <3.0',
'pandas>=0.17.1, <1.0.0',
'pendulum==1.4.4',
'psutil>=4.2.0, <5.0.0',
'pygments>=2.0.1, <3.0',
'python-daemon>=2.1.1, <2.2',
'python-dateutil>=2.3, <3',
'python-nvd3==0.15.0',
'requests>=2.5.1, <3',
'setproctitle>=1.1.8, <2',
'sqlalchemy>=1.1.15, <1.2.0',
'sqlalchemy-utc>=0.9.0',
'tabulate>=0.7.5, <0.8.0',
'thrift>=0.9.2',
'tzlocal>=1.4',
'werkzeug>=0.14.1, <0.15.0',
'zope.deprecation>=4.0, <5.0',
],
setup_requires=[
'docutils>=0.14, <1.0',
],
extras_require={
'all': devel_all,
'devel_ci': devel_ci,
'all_dbs': all_dbs,
'async': async,
'azure': azure,
'celery': celery,
'cgroups': cgroups,
'cloudant': cloudant,
'crypto': crypto,
'dask': dask,
'databricks': databricks,
'datadog': datadog,
'devel': devel_minreq,
'devel_hadoop': devel_hadoop,
'doc': doc,
'docker': docker,
'druid': druid,
'emr': emr,
'gcp_api': gcp_api,
'github_enterprise': github_enterprise,
'hdfs': hdfs,
'hive': hive,
'jdbc': jdbc,
'kerberos': kerberos,
'ldap': ldap,
'mssql': mssql,
'mysql': mysql,
'oracle': oracle,
'password': password,
'postgres': postgres,
'qds': qds,
'rabbitmq': rabbitmq,
's3': s3,
'salesforce': salesforce,
'samba': samba,
            'sendgrid': sendgrid,
'slack': slack,
'ssh': ssh,
'statsd': statsd,
'vertica': vertica,
'webhdfs': webhdfs,
'jira': jira,
'redis': redis,
'kubernetes': kubernetes,
'snowflake': snowflake
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: System :: Monitoring',
],
author='Apache Software Foundation',
author_email='[email protected]',
url='http://airflow.incubator.apache.org/',
download_url=(
'https://dist.apache.org/repos/dist/release/incubator/airflow/' + version),
cmdclass={
'test': Tox,
'extra_clean': CleanCommand,
},
)
if __name__ == "__main__":
do_setup()
| apache-2.0 |
wmvanvliet/mne-python | mne/tests/test_dipole.py | 1 | 20452 | import os
import os.path as op
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
import matplotlib.pyplot as plt
import pytest
from mne import (read_dipole, read_forward_solution,
convert_forward_solution, read_evokeds, read_cov,
SourceEstimate, write_evokeds, fit_dipole,
transform_surface_to, make_sphere_model, pick_types,
pick_info, EvokedArray, read_source_spaces, make_ad_hoc_cov,
make_forward_solution, Dipole, DipoleFixed, Epochs,
make_fixed_length_events, Evoked)
from mne.dipole import get_phantom_dipoles, _BDIP_ERROR_KEYS
from mne.simulation import simulate_evoked
from mne.datasets import testing
from mne.utils import run_tests_if_main, requires_mne, run_subprocess
from mne.proj import make_eeg_average_ref_proj
from mne.io import read_raw_fif, read_raw_ctf
from mne.io.constants import FIFF
from mne.surface import _compute_nearest
from mne.bem import _bem_find_surface, read_bem_solution
from mne.transforms import apply_trans, _get_trans
data_path = testing.data_path(download=False)
meg_path = op.join(data_path, 'MEG', 'sample')
fname_dip_xfit_80 = op.join(meg_path, 'sample_audvis-ave_xfit.dip')
fname_raw = op.join(meg_path, 'sample_audvis_trunc_raw.fif')
fname_dip = op.join(meg_path, 'sample_audvis_trunc_set1.dip')
fname_bdip = op.join(meg_path, 'sample_audvis_trunc_set1.bdip')
fname_dip_xfit = op.join(meg_path, 'sample_audvis_trunc_xfit.dip')
fname_bdip_xfit = op.join(meg_path, 'sample_audvis_trunc_xfit.bdip')
fname_evo = op.join(meg_path, 'sample_audvis_trunc-ave.fif')
fname_evo_full = op.join(meg_path, 'sample_audvis-ave.fif')
fname_cov = op.join(meg_path, 'sample_audvis_trunc-cov.fif')
fname_trans = op.join(meg_path, 'sample_audvis_trunc-trans.fif')
fname_fwd = op.join(meg_path, 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
fname_bem = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-1280-1280-1280-bem-sol.fif')
fname_src = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-oct-2-src.fif')
fname_xfit_dip = op.join(data_path, 'dip', 'fixed_auto.fif')
fname_xfit_dip_txt = op.join(data_path, 'dip', 'fixed_auto.dip')
fname_xfit_seq_txt = op.join(data_path, 'dip', 'sequential.dip')
fname_ctf = op.join(data_path, 'CTF', 'testdata_ctf_short.ds')
subjects_dir = op.join(data_path, 'subjects')
def _compare_dipoles(orig, new):
"""Compare dipole results for equivalence."""
assert_allclose(orig.times, new.times, atol=1e-3, err_msg='times')
assert_allclose(orig.pos, new.pos, err_msg='pos')
assert_allclose(orig.amplitude, new.amplitude, err_msg='amplitude')
assert_allclose(orig.gof, new.gof, err_msg='gof')
assert_allclose(orig.ori, new.ori, rtol=1e-4, atol=1e-4, err_msg='ori')
assert orig.name == new.name
def _check_dipole(dip, n_dipoles):
"""Check dipole sizes."""
assert len(dip) == n_dipoles
assert dip.pos.shape == (n_dipoles, 3)
assert dip.ori.shape == (n_dipoles, 3)
assert dip.gof.shape == (n_dipoles,)
assert dip.amplitude.shape == (n_dipoles,)
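# A minimal sketch of how the helpers above would be used (hypothetical, not
# part of the original test module):
#   dip = read_dipole(fname_dip)
#   _check_dipole(dip, len(dip))
#   _compare_dipoles(dip, read_dipole(fname_dip))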
@testing.requires_testing_data
def test_io_dipoles(tmpdir):
"""Test IO for .dip files."""
dipole = read_dipole(fname_dip)
assert 'Dipole ' in repr(dipole) # test repr
out_fname = op.join(str(tmpdir), 'temp.dip')
dipole.save(out_fname)
dipole_new = read_dipole(out_fname)
_compare_dipoles(dipole, dipole_new)
@testing.requires_testing_data
def test_dipole_fitting_ctf():
"""Test dipole fitting with CTF data."""
raw_ctf = read_raw_ctf(fname_ctf).set_eeg_reference(projection=True)
events = make_fixed_length_events(raw_ctf, 1)
evoked = Epochs(raw_ctf, events, 1, 0, 0, baseline=None).average()
cov = make_ad_hoc_cov(evoked.info)
sphere = make_sphere_model((0., 0., 0.))
# XXX Eventually we should do some better checks about accuracy, but
# for now our CTF phantom fitting tutorials will have to do
# (otherwise we need to add that to the testing dataset, which is
# a bit too big)
fit_dipole(evoked, cov, sphere, rank=dict(meg=len(evoked.data)))
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_mne
def test_dipole_fitting(tmpdir):
"""Test dipole fitting."""
amp = 100e-9
tempdir = str(tmpdir)
rng = np.random.RandomState(0)
fname_dtemp = op.join(tempdir, 'test.dip')
fname_sim = op.join(tempdir, 'test-ave.fif')
fwd = convert_forward_solution(read_forward_solution(fname_fwd),
surf_ori=False, force_fixed=True,
use_cps=True)
evoked = read_evokeds(fname_evo)[0]
cov = read_cov(fname_cov)
n_per_hemi = 5
vertices = [np.sort(rng.permutation(s['vertno'])[:n_per_hemi])
for s in fwd['src']]
nv = sum(len(v) for v in vertices)
stc = SourceEstimate(amp * np.eye(nv), vertices, 0, 0.001)
evoked = simulate_evoked(fwd, stc, evoked.info, cov, nave=evoked.nave,
random_state=rng)
# For speed, let's use a subset of channels (strange but works)
picks = np.sort(np.concatenate([
pick_types(evoked.info, meg=True, eeg=False)[::2],
pick_types(evoked.info, meg=False, eeg=True)[::2]]))
evoked.pick_channels([evoked.ch_names[p] for p in picks])
evoked.add_proj(make_eeg_average_ref_proj(evoked.info))
write_evokeds(fname_sim, evoked)
# Run MNE-C version
run_subprocess([
'mne_dipole_fit', '--meas', fname_sim, '--meg', '--eeg',
'--noise', fname_cov, '--dip', fname_dtemp,
'--mri', fname_fwd, '--reg', '0', '--tmin', '0',
])
dip_c = read_dipole(fname_dtemp)
# Run mne-python version
sphere = make_sphere_model(head_radius=0.1)
with pytest.warns(RuntimeWarning, match='projection'):
dip, residual = fit_dipole(evoked, cov, sphere, fname_fwd,
rank='info') # just to test rank support
assert isinstance(residual, Evoked)
# Sanity check: do our residuals have less power than orig data?
data_rms = np.sqrt(np.sum(evoked.data ** 2, axis=0))
resi_rms = np.sqrt(np.sum(residual.data ** 2, axis=0))
assert (data_rms > resi_rms * 0.95).all(), \
'%s (factor: %s)' % ((data_rms / resi_rms).min(), 0.95)
# Compare to original points
transform_surface_to(fwd['src'][0], 'head', fwd['mri_head_t'])
transform_surface_to(fwd['src'][1], 'head', fwd['mri_head_t'])
assert fwd['src'][0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD
src_rr = np.concatenate([s['rr'][v] for s, v in zip(fwd['src'], vertices)],
axis=0)
src_nn = np.concatenate([s['nn'][v] for s, v in zip(fwd['src'], vertices)],
axis=0)
# MNE-C skips the last "time" point :(
out = dip.crop(dip_c.times[0], dip_c.times[-1])
assert (dip is out)
src_rr, src_nn = src_rr[:-1], src_nn[:-1]
# check that we did about as well
corrs, dists, gc_dists, amp_errs, gofs = [], [], [], [], []
for d in (dip_c, dip):
new = d.pos
diffs = new - src_rr
corrs += [np.corrcoef(src_rr.ravel(), new.ravel())[0, 1]]
dists += [np.sqrt(np.mean(np.sum(diffs * diffs, axis=1)))]
gc_dists += [180 / np.pi * np.mean(np.arccos(np.sum(src_nn * d.ori,
axis=1)))]
amp_errs += [np.sqrt(np.mean((amp - d.amplitude) ** 2))]
gofs += [np.mean(d.gof)]
# XXX possibly some OpenBLAS numerical differences make
# things slightly worse for us
factor = 0.7
assert dists[0] / factor >= dists[1], 'dists: %s' % dists
assert corrs[0] * factor <= corrs[1], 'corrs: %s' % corrs
assert gc_dists[0] / factor >= gc_dists[1] * 0.8, \
'gc-dists (ori): %s' % gc_dists
assert amp_errs[0] / factor >= amp_errs[1],\
'amplitude errors: %s' % amp_errs
# This one is weird because our cov/sim/picking is weird
assert gofs[0] * factor <= gofs[1] * 2, 'gof: %s' % gofs
@testing.requires_testing_data
def test_dipole_fitting_fixed(tmpdir):
"""Test dipole fitting with a fixed position."""
tpeak = 0.073
sphere = make_sphere_model(head_radius=0.1)
evoked = read_evokeds(fname_evo, baseline=(None, 0))[0]
evoked.pick_types(meg=True)
t_idx = np.argmin(np.abs(tpeak - evoked.times))
evoked_crop = evoked.copy().crop(tpeak, tpeak)
assert len(evoked_crop.times) == 1
cov = read_cov(fname_cov)
dip_seq, resid = fit_dipole(evoked_crop, cov, sphere)
assert isinstance(dip_seq, Dipole)
assert isinstance(resid, Evoked)
assert len(dip_seq.times) == 1
pos, ori, gof = dip_seq.pos[0], dip_seq.ori[0], dip_seq.gof[0]
amp = dip_seq.amplitude[0]
# Fix position, allow orientation to change
dip_free, resid_free = fit_dipole(evoked, cov, sphere, pos=pos)
assert isinstance(dip_free, Dipole)
assert isinstance(resid_free, Evoked)
assert_allclose(dip_free.times, evoked.times)
assert_allclose(np.tile(pos[np.newaxis], (len(evoked.times), 1)),
dip_free.pos)
assert_allclose(ori, dip_free.ori[t_idx]) # should find same ori
    assert (np.dot(dip_free.ori, ori).mean() < 0.9)  # but only a few time points match
assert_allclose(gof, dip_free.gof[t_idx]) # ... same gof
assert_allclose(amp, dip_free.amplitude[t_idx]) # and same amp
assert_allclose(resid.data, resid_free.data[:, [t_idx]])
# Fix position and orientation
dip_fixed, resid_fixed = fit_dipole(evoked, cov, sphere, pos=pos, ori=ori)
assert (isinstance(dip_fixed, DipoleFixed))
assert_allclose(dip_fixed.times, evoked.times)
assert_allclose(dip_fixed.info['chs'][0]['loc'][:3], pos)
assert_allclose(dip_fixed.info['chs'][0]['loc'][3:6], ori)
assert_allclose(dip_fixed.data[1, t_idx], gof)
assert_allclose(resid.data, resid_fixed.data[:, [t_idx]])
_check_roundtrip_fixed(dip_fixed, tmpdir)
# bad resetting
evoked.info['bads'] = [evoked.ch_names[3]]
dip_fixed, resid_fixed = fit_dipole(evoked, cov, sphere, pos=pos, ori=ori)
# Degenerate conditions
evoked_nan = evoked.copy().crop(0, 0)
evoked_nan.data[0, 0] = None
pytest.raises(ValueError, fit_dipole, evoked_nan, cov, sphere)
pytest.raises(ValueError, fit_dipole, evoked, cov, sphere, ori=[1, 0, 0])
pytest.raises(ValueError, fit_dipole, evoked, cov, sphere, pos=[0, 0, 0],
ori=[2, 0, 0])
pytest.raises(ValueError, fit_dipole, evoked, cov, sphere, pos=[0.1, 0, 0])
# copying
dip_fixed_2 = dip_fixed.copy()
dip_fixed_2.data[:] = 0.
assert not np.isclose(dip_fixed.data, 0., atol=1e-20).any()
# plotting
plt.close('all')
dip_fixed.plot()
plt.close('all')
orig_times = np.array(dip_fixed.times)
shift_times = dip_fixed.shift_time(1.).times
assert_allclose(shift_times, orig_times + 1)
@testing.requires_testing_data
def test_len_index_dipoles():
"""Test len and indexing of Dipole objects."""
dipole = read_dipole(fname_dip)
d0 = dipole[0]
d1 = dipole[:1]
_check_dipole(d0, 1)
_check_dipole(d1, 1)
_compare_dipoles(d0, d1)
mask = dipole.gof > 15
idx = np.where(mask)[0]
d_mask = dipole[mask]
_check_dipole(d_mask, 4)
_compare_dipoles(d_mask, dipole[idx])
@pytest.mark.slowtest # slow-ish on Travis OSX
@testing.requires_testing_data
def test_min_distance_fit_dipole():
"""Test dipole min_dist to inner_skull."""
subject = 'sample'
raw = read_raw_fif(fname_raw, preload=True)
# select eeg data
picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
info = pick_info(raw.info, picks)
# Let's use cov = Identity
cov = read_cov(fname_cov)
cov['data'] = np.eye(cov['data'].shape[0])
    # Simulated scalp map
simulated_scalp_map = np.zeros(picks.shape[0])
simulated_scalp_map[27:34] = 1
simulated_scalp_map = simulated_scalp_map[:, None]
evoked = EvokedArray(simulated_scalp_map, info, tmin=0)
min_dist = 5. # distance in mm
bem = read_bem_solution(fname_bem)
dip, residual = fit_dipole(evoked, cov, bem, fname_trans,
min_dist=min_dist)
assert isinstance(residual, Evoked)
dist = _compute_depth(dip, fname_bem, fname_trans, subject, subjects_dir)
# Constraints are not exact, so bump the minimum slightly
assert (min_dist - 0.1 < (dist[0] * 1000.) < (min_dist + 1.))
pytest.raises(ValueError, fit_dipole, evoked, cov, fname_bem, fname_trans,
-1.)
def _compute_depth(dip, fname_bem, fname_trans, subject, subjects_dir):
"""Compute dipole depth."""
trans = _get_trans(fname_trans)[0]
bem = read_bem_solution(fname_bem)
surf = _bem_find_surface(bem, 'inner_skull')
points = surf['rr']
points = apply_trans(trans['trans'], points)
depth = _compute_nearest(points, dip.pos, return_dists=True)[1][0]
return np.ravel(depth)
@testing.requires_testing_data
def test_accuracy():
"""Test dipole fitting to sub-mm accuracy."""
evoked = read_evokeds(fname_evo)[0].crop(0., 0.,)
evoked.pick_types(meg=True, eeg=False)
evoked.pick_channels([c for c in evoked.ch_names[::4]])
for rad, perc_90 in zip((0.09, None), (0.002, 0.004)):
bem = make_sphere_model('auto', rad, evoked.info,
relative_radii=(0.999, 0.998, 0.997, 0.995))
src = read_source_spaces(fname_src)
fwd = make_forward_solution(evoked.info, None, src, bem)
fwd = convert_forward_solution(fwd, force_fixed=True, use_cps=True)
vertices = [src[0]['vertno'], src[1]['vertno']]
n_vertices = sum(len(v) for v in vertices)
amp = 10e-9
data = np.eye(n_vertices + 1)[:n_vertices]
data[-1, -1] = 1.
data *= amp
stc = SourceEstimate(data, vertices, 0., 1e-3, 'sample')
evoked.info.normalize_proj()
sim = simulate_evoked(fwd, stc, evoked.info, cov=None, nave=np.inf)
cov = make_ad_hoc_cov(evoked.info)
dip = fit_dipole(sim, cov, bem, min_dist=0.001)[0]
ds = []
for vi in range(n_vertices):
if vi < len(vertices[0]):
hi = 0
vertno = vi
else:
hi = 1
vertno = vi - len(vertices[0])
vertno = src[hi]['vertno'][vertno]
rr = src[hi]['rr'][vertno]
d = np.sqrt(np.sum((rr - dip.pos[vi]) ** 2))
ds.append(d)
# make sure that our median is sub-mm and the large majority are very
# close (we expect some to be off by a bit e.g. because they are
# radial)
assert ((np.percentile(ds, [50, 90]) < [0.0005, perc_90]).all())
@testing.requires_testing_data
def test_dipole_fixed(tmpdir):
"""Test reading a fixed-position dipole (from Xfit)."""
dip = read_dipole(fname_xfit_dip)
# print the representation of the object DipoleFixed
assert 'DipoleFixed ' in repr(dip)
_check_roundtrip_fixed(dip, tmpdir)
with pytest.warns(RuntimeWarning, match='extra fields'):
dip_txt = read_dipole(fname_xfit_dip_txt)
assert_allclose(dip.info['chs'][0]['loc'][:3], dip_txt.pos[0])
assert_allclose(dip_txt.amplitude[0], 12.1e-9)
with pytest.warns(RuntimeWarning, match='extra fields'):
dip_txt_seq = read_dipole(fname_xfit_seq_txt)
assert_allclose(dip_txt_seq.gof, [27.3, 46.4, 43.7, 41., 37.3, 32.5])
def _check_roundtrip_fixed(dip, tmpdir):
"""Check roundtrip IO for fixed dipoles."""
tempdir = str(tmpdir)
dip.save(op.join(tempdir, 'test-dip.fif.gz'))
dip_read = read_dipole(op.join(tempdir, 'test-dip.fif.gz'))
assert_allclose(dip_read.data, dip_read.data)
assert_allclose(dip_read.times, dip.times, atol=1e-8)
assert dip_read.info['xplotter_layout'] == dip.info['xplotter_layout']
assert dip_read.ch_names == dip.ch_names
for ch_1, ch_2 in zip(dip_read.info['chs'], dip.info['chs']):
assert ch_1['ch_name'] == ch_2['ch_name']
for key in ('loc', 'kind', 'unit_mul', 'range', 'coord_frame', 'unit',
'cal', 'coil_type', 'scanno', 'logno'):
assert_allclose(ch_1[key], ch_2[key], err_msg=key)
def test_get_phantom_dipoles():
"""Test getting phantom dipole locations."""
pytest.raises(ValueError, get_phantom_dipoles, 0)
pytest.raises(ValueError, get_phantom_dipoles, 'foo')
for kind in ('vectorview', 'otaniemi'):
pos, ori = get_phantom_dipoles(kind)
assert pos.shape == (32, 3)
assert ori.shape == (32, 3)
@testing.requires_testing_data
def test_confidence(tmpdir):
"""Test confidence limits."""
evoked = read_evokeds(fname_evo_full, 'Left Auditory', baseline=(None, 0))
evoked.crop(0.08, 0.08).pick_types(meg=True) # MEG-only
cov = make_ad_hoc_cov(evoked.info)
sphere = make_sphere_model((0., 0., 0.04), 0.08)
dip_py = fit_dipole(evoked, cov, sphere)[0]
fname_test = op.join(str(tmpdir), 'temp-dip.txt')
dip_py.save(fname_test)
dip_read = read_dipole(fname_test)
with pytest.warns(RuntimeWarning, match="'noise/ft/cm', 'prob'"):
dip_xfit = read_dipole(fname_dip_xfit_80)
for dip_check in (dip_py, dip_read):
assert_allclose(dip_check.pos, dip_xfit.pos, atol=5e-4) # < 0.5 mm
assert_allclose(dip_check.gof, dip_xfit.gof, atol=5e-1) # < 0.5%
assert_array_equal(dip_check.nfree, dip_xfit.nfree) # exact match
assert_allclose(dip_check.khi2, dip_xfit.khi2, rtol=2e-2) # 2% miss
assert set(dip_check.conf.keys()) == set(dip_xfit.conf.keys())
for key in sorted(dip_check.conf.keys()):
assert_allclose(dip_check.conf[key], dip_xfit.conf[key],
rtol=1.5e-1, err_msg=key)
# bdip created with:
# mne_dipole_fit --meas sample_audvis_trunc-ave.fif --set 1 --meg --tmin 40 --tmax 95 --bmin -200 --bmax 0 --noise sample_audvis_trunc-cov.fif --bem ../../subjects/sample/bem/sample-1280-1280-1280-bem-sol.fif --origin 0\:0\:40 --mri sample_audvis_trunc-trans.fif --bdip sample_audvis_trunc_set1.bdip # noqa: E501
# It gives equivalent results to .dip in non-dipole mode.
# xfit bdip created by taking sample_audvis_trunc-ave.fif, picking MEG
# channels, writing to disk (with MNE), then running xfit on 40-95 ms
# with a 3.3 ms step
@testing.requires_testing_data
@pytest.mark.parametrize('fname_dip_, fname_bdip_', [
(fname_dip, fname_bdip),
(fname_dip_xfit, fname_bdip_xfit),
])
def test_bdip(fname_dip_, fname_bdip_, tmpdir):
"""Test bdip I/O."""
# use text as veridical
with pytest.warns(None): # ignored fields
dip = read_dipole(fname_dip_)
# read binary
orig_size = os.stat(fname_bdip_).st_size
bdip = read_dipole(fname_bdip_)
# test round-trip by writing and reading, too
fname = tmpdir.join('test.bdip')
bdip.save(fname)
bdip_read = read_dipole(fname)
write_size = os.stat(str(fname)).st_size
assert orig_size == write_size
assert len(dip) == len(bdip) == len(bdip_read) == 17
dip_has_conf = fname_dip_ == fname_dip_xfit
for kind, this_bdip in (('orig', bdip), ('read', bdip_read)):
for key, atol in (
('pos', 5e-5),
('ori', 5e-3),
('gof', 0.5e-1),
('times', 5e-5),
('khi2', 1e-2)):
d = getattr(dip, key)
b = getattr(this_bdip, key)
if key == 'khi2' and dip_has_conf:
if d is not None:
assert_allclose(d, b, atol=atol,
err_msg='%s: %s' % (kind, key))
else:
assert b is None
if dip_has_conf:
# conf
conf_keys = _BDIP_ERROR_KEYS + ('vol',)
assert (set(this_bdip.conf.keys()) ==
set(dip.conf.keys()) ==
set(conf_keys))
for key in conf_keys:
d = dip.conf[key]
b = this_bdip.conf[key]
                assert_allclose(d, b, rtol=0.12,  # not so great, text I/O
err_msg='%s: %s' % (kind, key))
# Not stored
assert this_bdip.name is None
assert this_bdip.nfree is None
# Test whether indexing works
this_bdip0 = this_bdip[0]
_check_dipole(this_bdip0, 1)
run_tests_if_main()
| bsd-3-clause |
mblondel/scikit-learn | sklearn/datasets/tests/test_lfw.py | 50 | 6849 | """This test for LFW requires medium-size data downloading and processing
If the data has not already been downloaded by running the examples,
the tests won't run (skipped).
If the tests are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
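# For reference (illustrative values only, the actual content is randomized): the
# generated pairsDevTrain.txt mimics the LFW pairing layout with tab-separated
# fields, e.g. a same-person pair is written as "Abhati_Kepler\t0\t2" and a
# different-person pair as "John_Lee\t0\tOnur_Lopez\t1".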
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
load_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA)
def test_load_fake_lfw_people():
lfw_people = load_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion and no limit on the number of pictures per person
lfw_people = load_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
load_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100)
@raises(IOError)
def test_load_empty_lfw_pairs():
load_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = load_lfw_pairs(data_home=SCIKIT_LEARN_DATA)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = load_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
maminian/skewtools | scripts/pipe_exact_moments.py | 1 | 1608 | import h5py
from numpy import *
import sys
import scripts.skewtools as st
from matplotlib import pyplot
import scipy.special as sf # Special functions
#
nmax=1000
bzs = -sf.jn_zeros(1,nmax) # Tabulate the first nmax zeroes of Bessel -J_1 for the sums.
def m2(t,Pe=10.**4):
out = zeros(shape(t))
for i in range(nmax):
out += bzs[i]**-8 * exp(-bzs[i]**2*t)
# end for
out *= 128*Pe**2
out += -1./360*Pe**2 + 2*(1. + 1./48*Pe**2)*t
return out
# end def
def m3(t,Pe=10.**4):
out = zeros(shape(t))
for i in range(nmax):
out += (t*bzs[i]**-8 + 18*bzs[i]**-10 - 240*bzs[i]**-12)*exp(-bzs[i]**2*t)
# end for
out *= 128*Pe**3
out += 1./480*Pe**3*(t-17./112)
return out
# end def
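# Illustrative consequence of the two series above (a sketch added for clarity,
# not in the original script): for large t the exponential sums vanish, so
# m2 ~ 2*(1 + Pe**2/48)*t and m3 ~ Pe**3/480*t, and the skewness m3/m2**1.5
# computed further below decays roughly like t**-0.5.
def longtime_skewness_estimate(t, Pe=10.**4):
    return (Pe**3/480.*t) / (2.*(1. + Pe**2/48.)*t)**1.5
# end def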
# ---------------------------------------------
#
# Keep in mind these are for the flow u=2(1-r**2).
#
t,var,sk = st.importDatasets(sys.argv[1],'Time','Avgd_Variance','Avgd_Skewness')
fig,ax = pyplot.subplots(2,1)
tv = t[1:]
skv = sk[1:]
exact = m3(tv)/m2(tv)**1.5
ax[0].plot(tv,exact,label='Exact')
ax[0].scatter(tv,skv,facecolor=[0,0,0,0],edgecolor=[0,0,0,1],s=40,label='Monte Carlo')
ax[1].plot(tv,abs(exact-skv),color='red',label='Absolute error')
ax[1].plot(tv,abs(exact-skv)/abs(exact),color=[0.4,0,0.4],label='Relative error')
ax[0].set_xscale('log')
ax[1].set_xscale('log')
ax[1].set_yscale('log')
ax[0].set_xlim([tv[0],tv[-1]])
ax[1].set_xlim([tv[0],tv[-1]])
ax[0].set_ylim([-0.2,0.5])
ax[0].grid(True)
ax[1].grid(True)
ax[0].legend(loc='best')
ax[1].legend(loc='best')
pyplot.tight_layout()
pyplot.show(block=False)
| gpl-3.0 |
jenfly/atmos-tools | testing/testing-data-int_pres.py | 1 | 1921 | import numpy as np
import matplotlib.pyplot as plt
import xray
import atmos.utils as utils
import atmos.plots as ap
import atmos.data as dat
from atmos.constants import const as constants
from atmos.data import get_coord, int_pres
# ----------------------------------------------------------------------
# Read monthly mean climatologies and do some test calcs
# ----------------------------------------------------------------------
filename = 'data/more/ncep2_climatology_monthly.nc'
ds = dat.ncload(filename)
u = ds['u']
v = ds['v']
T = ds['T']
ps = ds['ps']
lat = get_coord(u, 'lat')
lon = get_coord(u, 'lon')
plev = get_coord(u, 'plev')
mon = ds['mon'].values
topo = dat.get_ps_clim(lat, lon) / 100
topo.units = 'hPa'
# ----------------------------------------------------------------------
# Correct for topography
u_orig = u
u = dat.correct_for_topography(u_orig, topo)
# ----------------------------------------------------------------------
# Integrate vertically with dp/g
# DataArray
u_int = int_pres(u, pdim=-3)
# ndarray
u_int2 = int_pres(u.values, plev*100, pdim=-3)
p0=1e5
g = constants.g.values
scale = g/p0
m = 7
k = 5
cint=2
plt.figure(figsize=(7,10))
plt.subplot(311)
ap.contourf_latlon(u[m,k], clev=cint)
plt.subplot(312)
ap.contourf_latlon(scale*u_int[m], clev=cint)
plt.subplot(313)
ap.contourf_latlon(scale*u_int2[m], lat, lon, clev=cint)
# ----------------------------------------------------------------------
# Integrate over subset
# pmin = 400e2
# pmax = 600e2
# m, k = 3, 5
pmin, pmax = 600e2, 1000e2
m, k = 3, 2
scale = g/(pmax-pmin)
cint=1
u_int = int_pres(u, pdim=-3, pmin=pmin, pmax=pmax)
u_int2 = int_pres(u.values, plev*100, pdim=-3, pmin=pmin, pmax=pmax)
plt.figure(figsize=(7,10))
plt.subplot(311)
ap.contourf_latlon(u[m,k], clev=cint)
plt.subplot(312)
ap.contourf_latlon(scale*u_int[m], clev=cint)
plt.subplot(313)
ap.contourf_latlon(scale*u_int2[m], lat, lon, clev=cint)
| mit |
Microsoft/hummingbird | hummingbird/ml/convert.py | 1 | 11483 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Hummingbird main (converters) API.
"""
from copy import deepcopy
import numpy as np
from .operator_converters import constants
from ._parse import parse_sklearn_api_model, parse_onnx_api_model
from ._topology import convert as topology_converter
from ._utils import torch_installed, lightgbm_installed, xgboost_installed
from .exceptions import MissingConverter, MissingBackend
from .supported import backends
# Invoke the registration of all our converters.
from . import operator_converters # noqa
# Set up the converter dispatcher.
from .supported import xgb_operator_list # noqa
from .supported import lgbm_operator_list # noqa
def _is_onnx_model(model):
"""
Function returning whether the input model is an ONNX model or not.
"""
return type(model).__name__ == "ModelProto"
def _supported_backend_check(backend):
"""
Function used to check whether the specified backend is supported or not.
"""
if backend is None:
raise MissingBackend("Backend: {}".format(backend))
def _supported_backend_check_config(model, backend, extra_config):
"""
Function used to check whether the specified backend and configuration pair is supported or not.
"""
assert torch_installed(), "To use Hummingbird you need to install torch."
import onnx
import torch
if backend is torch.jit.__name__ and constants.TEST_INPUT not in extra_config:
        raise RuntimeError("Backend {} requires test inputs. Please pass some test input to the converter.".format(backend))
if backend is onnx.__name__:
if constants.ONNX_INITIAL_TYPES not in extra_config and constants.TEST_INPUT not in extra_config:
raise RuntimeError("Cannot generate test input data for ONNX. Either pass some input data or the initial_types")
if _is_onnx_model(model) and constants.ONNX_INITIAL_TYPES not in extra_config and constants.TEST_INPUT not in extra_config:
raise RuntimeError(
"Cannot extract number of input features from the ONNX. Either pass some input data or the initial_types"
)
def _convert_sklearn(model, backend, test_input, device, extra_config={}):
"""
This function converts the specified *scikit-learn* (API) model into its *backend* counterpart.
The supported operators and backends can be found at `hummingbird.ml.supported`.
"""
assert model is not None
assert torch_installed(), "To use Hummingbird you need to install torch."
import torch
# Parse scikit-learn model as our internal data structure (i.e., Topology)
# We modify the scikit learn model during translation.
model = deepcopy(model)
topology = parse_sklearn_api_model(model, extra_config)
# Convert the Topology object into a PyTorch model.
hb_model = topology_converter(topology, backend, device, extra_config=extra_config)
return hb_model
def _convert_lightgbm(model, backend, test_input, device, extra_config={}):
"""
This function is used to generate a *backend* model from a given input [LightGBM] model.
[LightGBM]: https://lightgbm.readthedocs.io/
"""
assert (
lightgbm_installed()
), "To convert LightGBM models you need to install LightGBM (or `pip install hummingbird-ml[extra]`)."
return _convert_sklearn(model, backend, test_input, device, extra_config)
def _convert_xgboost(model, backend, test_input, device, extra_config={}):
"""
This function is used to generate a *backend* model from a given input [XGBoost] model.
[XGBoost]: https://xgboost.readthedocs.io/
"""
assert (
xgboost_installed()
    ), "To convert XGBoost models you need to install XGBoost (or `pip install hummingbird-ml[extra]`)."
# XGBoostRegressor and Classifier have different APIs for extracting the number of features.
# In the former case we need to infer them from the test_input.
if constants.N_FEATURES not in extra_config:
if "_features_count" in dir(model):
extra_config[constants.N_FEATURES] = model._features_count
elif test_input is not None:
if type(test_input) is np.ndarray and len(test_input.shape) == 2:
extra_config[constants.N_FEATURES] = test_input.shape[1]
else:
raise RuntimeError(
"XGBoost converter is not able to infer the number of input features.\
Apparently test_input is not an ndarray. \
                    Please file an issue at https://github.com/microsoft/hummingbird/."
)
else:
raise RuntimeError(
"XGBoost converter is not able to infer the number of input features.\
Please pass some test_input to the converter."
)
return _convert_sklearn(model, backend, test_input, device, extra_config)
def _convert_onnxml(model, backend, test_input, device, extra_config={}):
"""
This function converts the specified [ONNX-ML] model into its *backend* counterpart.
The supported operators can be found at `hummingbird.ml.supported`.
"""
assert model is not None
assert torch_installed(), "To use Hummingbird you need to install torch."
import onnx
# The conversion requires some test input for tracing.
    # Test inputs can be either provided or generated from the initial types.
# Get the initial types if any.
initial_types = None
if constants.ONNX_INITIAL_TYPES in extra_config:
initial_types = extra_config[constants.ONNX_INITIAL_TYPES]
# Generate some test input if necessary.
if test_input is None:
if backend == onnx.__name__:
assert (
initial_types is not None and not initial_types[0][1].shape is None
), "Cannot generate test input data. Initial_types do not contain shape information."
            assert len(initial_types[0][1].shape) == 2, "Hummingbird currently supports only inputs with len(shape) == 2."
from onnxconverter_common.data_types import FloatTensorType, Int32TensorType
test_input = np.random.rand(initial_types[0][1].shape[0], initial_types[0][1].shape[1])
extra_config[constants.N_FEATURES] = initial_types[0][1].shape[1]
if type(initial_types[0][1]) is FloatTensorType:
test_input = np.array(test_input, dtype=np.float32)
elif type(initial_types[0][1]) is Int32TensorType:
test_input = np.array(test_input, dtype=np.int32)
else:
                raise RuntimeError(
                    "Type {} not supported. Please file an issue on https://github.com/microsoft/hummingbird/.".format(
type(initial_types[0][1])
)
)
extra_config[constants.TEST_INPUT] = test_input
elif constants.N_FEATURES not in extra_config:
extra_config[constants.N_FEATURES] = test_input.shape[1]
# Set the initializers. Some converter requires the access to initializers.
initializers = {} if model.graph.initializer is None else {in_.name: in_ for in_ in model.graph.initializer}
extra_config[constants.ONNX_INITIALIZERS] = initializers
# Parse ONNX model as our internal data structure (i.e., Topology).
topology = parse_onnx_api_model(model)
# Convert the Topology object into a PyTorch model.
hb_model = topology_converter(topology, backend, device, extra_config=extra_config)
return hb_model
def convert(model, backend, test_input=None, device="cpu", extra_config={}):
"""
This function converts the specified input *model* into an implementation targeting *backend*.
*Convert* supports [Sklearn], [LightGBM], [XGBoost] and [ONNX] models.
For *LightGBM* and *XGBoost* currently only the Sklearn API is supported.
The detailed list of models and backends can be found at `hummingbird.ml.supported`.
    The *onnx* backend requires either a test_input or the initial types set through the extra_config parameter.
The *torch.jit* backend requires a test_input.
[Sklearn]: https://scikit-learn.org/
[LightGBM]: https://lightgbm.readthedocs.io/
[XGBoost]: https://xgboost.readthedocs.io/
[ONNX]: https://onnx.ai/
[ONNX-ML]: https://github.com/onnx/onnx/blob/master/docs/Operators-ml.md
[ONNX operators]: https://github.com/onnx/onnx/blob/master/docs/Operators.md
Args:
model: An input model
backend: The target for the conversion
test_input: Some input data used to trace the model execution.
For the ONNX backend the test_input size is supposed to be as large as the expected batch size.
Multiple inputs can be passed as `tuple` objects.
For the moment only (`numpy`)`arrays` are supported.
        device: The target device the model should be run on. This parameter is only used by the *torch** backends, and
the devices supported are the one supported by PyTorch, i.e., 'cpu' or 'cuda'.
extra_config: Extra configurations to be used by the individual operator converters.
The set of supported extra configurations can be found at `hummingbird.ml.supported`
Examples:
>>> pytorch_model = convert(sklearn_model,`torch`)
Returns:
A model implemented in *backend*, which is equivalent to the input model
"""
assert model is not None
# We destroy extra_config during conversion, we create a copy here.
extra_config = deepcopy(extra_config)
# Add test input as extra configuration for conversion.
if test_input is not None and constants.TEST_INPUT not in extra_config and len(test_input) > 0:
extra_config[constants.TEST_INPUT] = test_input
# Fix the test_input type
if constants.TEST_INPUT in extra_config:
if type(extra_config[constants.TEST_INPUT]) == list:
extra_config[constants.TEST_INPUT] = np.array(extra_config[constants.TEST_INPUT])
elif type(extra_config[constants.TEST_INPUT]) == tuple:
# We are passing multiple datasets.
assert all([len(input.shape) == 2 for input in extra_config[constants.TEST_INPUT]])
extra_config[constants.N_FEATURES] = sum([input.shape[1] for input in extra_config[constants.TEST_INPUT]])
extra_config[constants.N_INPUTS] = len(extra_config[constants.TEST_INPUT])
test_input = extra_config[constants.TEST_INPUT]
# We do some normalization on backends.
backend = backend.lower()
backend = backends[backend]
# Check whether we actually support the backend.
_supported_backend_check(backend)
_supported_backend_check_config(model, backend, extra_config)
if type(model) in xgb_operator_list:
return _convert_xgboost(model, backend, test_input, device, extra_config)
if type(model) in lgbm_operator_list:
return _convert_lightgbm(model, backend, test_input, device, extra_config)
if _is_onnx_model(model):
return _convert_onnxml(model, backend, test_input, device, extra_config)
return _convert_sklearn(model, backend, test_input, device, extra_config)
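# Illustrative usage sketch appended for clarity (not part of the original module):
# it shows how `convert` might typically be called on a scikit-learn model. The
# RandomForestClassifier choice and the random toy data are assumptions made only
# for this example.
def _example_convert_usage():
    from sklearn.ensemble import RandomForestClassifier
    # Train a small scikit-learn model on random data.
    X = np.random.rand(100, 20).astype(np.float32)
    y = np.random.randint(2, size=100)
    skl_model = RandomForestClassifier(n_estimators=10, max_depth=5).fit(X, y)
    # Translate it into a PyTorch-backed model and predict with it.
    hb_model = convert(skl_model, "torch")
    return hb_model.predict(X)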
| mit |
fabianp/scikit-learn | sklearn/linear_model/__init__.py | 270 | 3096 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
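# Illustrative usage of the estimators exported above (a sketch, not part of the
# original module; X and y stand for any feature matrix and target vector):
#     from sklearn.linear_model import Ridge
#     Ridge(alpha=1.0).fit(X, y).predict(X)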
| bsd-3-clause |
rekka/intro-fortran-2016 | web/python/fdm-neumann.py | 1 | 2977 | import math
import numpy as np
import matplotlib
matplotlib.use('SVG')
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
N = 5
M = 6
h = 1. / M
grid = [(h * i, h * j) for i in range(0, M + 1) for j in range(0, N + 1)]
sx, sy = zip(*grid)
ax.scatter(sx, sy, marker='+')
boundary = [(0, h * j) for j in range(1, N + 1)] + [(1., h * j) for j in range(1, N + 1)]
ax.scatter(*zip(*boundary), marker='s')
outside= [(-h, h * j) for j in range(0, N + 1)] + [(1. + h, h * j) for j in range(0, N + 1)]
ax.scatter(*zip(*outside), marker='*', color='gray')
initial = [(h * i, 0) for i in range(0, M + 1)]
ax.scatter(*zip(*initial), marker='D')
xi, ti = h * 2, h * 2
stencil = [(xi, ti), (xi - h, ti - h), (xi, ti - h), (xi + h, ti - h)]
stx, sty = zip(*stencil)
ax.scatter(stx, sty, marker='o')
stencil_labels = ['$(x_k, t_{i+1})$', '$(x_{k -1}, t_{i})$', '$(x_{k}, t_{i})$', '$(x_{k + 1}, t_{i})$']
for i in range(len(stencil)):
x, y = stencil[i]
ax.annotate(stencil_labels[i], xy=stencil[i], xytext=(x + 0.01, y + (- 0.05 if (i > 0) else 0.03)))
for i in range(1, len(stencil)):
x, y = stencil[i]
xx, yy = stencil[0]
dx = xx - x
dy = yy - y
ax.annotate("", xy=(xx - dx * 0.1, yy - dy * 0.1), xytext=(x + dx * 0.1, y + dy * 0.1),
arrowprops=dict(arrowstyle="-|>", color='black'))
# boundary stencil
xi, ti = h * M, h * 2
stencil = [(xi, ti), (xi - h, ti - h), (xi, ti - h), (xi + h, ti - h)]
stx, sty = zip(*stencil)
ax.scatter(stx[1:2], sty[1:2], marker='o')
stencil_labels = ['$(x_{M+1}, t_{i+1})$', '$(x_{M}, t_{i})$', '$(x_{M+1}, t_{i})$', '$(x_{M+2}, t_{i})$']
for i in range(len(stencil)):
x, y = stencil[i]
ax.annotate(stencil_labels[i], xy=stencil[i], xytext=(x + 0.01, y + (- 0.05 if (i > 0) else 0.03)), color=('gray' if i == 3 else 'black'))
for i in range(1, len(stencil)):
x, y = stencil[i]
xx, yy = stencil[0]
dx = xx - x
dy = yy - y
ax.annotate("", xy=(xx - dx * 0.1, yy - dy * 0.1), xytext=(x + dx * 0.1, y + dy * 0.1),
arrowprops=dict(arrowstyle="-|>", linestyle=('dashed' if i == 3 else 'solid'), color=('gray' if i == 3 else 'black')))
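# Note added for clarity (not in the original script): the gray starred point
# labelled (x_{M+2}, t_i) lies outside the domain, hence its dashed arrow; for a
# homogeneous Neumann condition du/dx = 0 such a ghost value is commonly
# eliminated with the mirror rule u_{M+2} = u_M.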
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_position('zero')
ax.spines['bottom'].set_position('zero')
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
ax.set_xticks([h * i for i in range(0, M + 1)])
ax.set_xticklabels(["$x_{}$".format(i + 1) for i in range(M+1)])
ax.set_yticks([h * i for i in range(0, N + 1)])
ax.set_yticklabels(["$t_{}$".format(i) for i in range(N+1)])
ax.spines['bottom'].set_bounds(-0.01, 1.01)
ax.spines['left'].set_bounds(-0.01, N * h + 0.01)
ax.set_xlim((0 - 0.05 - h, 1 + 0.05 + h))
ax.set_ylim((0 - 0.05, N * h + 0.05))
ax.set_xlabel('$x$', rotation=0)
ax.xaxis.set_label_coords(1.0, -0.025)
ax.set_ylabel('$t$', rotation=0)
ax.yaxis.set_label_coords(-0.025, 1.0)
plt.savefig('../img/fdm-neumann.svg')
# plt.show()
| mit |
xodus7/tensorflow | tensorflow/contrib/eager/python/examples/l2hmc/main.py | 10 | 7799 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""L2HMC on simple Gaussian mixture model with TensorFlow eager."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from absl import flags
import numpy as np
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.l2hmc import l2hmc
try:
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
tfe = tf.contrib.eager
def main(_):
tf.enable_eager_execution()
global_step = tf.train.get_or_create_global_step()
global_step.assign(1)
energy_fn, mean, covar = {
"scg": l2hmc.get_scg_energy_fn(),
"rw": l2hmc.get_rw_energy_fn()
}[FLAGS.energy_fn]
x_dim = 2
train_iters = 5000
eval_iters = 2000
eps = 0.1
n_steps = 10 # Chain length
n_samples = 200
record_loss_every = 100
dynamics = l2hmc.Dynamics(
x_dim=x_dim, minus_loglikelihood_fn=energy_fn, n_steps=n_steps, eps=eps)
learning_rate = tf.train.exponential_decay(
1e-3, global_step, 1000, 0.96, staircase=True)
optimizer = tf.train.AdamOptimizer(learning_rate)
checkpointer = tf.train.Checkpoint(
optimizer=optimizer, dynamics=dynamics, global_step=global_step)
if FLAGS.train_dir:
summary_writer = tf.contrib.summary.create_file_writer(FLAGS.train_dir)
if FLAGS.restore:
latest_path = tf.train.latest_checkpoint(FLAGS.train_dir)
checkpointer.restore(latest_path)
print("Restored latest checkpoint at path:\"{}\" ".format(latest_path))
sys.stdout.flush()
if not FLAGS.restore:
# Training
if FLAGS.use_defun:
      # Use `tfe.defun` to boost performance when there are lots of small ops
loss_fn = tfe.defun(l2hmc.compute_loss)
else:
loss_fn = l2hmc.compute_loss
samples = tf.random_normal(shape=[n_samples, x_dim])
for i in range(1, train_iters + 1):
loss, samples, accept_prob = train_one_iter(
dynamics,
samples,
optimizer,
loss_fn=loss_fn,
global_step=global_step)
if i % record_loss_every == 0:
print("Iteration {}, loss {:.4f}, x_accept_prob {:.4f}".format(
i, loss.numpy(),
accept_prob.numpy().mean()))
if FLAGS.train_dir:
with summary_writer.as_default():
with tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar("Training loss", loss, step=global_step)
print("Training complete.")
sys.stdout.flush()
if FLAGS.train_dir:
saved_path = checkpointer.save(
file_prefix=os.path.join(FLAGS.train_dir, "ckpt"))
print("Saved checkpoint at path: \"{}\" ".format(saved_path))
sys.stdout.flush()
# Evaluation
if FLAGS.use_defun:
    # Use tfe.defun to boost performance when there are lots of small ops
apply_transition = tfe.defun(dynamics.apply_transition)
else:
apply_transition = dynamics.apply_transition
samples = tf.random_normal(shape=[n_samples, x_dim])
samples_history = []
for i in range(eval_iters):
samples_history.append(samples.numpy())
_, _, _, samples = apply_transition(samples)
samples_history = np.array(samples_history)
print("Sampling complete.")
sys.stdout.flush()
# Mean and covariance of target distribution
mean = mean.numpy()
covar = covar.numpy()
ac_spectrum = compute_ac_spectrum(samples_history, mean, covar)
print("First 25 entries of the auto-correlation spectrum: {}".format(
ac_spectrum[:25]))
ess = compute_ess(ac_spectrum)
print("Effective sample size per Metropolis-Hastings step: {}".format(ess))
sys.stdout.flush()
if FLAGS.train_dir:
# Plot autocorrelation spectrum in tensorboard
plot_step = tfe.Variable(1, trainable=False, dtype=tf.int64)
for ac in ac_spectrum:
with summary_writer.as_default():
with tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar("Autocorrelation", ac, step=plot_step)
plot_step.assign(plot_step + n_steps)
if HAS_MATPLOTLIB:
# Choose a single chain and plot the trajectory
single_chain = samples_history[:, 0, :]
xs = single_chain[:100, 0]
ys = single_chain[:100, 1]
plt.figure()
plt.plot(xs, ys, color="orange", marker="o", alpha=0.6) # Trained chain
plt.savefig(os.path.join(FLAGS.train_dir, "single_chain.png"))
def train_one_iter(dynamics,
x,
optimizer,
loss_fn=l2hmc.compute_loss,
global_step=None):
"""Train the sampler for one iteration."""
loss, grads, out, accept_prob = l2hmc.loss_and_grads(
dynamics, x, loss_fn=loss_fn)
optimizer.apply_gradients(
zip(grads, dynamics.trainable_variables), global_step=global_step)
return loss, out, accept_prob
def compute_ac_spectrum(samples_history, target_mean, target_covar):
"""Compute autocorrelation spectrum.
Follows equation 15 from the L2HMC paper.
Args:
samples_history: Numpy array of shape [T, B, D], where T is the total
number of time steps, B is the batch size, and D is the dimensionality
of sample space.
target_mean: 1D Numpy array of the mean of target(true) distribution.
target_covar: 2D Numpy array representing a symmetric matrix for variance.
Returns:
Autocorrelation spectrum, Numpy array of shape [T-1].
"""
# Using numpy here since eager is a bit slow due to the loop
time_steps = samples_history.shape[0]
trace = np.trace(target_covar)
rhos = []
for t in range(time_steps - 1):
rho_t = 0.
for tau in range(time_steps - t):
v_tau = samples_history[tau, :, :] - target_mean
v_tau_plus_t = samples_history[tau + t, :, :] - target_mean
# Take dot product over observation dims and take mean over batch dims
rho_t += np.mean(np.sum(v_tau * v_tau_plus_t, axis=1))
rho_t /= trace * (time_steps - t)
rhos.append(rho_t)
return np.array(rhos)
def compute_ess(ac_spectrum):
"""Compute the effective sample size based on autocorrelation spectrum.
This follows equation 16 from the L2HMC paper.
Args:
ac_spectrum: Autocorrelation spectrum
Returns:
The effective sample size
"""
# Cutoff from the first value less than 0.05
cutoff = np.argmax(ac_spectrum[1:] < .05)
if cutoff == 0:
cutoff = len(ac_spectrum)
ess = 1. / (1. + 2. * np.sum(ac_spectrum[1:cutoff]))
return ess
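# Small sanity-check sketch added for illustration (not part of the original
# script): for a geometric autocorrelation spectrum rho_t = r**t the closed-form
# ESS per step is (1 - r) / (1 + r), which compute_ess should roughly reproduce
# up to its 0.05 cutoff.
def _ess_geometric_check(r=0.5, length=200):
  rhos = np.array([r ** t for t in range(length)])
  return compute_ess(rhos), (1. - r) / (1. + r)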
if __name__ == "__main__":
flags.DEFINE_string(
"train_dir",
default=None,
help="[Optional] Directory to store the training information")
flags.DEFINE_boolean(
"restore",
default=False,
help="[Optional] Restore the latest checkpoint from `train_dir` if True")
flags.DEFINE_boolean(
"use_defun",
default=False,
help="[Optional] Use `tfe.defun` to boost performance")
flags.DEFINE_string(
"energy_fn",
default="scg",
help="[Optional] The energy function used for experimentation"
"Other options include `rw`")
FLAGS = flags.FLAGS
tf.app.run(main)
| apache-2.0 |
detrout/debian-statsmodels | statsmodels/sandbox/regression/example_kernridge.py | 39 | 1232 |
import numpy as np
import matplotlib.pyplot as plt
from .kernridgeregress_class import GaussProcess, kernel_euclid
m,k = 50,4
upper = 6
scale = 10
xs = np.linspace(1,upper,m)[:,np.newaxis]
#xs1 = xs1a*np.ones((1,4)) + 1/(1.0+np.exp(np.random.randn(m,k)))
#xs1 /= np.std(xs1[::k,:],0) # normalize scale, could use cov to normalize
##y1true = np.sum(np.sin(xs1)+np.sqrt(xs1),1)[:,np.newaxis]
xs1 = np.sin(xs)#[:,np.newaxis]
y1true = np.sum(xs1 + 0.01*np.sqrt(np.abs(xs1)),1)[:,np.newaxis]
y1 = y1true + 0.10 * np.random.randn(m,1)
stride = 3  # use only some points as training points, e.g. 2 means every 2nd
xstrain = xs1[::stride,:]
ystrain = y1[::stride,:]
xstrain = np.r_[xs1[:m/2,:], xs1[m/2+10:,:]]
ystrain = np.r_[y1[:m/2,:], y1[m/2+10:,:]]
index = np.hstack((np.arange(m/2), np.arange(m/2+10,m)))
gp1 = GaussProcess(xstrain, ystrain, kernel=kernel_euclid,
ridgecoeff=5*1e-4)
yhatr1 = gp1.predict(xs1)
plt.figure()
plt.plot(y1true, y1,'bo',y1true, yhatr1,'r.')
plt.title('euclid kernel: true y versus noisy y and estimated y')
plt.figure()
plt.plot(index,ystrain.ravel(),'bo-',y1true,'go-',yhatr1,'r.-')
plt.title('euclid kernel: true (green), noisy (blue) and estimated (red) '+
'observations')
| bsd-3-clause |
sonnyhu/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 303 | 2841 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
#set nonzero entries to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
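# Tiny worked example of the reference implementation above (illustrative only,
# not used by the tests below): in a 3-node path graph with edge weights 1 and 2,
# the end-to-end shortest path has length 3, so entry [0, 2] of the result is 3.0.
def _floyd_warshall_toy_example():
    toy = np.array([[0., 1., 0.],
                    [1., 0., 2.],
                    [0., 2., 0.]])
    return floyd_warshall_slow(toy.copy())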
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix = dist_matrix + dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
| bsd-3-clause |
MAndelkovic/pybinding | pybinding/__init__.py | 1 | 2321 | from .__about__ import (__author__, __copyright__, __doc__, __email__, __license__, __summary__,
__title__, __url__, __version__)
import os
import sys
if sys.platform.startswith("linux"):
# When the _pybinding C++ extension is compiled with MKL, it requires specific
# dlopen flags on Linux: RTLD_GLOBAL. This will not play nice with some scipy
# modules, i.e. it will produce segfaults. As a workaround, specific modules
# are imported first with default dlopenflags.
# After that, RTLD_GLOBAL must be set for MKL to load properly. It's not possible
# to set RTLD_GLOBAL, import _pybinding and then reset to default flags. This is
# fundamentally an MKL issue which makes it difficult to resolve. This workaround
# is the best solution at the moment.
import scipy.sparse.linalg
import scipy.spatial
sys.setdlopenflags(sys.getdlopenflags() | os.RTLD_GLOBAL)
import _pybinding as _cpp
from .model import *
from .lattice import *
from .shape import *
from .modifier import *
from .results import *
from .chebyshev import *
from .parallel import parallel_for, parallelize
from . import (constants, greens, parallel, pltutils, results, solver, system, utils)
def tests(options=None, plugins=None):
"""Run the tests
Parameters
----------
options : list or str
Command line options for pytest (excluding target file_or_dir).
plugins : list
Plugin objects to be auto-registered during initialization.
"""
import pytest
import pathlib
import matplotlib as mpl
from .utils.misc import cd
args = options or []
if isinstance(args, str):
args = args.split()
module_path = pathlib.Path(__file__).parent
if (module_path / 'tests').exists():
# tests are inside installed package -> use read-only mode
args.append('--failpath=' + os.getcwd() + '/failed')
with cd(module_path), pltutils.backend('Agg'):
args += ['-c', str(module_path / 'tests/local.cfg'), str(module_path)]
error_code = pytest.main(args, plugins)
else:
# tests are in dev environment -> use development mode
with cd(module_path.parent), pltutils.backend('Agg'):
error_code = pytest.main(args, plugins)
return error_code or None
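# Hedged usage example (illustrative only; assumes pytest and the bundled test
# suite are available): from an interactive session the tests can be run with
# extra pytest options passed as a string, e.g.
#     import pybinding as pb
#     pb.tests("-q")   # forwards '-q' to pytest, returns its error code or None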
| bsd-2-clause |
schreiberx/sweet | benchmarks_sphere/rexi_mass_energy_galewsky_martinium/postprocessing_output_vort_err_vs_dt.py | 2 | 3235 | #! /usr/bin/env python3
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import re
from matplotlib.lines import Line2D
#
# First, use
# ./postprocessing.py > postprocessing_output.txt
# to generate the .txt file
#
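# The parsing below assumes (as can be read off the loop further down) that each
# data line of postprocessing_output_vort.txt holds five tab-separated fields,
# with the simulation name first, the error value second and the simulation time
# last, and that a new parameter group is announced by the line
# 'Running tests for new group:'.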
fig, ax = plt.subplots(figsize=(10,7))
ax.set_xscale("log", nonposx='clip')
ax.set_yscale("log", nonposy='clip')
#mode = 'simtime'
mode = 'dt'
with open('postprocessing_output_vort.txt') as f:
lines = f.readlines()
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
markers = []
for m in Line2D.markers:
try:
if len(m) == 1 and m != ' ' and m != '':
markers.append(m)
except TypeError:
pass
linestyles = ['-', '--', ':', '-.']
if len(sys.argv) > 1:
output_filename = sys.argv[1]
else:
output_filename = "./postprocessing_output_vort_err_vs_"+mode+".pdf"
if len(sys.argv) > 2:
plot_set = sys.argv[2:]
else:
plot_set = []
def plot(x, y, marker, linestyle, label):
# plot values and prev_name
print(label)
#print(values_err)
#print(values_time)
#print("")
if len(x) == 0:
return
if len(plot_set) != 0:
if prev_name not in plot_set:
return
ax.plot(x, y, marker=marker, linestyle=linestyle, label=label)
prev_name = ''
values_err = []
values_time = []
c = 2
for l in lines:
if l[-1] == '\n':
l = l[0:-1]
d = l.split("\t")
if d[0] == 'Running tests for new group:':
plot(values_time, values_err, markers[c % len(markers)], linestyles[c % len(linestyles)], prev_name)
for i, txt in enumerate(values_time):
ax.annotate("%.1f" % txt, (values_time[i]*1.03, values_err[i]*1.03))
prev_name = d[0]
values_err = []
values_time = []
c = c+1
continue
if len(d) != 5:
continue
if d[0] == 'SIMNAME':
continue
prev_name = d[0]
prev_name = prev_name.replace('script_ln2_b100_g9.81_h10000_f7.2921e-05_p0_a6371220_u0.0_rob1_fsph0_tsm_', '')
prev_name = prev_name.replace('_M0128_MPI_space01_time128', '')
prev_name = prev_name.replace('_M0128_MPI_space01_time001', '')
prev_name = prev_name.replace('_prcircle_nrm0_hlf0_pre1_ext00', '')
prev_name = prev_name.replace('_tso2_tsob2_REXICI', '')
prev_name = prev_name.replace('_C0040', '')
prev_name = prev_name.replace('_C0080', '')
prev_name = prev_name.replace('_C0160', '')
prev_name = prev_name.replace('_C0320', '')
prev_name = prev_name.replace('_C0640', '')
prev_name = prev_name.replace('_C1280', '')
prev_name = prev_name.replace('_C2560', '')
prev_name = prev_name.replace('_mr10.0_mi30.0', '')
prev_name = prev_name.replace('_n0064_sx50.0_sy50.0', '')
prev_name = prev_name.replace('_n0064', '')
prev_name = prev_name.replace('_sx50.0_sy50.0', '')
prev_name = re.sub(r"_mu.*", "", prev_name)
prev_name = re.sub(r"0000", "", prev_name)
values_err.append(float(d[1]))
if mode == 'simtime':
#
# SIMTIME
#
values_time.append(float(d[4]))
plt.xlabel("simulation time")
elif mode == 'dt':
#
# DT
#
m = re.search('_C([0-9]*)', d[0])
dt = float(m.group(1))
values_time.append(dt)
plt.xlabel("Timestep size")
plt.ylabel("Error")
plot(values_time, values_err, markers[c % len(markers)], linestyles[c % len(linestyles)], prev_name)
plt.legend()
plt.savefig(output_filename)
#plt.show()
| mit |
computational-neuroimaging-lab/Clark2015_AWS | spot-model/run_static_model.py | 1 | 4615 | # run_static_model.py
#
# Author: Daniel Clark, 2015
'''
Run the static (on-demand pricing) cost model defined in a simulation config
file and write per-dataset-size cost and time estimates to CSV.
'''
# Load config file and run static model
def load_and_run(config, av_zone, price_hr):
    '''
    Load the simulation config YAML, evaluate the static (on-demand) cost model
    for each dataset size, and return the results as a pandas DataFrame (also
    written to <config>/<price_hr>/<av_zone>/on_demand.csv under the current
    working directory).
    '''
# Import packages
import os
import numpy as np
import pandas as pd
import yaml
# Relative imports
from spot_price_model import calc_s3_model_costs
import utils
# Init variables
df_rows = []
cfg_dict = yaml.load(open(config, 'r'))
# Model parameters
down_rate = cfg_dict['down_rate']
in_gb = cfg_dict['in_gb']
instance_type = cfg_dict['instance_type']
jobs_per = cfg_dict['jobs_per']
num_jobs_arr = cfg_dict['num_jobs']
out_gb = cfg_dict['out_gb']
out_gb_dl = cfg_dict['out_gb_dl']
proc_time = cfg_dict['proc_time']
proc_time *= 60.0 # convert to seconds
product = cfg_dict['product']
up_rate = cfg_dict['up_rate']
# Evaluate for each dataset size (number of jobs)
for num_jobs in num_jobs_arr:
print '%d datasets...' % num_jobs
# Tune parameters for cost model
num_nodes = min(np.ceil(float(num_jobs)/jobs_per), 20)
num_iter = np.ceil(num_jobs/float((jobs_per*num_nodes)))
# Runtime parameters
run_time = num_iter*proc_time
wait_time = 0
pernode_cost = np.ceil(run_time/3600.0)*price_hr
        num_interrupts = 0
first_iter_time = proc_time
# Grab costs from s3 model
print av_zone
total_cost, instance_cost, ebs_storage_cost, s3_cost, \
s3_storage_cost, s3_req_cost, s3_xfer_cost, \
total_time, run_time, wait_time, \
xfer_up_time, s3_upl_time, s3_download_time = \
calc_s3_model_costs(run_time, wait_time, pernode_cost,
first_iter_time, num_jobs, num_nodes, jobs_per,
av_zone, in_gb, out_gb, up_rate, down_rate)
# Populate dictionary
row_dict = {'av_zone' : av_zone, 'down_rate' : down_rate,
'in_gb' : in_gb, 'instance_type' : instance_type,
'jobs_per' : jobs_per, 'num_datasets' : num_jobs,
'out_gb' : out_gb, 'out_gb_dl' : out_gb_dl,
'proc_time' : proc_time, 'product' : product,
'up_rate' : up_rate, 'price_hr' : price_hr,
'static_total_cost' : total_cost,
'static_instance_cost' : instance_cost,
'static_ebs_storage_cost' : ebs_storage_cost,
's3_total_cost' : s3_cost,
's3_storage_cost' : s3_storage_cost,
's3_req_cost' : s3_req_cost,
's3_xfer_cost' : s3_xfer_cost,
'static_total_time' : total_time,
'static_run_time' : run_time,
'static_wait_time' : wait_time,
'xfer_up_time' : xfer_up_time,
's3_upl_time' : s3_upl_time,
's3_dl_time' : s3_download_time}
# Convert to pandas series and add to list
row_series = pd.Series(row_dict)
df_rows.append(row_series)
# Create static model dataframe
static_df = pd.DataFrame.from_records(df_rows)
out_df_path = os.path.join(os.getcwd(), os.path.basename(config).split('.')[0], str(price_hr), av_zone, 'on_demand.csv')
if not os.path.exists(os.path.dirname(out_df_path)):
os.makedirs(os.path.dirname(out_df_path))
static_df.to_csv(out_df_path)
# Return dataframe
return static_df
# Make executable
if __name__ == '__main__':
# Import packages
import argparse
import os
# Init argparser
parser = argparse.ArgumentParser(description=__doc__)
# Required arguments
parser.add_argument('-c', '--config', nargs=1, required=True,
type=str, help='Filepath to the sim config file')
parser.add_argument('-p', '--price_hr', nargs=1, required=True,
type=float, help='Price per compute hour to assume')
parser.add_argument('-z', '--av_zone', nargs=1, required=True, type=str,
help='Specify availability zone of interest')
# Parse arguments
args = parser.parse_args()
# Init variables
config = args.config[0]
price_hr = args.price_hr[0]
av_zone = args.av_zone[0]
# Call static model function
static_df = load_and_run(config, av_zone, price_hr)
# Write to disk
out_df_path = os.path.join(os.getcwd(), os.path.basename(config).split('.')[0], str(price_hr), av_zone, 'on_demand.csv')
static_df.to_csv(out_df_path)
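# Example invocation (editor's sketch; the config filename, price, and zone
# below are placeholders, not values taken from this repository):
#   python run_static_model.py -c static_sim.yml -p 0.10 -z us-east-1b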
| mit |
I--P/numpy | numpy/linalg/linalg.py | 4 | 75738 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast, atleast_2d, intp, asanyarray, isscalar
)
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
pass
# Dealing with errors in _umath_linalg
_linalg_error_extobj = None
def _determine_error_states():
global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
_linalg_error_extobj = [bufsize, invalid_call_errmask, None]
_determine_error_states()
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj)
extobj[2] = callback
return extobj
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % len(a.shape))
def _assertRankAtLeast2(*arrays):
for a in arrays:
if len(a.shape) < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % len(a.shape))
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
for a in arrays:
if max(a.shape[-2:]) != min(a.shape[-2:]):
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _assertNoEmpty2d(*arrays):
for a in arrays:
if a.size == 0 and product(a.shape[-2:]) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorinv, einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine _gesv
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
"""
a, _ = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
if b.ndim == a.ndim - 1:
if a.shape[-1] == 0 and b.shape[-1] == 0:
# Legal, but the ufunc cannot handle the 0-sized inner dims
# let the ufunc handle all wrong cases.
a = a.reshape(a.shape[:-1])
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve1
else:
if b.size == 0:
if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0:
a = a[:,:1].reshape(a.shape[:-1] + (1,))
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5, -0.5]],
[[-5. , 2. ],
[ 3. , -1. ]]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if a.shape[-1] == 0:
# The inner array is 0x0, the ufunc cannot handle this case
return wrap(empty_like(a, dtype=result_t))
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t, copy=False))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
Upper or lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = gufunc(a, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
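# Editor's sketch of the two-step solve described in the Notes above (uses
# plain `solve` for clarity; a dedicated triangular solver would be faster):
#   >>> A = np.array([[4., 2.], [2., 3.]])
#   >>> b = np.array([1., 2.])
#   >>> L = np.linalg.cholesky(A)
#   >>> y = np.linalg.solve(L, b)           # L y = b
#   >>> x = np.linalg.solve(L.T.conj(), y)  # L.H x = y
#   >>> np.allclose(np.dot(A, x), b)
#   True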
# QR decomposition
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
If K = min(M, N), then
'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
'complete' : returns q, r with dimensions (M, M), (M, N)
'r' : returns r only with dimensions (K, N)
'raw' : returns h, tau with dimensions (N, M), (K,)
'full' : alias of 'reduced', deprecated
'economic' : returns h from 'raw', deprecated.
The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced' and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
Numpy 1.8 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
# 2013-04-01, 1.8
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning)
mode = 'reduced'
elif mode in ('e', 'economic'):
# 2013-04-01, 1.8
msg = "The 'economic' option is deprecated.",
warnings.warn(msg, DeprecationWarning)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assertRank2(a)
_assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
if t != result_t :
a = a.astype(result_t, copy=False)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t, copy=False)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M,) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines _syevd, _heevd
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288, 5.82842712])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t), copy=False)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will be of complex type, unless the imaginary part is
zero in which case it will be cast to a real type. When `a`
is real the resulting eigenvalues will be real (0 imaginary
part) or occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvals : eigenvalues of a non-symmetric array.
eigh : eigenvalues and eigenvectors of a symmetric or Hermitian
(conjugate symmetric) array.
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t, copy=False)
return w.astype(result_t, copy=False), wrap(vt)
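# Editor's sketch of the *left* eigenvectors mentioned in the Notes above:
# for a real matrix, the left eigenvectors of `a` are the (right)
# eigenvectors of `a.T`, with the same eigenvalues:
#   >>> a = np.array([[1., 2.], [3., 4.]])
#   >>> z, y = np.linalg.eig(a.T)
#   >>> np.allclose(np.dot(y[:, 0], a), z[0] * y[:, 0])
#   True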
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines _syevd,
_heevd
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return w, wrap(vt)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : (..., M, N) array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
s : (..., K) array
The singular values for every matrix, sorted in descending order.
v : { (..., N, N), (..., K, N) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine _gesdd
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m = a.shape[-2]
n = a.shape[-1]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vt = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return wrap(u), s, wrap(vt)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t), copy=False)
return s
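# Editor's sketch of the eigenvalue relationship stated in the Notes above
# (real case): the squared singular values of `a` are the eigenvalues of
# ``a.T a``:
#   >>> a = np.array([[1., 0.], [1., 1.], [0., 1.]])
#   >>> s = np.linalg.svd(a, compute_uv=False)
#   >>> np.allclose(np.sort(s**2), np.linalg.eigvalsh(np.dot(a.T, a)))
#   True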
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (..., M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x, compute_uv=False)
return s[..., 0]/s[..., -1]
else:
return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1))
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that are
greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
.. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * max(M.shape) * finfo(S.dtype).eps
return sum(S > tol)
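# Editor's sketch of passing the alternative threshold mentioned in the Notes
# above (the shape and random data are assumptions used only for illustration):
#   >>> M = np.random.randn(5, 3)
#   >>> S = np.linalg.svd(M, compute_uv=False)
#   >>> tol = S.max() * np.finfo(M.dtype).eps / 2. * np.sqrt(5 + 3 + 1.)
#   >>> np.linalg.matrix_rank(M, tol=tol)
#   3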
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.
res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
return wrap(res)
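# Editor's sketch of the SVD construction described in the Notes above
# (real, full-rank case with no singular values below the cutoff):
#   >>> a = np.random.randn(9, 6)
#   >>> u, s, vt = np.linalg.svd(a, full_matrices=False)
#   >>> manual = np.dot(vt.T, np.dot(np.diag(1. / s), u.T))
#   >>> np.allclose(manual, np.linalg.pinv(a))
#   True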
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
.. versionadded:: 1.6.0.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
if isscalar(sign):
sign = sign.astype(result_t)
else:
sign = sign.astype(result_t, copy=False)
if isscalar(logdet):
logdet = logdet.astype(real_t)
else:
logdet = logdet.astype(real_t, copy=False)
return sign, logdet
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
slogdet : Another way of representing the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
if isscalar(r):
r = r.astype(result_t)
else:
r = r.astype(result_t, copy=False)
return r
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
Singular values are set to zero if they are smaller than `rcond`
times the largest singular value of `a`.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(), (1,), (K,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print(m, c)
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0], :n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError('SVD did not converge in Linear Least Squares')
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
st = s[:min(n, m)].astype(result_real_t, copy=True)
return wrap(x), wrap(resids), results['rank'], st
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax or numpy.sum.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return values is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
if row_axis > col_axis:
row_axis -= 1
y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
result = op(svd(y, compute_uv=0), axis=-1)
return result
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
.. versionadded:: 1.10.0
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4
>>> LA.norm(b, np.inf)
9
>>> LA.norm(a, -np.inf)
0
>>> LA.norm(b, -np.inf)
2
>>> LA.norm(a, 1)
20
>>> LA.norm(b, 1)
7
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([6, 6])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if ((ord is None) or
(ord in ('f', 'fro') and ndim == 2) or
(ord == 2 and ndim == 1)):
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
ret = sqrt(sqnorm)
if keepdims:
ret = ret.reshape(ndim*[1])
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
        except Exception:
raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
return (x != 0).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
else:
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
if x.dtype.type is longdouble:
# Convert to a float type, so integer arrays give
# float results. Don't apply asfarray to longdouble arrays,
# because it will downcast to float64.
absx = abs(x)
else:
absx = x if isComplexType(x.dtype.type) else asfarray(x)
if absx.dtype is x.dtype:
absx = abs(absx)
else:
# if the type changed, we can safely overwrite absx
abs(absx, out=absx)
absx **= ord
return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
elif len(axis) == 2:
row_axis, col_axis = axis
if row_axis < 0:
row_axis += nd
if col_axis < 0:
col_axis += nd
if not (0 <= row_axis < nd and 0 <= col_axis < nd):
raise ValueError('Invalid axis %r for an array with shape %r' %
(axis, x.shape))
if row_axis == col_axis:
raise ValueError('Duplicate axes given.')
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
elif ord == 'nuc':
ret = _multi_svd_norm(x, row_axis, col_axis, sum)
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def multi_dot(arrays):
"""
Compute the dot product of two or more arrays in a single function call,
while automatically selecting the fastest evaluation order.
`multi_dot` chains `numpy.dot` and uses optimal parenthesization
of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
this can speed up the multiplication a lot.
If the first argument is 1-D it is treated as a row vector.
If the last argument is 1-D it is treated as a column vector.
The other arguments must be 2-D.
Think of `multi_dot` as::
def multi_dot(arrays): return functools.reduce(np.dot, arrays)
Parameters
----------
arrays : sequence of array_like
If the first argument is 1-D it is treated as row vector.
If the last argument is 1-D it is treated as column vector.
The other arguments must be 2-D.
Returns
-------
output : ndarray
Returns the dot product of the supplied arrays.
See Also
--------
dot : dot multiplication with two arguments.
References
----------
.. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
.. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication
Examples
--------
`multi_dot` allows you to write::
>>> from numpy.linalg import multi_dot
>>> # Prepare some data
    >>> A = np.random.random((10000, 100))
    >>> B = np.random.random((100, 1000))
    >>> C = np.random.random((1000, 5))
    >>> D = np.random.random((5, 333))
>>> # the actual dot multiplication
>>> multi_dot([A, B, C, D])
instead of::
>>> np.dot(np.dot(np.dot(A, B), C), D)
>>> # or
>>> A.dot(B).dot(C).dot(D)
Example: multiplication costs of different parenthesizations
------------------------------------------------------------
The cost for a matrix multiplication can be calculated with the
following function::
def cost(A, B): return A.shape[0] * A.shape[1] * B.shape[1]
Let's assume we have three matrices
    :math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
"""
n = len(arrays)
# optimization only makes sense for len(arrays) > 2
if n < 2:
raise ValueError("Expecting at least two arrays.")
elif n == 2:
return dot(arrays[0], arrays[1])
arrays = [asanyarray(a) for a in arrays]
# save original ndim to reshape the result array into the proper form later
ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
# Explicitly convert vectors to 2D arrays to keep the logic of the internal
# _multi_dot_* functions as simple as possible.
if arrays[0].ndim == 1:
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
_assertRank2(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
else:
order = _multi_dot_matrix_chain_order(arrays)
result = _multi_dot(arrays, order, 0, n - 1)
# return proper shape
if ndim_first == 1 and ndim_last == 1:
return result[0, 0] # scalar
elif ndim_first == 1 or ndim_last == 1:
return result.ravel() # 1-D
else:
return result
def _multi_dot_three(A, B, C):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
than `_multi_dot_matrix_chain_order`
"""
# cost1 = cost((AB)C)
cost1 = (A.shape[0] * A.shape[1] * B.shape[1] + # (AB)
A.shape[0] * B.shape[1] * C.shape[1]) # (--)C
    # cost2 = cost(A(BC))
cost2 = (B.shape[0] * B.shape[1] * C.shape[1] + # (BC)
A.shape[0] * A.shape[1] * C.shape[1]) # A(--)
if cost1 < cost2:
return dot(dot(A, B), C)
else:
return dot(A, dot(B, C))
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
    Return an np.array that encodes the optimal order of multiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
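# Worked example (illustrative only, values assumed from the docstring above):
# for A (10x100), B (100x5), C (5x50) the dimension list is p = [10, 100, 5, 50].
# Splitting at k=1 gives (AB)C with 10*100*5 + 10*5*50 = 7500 scalar
# multiplications, while k=0 gives A(BC) with 75000, so the returned order
# satisfies s[0, 2] == 1 and m[0, 2] == 7500 when return_costs=True.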
def _multi_dot(arrays, order, i, j):
"""Actually do the multiplication with the given order."""
if i == j:
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j))
| bsd-3-clause |
arnomoonens/Mussy-Robot | recommender.py | 1 | 6772 | #!/usr/bin/python
# -*- coding: utf8 -*-
import sys
import numpy as np
import pandas as pd
# from sklearn.metrics.pairwise import paired_distances
import re
import csv
from collections import Counter
from sklearn.feature_extraction import DictVectorizer
from scipy.spatial.distance import euclidean
# Idea: keep a profile that stores, for each feature, how important it is to the user
# A feature is, for example, a specific artist
# To suggest a new song, find one with similar features
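# Rough sketch of the idea (illustrative values only): if two liked songs share
# ArtistName "X", the one-hot column for "X" accumulates a weight of 2 in the
# profile built by recommend_song() (with the default score of 1 per song), so,
# all else being equal, unheard songs by "X" sit at a smaller Euclidean distance
# from the profile and are recommended first.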
class MusicRecommender(object):
"""Item-based recommender for songs."""
def __init__(self, songs_csv_path, feedback_file=None, preprocessed=True):
super(MusicRecommender, self).__init__()
self.songs_csv_path = songs_csv_path
self.feedback_file = feedback_file
if preprocessed:
self.df = pd.read_csv(self.songs_csv_path, index_col=0)
else:
self.df = self.preprocess(self.songs_csv_path)
self.rated_songs = {}
self.used_features = [
{'name': 'ArtistName', 'type': 'categorical'},
{'name': 'Tempo', 'type': 'numerical'},
{'name': 'KeySignature', 'type': 'categorical'},
{'name': 'Danceability', 'type': 'numerical'},
{'name': 'ArtistLocation', 'type': 'categorical'}
]
self.vectors = {}
for column_name in ['ArtistName', 'ArtistLocation', 'KeySignature']:
            counter = Counter(self.df[column_name].values)
vector = DictVectorizer(sparse=False)
vector.fit([dict(counter)])
self.vectors[column_name] = vector
return
def available_songs(self):
return len(self.df)
def preprocess(self, songs_csv_path):
"""Make a cleaned dataframe of the data in songs_csv_path."""
df = pd.read_csv(songs_csv_path, index_col=0, na_values={'Year': [0], 'ArtistLocation': ["b''"]})
regex1 = re.compile(r"b'(.*)'\"?")
regex2 = re.compile(r"b(.*)\"\"")
def clean_value(x):
try:
new_title, n_changes = regex1.subn(r"\1", x)
            except Exception:
return x
if n_changes > 0:
return new_title
else:
return regex2.sub(r"\1", x)
df['Title'] = df['Title'].apply(clean_value)
df['ArtistName'] = df['ArtistName'].apply(clean_value)
df['ArtistLocation'] = df['ArtistLocation'].apply(clean_value)
df['ArtistID'] = df['ArtistID'].apply(clean_value)
df['SongID'] = df['SongID'].apply(clean_value)
df['AlbumName'] = df['AlbumName'].apply(clean_value)
return df
def get_song_information(self, song_id):
"""Retrieve a song with id song_id from the dataframe."""
return self.df.loc[song_id]
def song_vector(self, song_id, extra_information=[], flattened=False):
"""Puts the song_id, extra_information and the features used for recommendation in a (flattened) array."""
song = self.df.loc[song_id]
song_vector = [] + extra_information
for feature in self.used_features:
name = feature['name']
if feature['type'] == 'categorical':
song_vector.append(self.vectors[name].transform([{song[name]: 1}])[0])
else:
song_vector.append(song[name])
if flattened:
return np.hstack(song_vector)
else:
return np.array(song_vector, dtype=np.object)
# song_vector = np.vectorize(song_vector, otypes=np.object)
def recommend_song(self, sample_size=50, include_heard_songs=False):
"""Recommend a song (by giving it's id) based on the user's preferences"""
songs_arr = np.array(list(self.rated_songs.values()), dtype=np.object)
songs_ids = list(self.rated_songs.keys())
songs_scores = songs_arr[:, 0] # How much the user liked it
songs_features = songs_arr[:, 1:]
# print("Making profile")
profile = []
for i, feature in enumerate(self.used_features):
if feature['type'] == 'categorical':
profile.extend(np.sum(songs_features[:, i] * songs_scores))
else:
profile.append(np.mean(songs_features[:, i] * songs_scores))
profile = np.array(profile)
# print("Making song vectors")
if include_heard_songs:
songs_subset = np.random.choice(self.df.index, min(sample_size, len(self.df)))
else:
songs_subset = np.random.choice(np.setdiff1d(self.df.index, songs_ids), min(sample_size, len(self.df) - len(songs_ids)), replace=False)
song_vectors = np.array([self.song_vector(i, flattened=True) for i in songs_subset])
def similarity(x):
"""Calculate the euclidean distance between the profile and the features of a song vector"""
return np.linalg.norm(profile - x)
# similarity = np.vectorize(similarity)
# print("Computing similarities")
# similarities = similarity(song_vectors)
similarities = [similarity(x) for x in song_vectors]
return songs_subset[np.argmin(similarities)]
def song_feedback(self, song_id, score=1):
"""Rate the song with id song_id. By default gives a score of 1."""
self.rated_songs[song_id] = self.song_vector(song_id, extra_information=[score])
return
def read_feedback(self):
"""Add the feedback of multiple songs from a file."""
if not(self.feedback_file):
print("Please give a value for feedback_file first.")
return
f = open(self.feedback_file)
reader = csv.reader(f)
next(reader) # Skip the header
for song_id, score in reader:
self.song_feedback(int(song_id), score=int(score))
f.close()
return
def save_feedback(self):
"""Save the feedback of songs to a file"""
if not(self.feedback_file):
print("Please give a value for feedback_file first.")
return
f = open(self.feedback_file, 'w')
writer = csv.writer(f)
writer.writerow(['id', 'score'])
        for song_id, vector in self.rated_songs.items():
            writer.writerow([song_id, vector[0]])
f.close()
return
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Please provide the path to a CSV file with songs information.")
sys.exit(1)
recommender = MusicRecommender(sys.argv[1])
recommender.song_feedback(5)
recommender.song_feedback(343)
recommender.song_feedback(234)
recommended = recommender.recommend_song()
song = recommender.get_song_information(recommended)
print("Recommended: '{}' by '{}' (id {})".format(song['Title'], song['ArtistName'], recommended))
| mit |
Unidata/MetPy | v1.0/_downloads/133a14b679bd8a5b58cfff249184243a/meteogram_metpy.py | 1 | 8853 | # Copyright (c) 2017 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Meteogram
=========
Plots time series data as a meteogram.
"""
import datetime as dt
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from metpy.calc import dewpoint_from_relative_humidity
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo
from metpy.units import units
def calc_mslp(t, p, h):
return p * (1 - (0.0065 * h) / (t + 0.0065 * h + 273.15)) ** (-5.257)
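# Example (values assumed for illustration): a probe at h = 292 m reporting
# t = 20 degC and p = 980 hPa gives calc_mslp(20., 980., 292.) of about 1014,
# in the same pressure units as the input (here hPa).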
# Make meteogram plot
class Meteogram:
""" Plot a time series of meteorological data from a particular station as a
meteogram with standard variables to visualize, including thermodynamic,
kinematic, and pressure. The functions below control the plotting of each
variable.
TO DO: Make the subplot creation dynamic so the number of rows is not
static as it is currently. """
def __init__(self, fig, dates, probeid, time=None, axis=0):
"""
Required input:
fig: figure object
dates: array of dates corresponding to the data
probeid: ID of the station
Optional Input:
time: Time the data is to be plotted
axis: number that controls the new axis to be plotted (FOR FUTURE)
"""
if not time:
time = dt.datetime.utcnow()
self.start = dates[0]
self.fig = fig
self.end = dates[-1]
self.axis_num = 0
self.dates = mpl.dates.date2num(dates)
self.time = time.strftime('%Y-%m-%d %H:%M UTC')
self.title = f'Latest Ob Time: {self.time}\nProbe ID: {probeid}'
def plot_winds(self, ws, wd, wsmax, plot_range=None):
"""
Required input:
ws: Wind speeds (knots)
wd: Wind direction (degrees)
wsmax: Wind gust (knots)
Optional Input:
plot_range: Data range for making figure (list of (min,max,step))
"""
# PLOT WIND SPEED AND WIND DIRECTION
        self.ax1 = self.fig.add_subplot(4, 1, 1)
ln1 = self.ax1.plot(self.dates, ws, label='Wind Speed')
self.ax1.fill_between(self.dates, ws, 0)
self.ax1.set_xlim(self.start, self.end)
if not plot_range:
plot_range = [0, 20, 1]
self.ax1.set_ylabel('Wind Speed (knots)', multialignment='center')
self.ax1.set_ylim(plot_range[0], plot_range[1], plot_range[2])
self.ax1.grid(b=True, which='major', axis='y', color='k', linestyle='--',
linewidth=0.5)
ln2 = self.ax1.plot(self.dates, wsmax, '.r', label='3-sec Wind Speed Max')
ax7 = self.ax1.twinx()
ln3 = ax7.plot(self.dates, wd, '.k', linewidth=0.5, label='Wind Direction')
ax7.set_ylabel('Wind\nDirection\n(degrees)', multialignment='center')
ax7.set_ylim(0, 360)
ax7.set_yticks(np.arange(45, 405, 90))
ax7.set_yticklabels(['NE', 'SE', 'SW', 'NW'])
lines = ln1 + ln2 + ln3
labs = [line.get_label() for line in lines]
ax7.xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC'))
ax7.legend(lines, labs, loc='upper center',
bbox_to_anchor=(0.5, 1.2), ncol=3, prop={'size': 12})
def plot_thermo(self, t, td, plot_range=None):
"""
Required input:
T: Temperature (deg F)
TD: Dewpoint (deg F)
Optional Input:
plot_range: Data range for making figure (list of (min,max,step))
"""
# PLOT TEMPERATURE AND DEWPOINT
if not plot_range:
plot_range = [10, 90, 2]
        self.ax2 = self.fig.add_subplot(4, 1, 2, sharex=self.ax1)
ln4 = self.ax2.plot(self.dates, t, 'r-', label='Temperature')
self.ax2.fill_between(self.dates, t, td, color='r')
self.ax2.set_ylabel('Temperature\n(F)', multialignment='center')
self.ax2.grid(b=True, which='major', axis='y', color='k', linestyle='--',
linewidth=0.5)
self.ax2.set_ylim(plot_range[0], plot_range[1], plot_range[2])
ln5 = self.ax2.plot(self.dates, td, 'g-', label='Dewpoint')
self.ax2.fill_between(self.dates, td, self.ax2.get_ylim()[0], color='g')
ax_twin = self.ax2.twinx()
ax_twin.set_ylim(plot_range[0], plot_range[1], plot_range[2])
lines = ln4 + ln5
labs = [line.get_label() for line in lines]
ax_twin.xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC'))
self.ax2.legend(lines, labs, loc='upper center',
bbox_to_anchor=(0.5, 1.2), ncol=2, prop={'size': 12})
def plot_rh(self, rh, plot_range=None):
"""
Required input:
RH: Relative humidity (%)
Optional Input:
plot_range: Data range for making figure (list of (min,max,step))
"""
# PLOT RELATIVE HUMIDITY
if not plot_range:
plot_range = [0, 100, 4]
        self.ax3 = self.fig.add_subplot(4, 1, 3, sharex=self.ax1)
self.ax3.plot(self.dates, rh, 'g-', label='Relative Humidity')
self.ax3.legend(loc='upper center', bbox_to_anchor=(0.5, 1.22), prop={'size': 12})
self.ax3.grid(b=True, which='major', axis='y', color='k', linestyle='--',
linewidth=0.5)
self.ax3.set_ylim(plot_range[0], plot_range[1], plot_range[2])
self.ax3.fill_between(self.dates, rh, self.ax3.get_ylim()[0], color='g')
self.ax3.set_ylabel('Relative Humidity\n(%)', multialignment='center')
self.ax3.xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC'))
axtwin = self.ax3.twinx()
axtwin.set_ylim(plot_range[0], plot_range[1], plot_range[2])
def plot_pressure(self, p, plot_range=None):
"""
Required input:
P: Mean Sea Level Pressure (hPa)
Optional Input:
plot_range: Data range for making figure (list of (min,max,step))
"""
# PLOT PRESSURE
if not plot_range:
plot_range = [970, 1030, 2]
        self.ax4 = self.fig.add_subplot(4, 1, 4, sharex=self.ax1)
self.ax4.plot(self.dates, p, 'm', label='Mean Sea Level Pressure')
self.ax4.set_ylabel('Mean Sea\nLevel Pressure\n(mb)', multialignment='center')
self.ax4.set_ylim(plot_range[0], plot_range[1], plot_range[2])
axtwin = self.ax4.twinx()
axtwin.set_ylim(plot_range[0], plot_range[1], plot_range[2])
axtwin.fill_between(self.dates, p, axtwin.get_ylim()[0], color='m')
axtwin.xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC'))
self.ax4.legend(loc='upper center', bbox_to_anchor=(0.5, 1.2), prop={'size': 12})
self.ax4.grid(b=True, which='major', axis='y', color='k', linestyle='--',
linewidth=0.5)
# OTHER OPTIONAL AXES TO PLOT
# plot_irradiance
# plot_precipitation
# set the starttime and endtime for plotting, 24 hour range
endtime = dt.datetime(2016, 3, 31, 22, 0, 0, 0)
starttime = endtime - dt.timedelta(hours=24)
# Height of the station to calculate MSLP
hgt_example = 292.
# Parse dates from .csv file, knowing their format as a string and convert to datetime
def parse_date(date):
return dt.datetime.strptime(date.decode('ascii'), '%Y-%m-%d %H:%M:%S')
testdata = np.genfromtxt(get_test_data('timeseries.csv', False), names=True, dtype=None,
usecols=list(range(1, 8)),
converters={'DATE': parse_date}, delimiter=',')
# Temporary variables for ease
temp = testdata['T']
pres = testdata['P']
rh = testdata['RH']
ws = testdata['WS']
wsmax = testdata['WSMAX']
wd = testdata['WD']
date = testdata['DATE']
# ID For Plotting on Meteogram
probe_id = '0102A'
data = {'wind_speed': (np.array(ws) * units('m/s')).to(units('knots')),
'wind_speed_max': (np.array(wsmax) * units('m/s')).to(units('knots')),
'wind_direction': np.array(wd) * units('degrees'),
'dewpoint': dewpoint_from_relative_humidity((np.array(temp) * units.degC).to(units.K),
np.array(rh) / 100.).to(units('degF')),
'air_temperature': (np.array(temp) * units('degC')).to(units('degF')),
'mean_slp': calc_mslp(np.array(temp), np.array(pres), hgt_example) * units('hPa'),
'relative_humidity': np.array(rh), 'times': np.array(date)}
fig = plt.figure(figsize=(20, 16))
add_metpy_logo(fig, 250, 180)
meteogram = Meteogram(fig, data['times'], probe_id)
meteogram.plot_winds(data['wind_speed'], data['wind_direction'], data['wind_speed_max'])
meteogram.plot_thermo(data['air_temperature'], data['dewpoint'])
meteogram.plot_rh(data['relative_humidity'])
meteogram.plot_pressure(data['mean_slp'])
fig.subplots_adjust(hspace=0.5)
plt.show()
| bsd-3-clause |
loli/semisupervisedforests | benchmarks/bench_sgd_regression.py | 283 | 5569 | """
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
# License: BSD 3 clause
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("n_iter", n_iter)
print("- benchmarking A-SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05,
average=(n_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
pl.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("RMSE")
pl.title("Test error - %d features" % list_n_features[j])
i += 1
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("Time [sec]")
pl.title("Training time - %d features" % list_n_features[j])
i += 1
pl.subplots_adjust(hspace=.30)
pl.show()
| bsd-3-clause |
googleinterns/cloud-monitoring-accessible-charts | clustering.py | 1 | 26616 | import json
from statistics import median
import numpy as np
from sklearn.cluster import KMeans, DBSCAN
from sklearn.preprocessing import scale
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics import pairwise_distances, pairwise_distances_argmin_min
from sklearn.metrics import pairwise_distances_argmin
from sklearn.decomposition import PCA
from scipy.spatial import distance
import count
# These params where determined by testing various k, eps produced by running
# tuning_k and tuning_eps respectively, and picking the parameters that were at
# the curve for their respective functions and produced reasonable clusters.
KMEANS_RATIO = 20
KMEANS_MIN = 6
EPS_CORRELATION_NONE = 2.4
EPS_PROXIMITY_NONE = 1.3
EPS_PROXIMITY_ONE_HOT = 2.1
EPS_CORRELATION_ONE_HOT = 2.7
# It is common to reinitialize the centroids for k-means 10 times.
NUM_RUNS = 10
def time_series_array(data, key):
"""Converts the time series data to an np array.
Args:
data: A timeSeries object.
key: The key for the time series labels that are saved. If None,
then all label values may be kept, otherwise only label
values with that key are kept.
Returns:
        An np array where each row represents a resource and each column
        represents the value at time t, a dictionary mapping each label
        to an index, an np array where each row represents a time series
        and each column represents a label as defined in the label
        dictionary, a dictionary mapping each date to its index, and the
        [min, max] range of the original values.
"""
first_val = data["timeSeries"][0]["points"][0]["value"]["doubleValue"]
date_to_index, label_to_count, min_max = {}, {}, [first_val, first_val]
count.get_dates_labels(data, date_to_index, label_to_count, min_max, key)
if not key:
labels = list(filter(lambda k: label_to_count[k] >= 2 and
label_to_count[k] < len(data["timeSeries"]),
label_to_count.keys()))
else:
labels = list(label_to_count.keys())
label_to_index = dict(zip(labels, range(len(labels))))
num_instances = len(data["timeSeries"])
num_times = len(date_to_index)
data_array = [0]*num_instances
instance_labels = [0]*num_instances
for index, time_series in enumerate(data["timeSeries"]):
points = [-1]*num_times
for point in time_series["points"]:
start_time = point["interval"]["startTime"]
points[date_to_index[start_time]] = scale_to_range(
min_max, point["value"]["doubleValue"])
data_array[index] = points
encoding = [0]*len(label_to_index)
count.one_hot_encoding(time_series["metric"]["labels"],
label_to_index, encoding)
count.one_hot_encoding(time_series["resource"]["labels"],
label_to_index, encoding)
instance_labels[index] = encoding
data_array = np.array(data_array)
instance_labels = np.array(instance_labels)
return data_array, label_to_index, instance_labels, date_to_index, min_max
def scale_to_range(min_max_old, element, min_max_new=[0, 10]):
"""Scale element from min_max_new to the new range, min_max_old.
Args:
min_max_old: Original range of the data, [min, max].
element: Integer that will be scaled to the new range, must be
within the old_range.
min_max_new: New range of the data.
Returns:
element scaled to the new range.
"""
new_range = min_max_new[1] - min_max_new[0]
old_range = min_max_old[1] - min_max_old[0]
return ((element - min_max_old[0]) * new_range) / old_range
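# For example (assumed values): scale_to_range([0, 100], 50) maps 50 from the
# original 0-100 range onto the default 0-10 range and returns 5.0.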
def fill_with_median(data):
"""Fills missing values (-1) in a time series to the median of the
time series.
Args:
data: A list where each row is a resource and each column is a
time.
"""
data_array = data
for row in data_array:
med = median(list(filter(lambda elt: elt != -1, row)))
for i, val in enumerate(row):
if val == -1:
row[i] = med
return data_array
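# For example (assumed values): a row [-1, 2, 4] has median 3.0 over its known
# points, so fill_with_median turns it into [3.0, 2, 4].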
def preprocess(data, label_encoding, similarity, ts_to_labels, algorithm):
"""Updates the data according to label_encoding and similarity.
Args:
data: Array where each row is a time series and each column is
a date.
label_encoding: The method used for encoding the labels. Must
be "none" or "one-hot".
similarity: The similarity measure used for scaling the data
before clustering. Must be "proximity" or "correlation".
ts_to_labels: Array where each row is a time series and each
column is a label.
algorithm: The algorithm that will be run on data.
Returns:
An np array updated according to label_encoding, similarity and
algorithm.
"""
updated_data = data
if similarity == "correlation":
updated_data = scale_to_zero(updated_data)
if algorithm == "dbscan":
if similarity == "correlation":
pca = PCA(n_components=.75)
elif similarity == "proximity":
pca = PCA(n_components=.85)
pca.fit(updated_data)
updated_data = pca.transform(updated_data)
if label_encoding == "one-hot":
updated_data = np.concatenate((updated_data, ts_to_labels), axis=1)
return updated_data
def scale_to_zero(data):
"""Scales the data such that the minimum of each time series is at
zero.
Args:
data: A timeSeries object.
Returns:
An np array of the scaled data.
"""
min_data = np.min(data)
scaled_data = [arr - abs(min_data - val) for arr, val in zip(
data, data.min(axis=1))]
return scaled_data + abs(min_data)
def tuning_k(data):
"""Runs k-means clustering with different values of k and creates a
list of the sum distances of samples to their cluster center.
Args:
data: A timeSeries object.
Returns:
A list where the nth entry represents the sum of squared
distances of samples to their cluster center when k-means is
run with k set to n+1.
"""
distances = []
data = scale(data)
for i in range(1, len(data) // 2):
kmeans_result = KMeans(n_clusters=i, random_state=0).fit(data)
distances.append(kmeans_result.inertia_)
return distances
def tuning_eps(data):
"""Runs nearest neighbors to identify the distance of the closest
neighbor of each time series.
Args:
data: A timeSeries object.
Returns:
A sorted list of the distance of each time series to its
closest neighbor.
"""
neighbors = NearestNeighbors(n_neighbors=2)
fitted = neighbors.fit(data)
distances, _ = fitted.kneighbors(data)
return (np.sort(distances[:, 1])).tolist()
def tuning_eps_options(data, start, end, min_clusters):
"""Returns a list of tuples (num_outliers, num_clusters, eps) such
    that running dbscan with each eps results in more than min_clusters
    clusters. The options are sorted by the number of outliers produced.
Args:
data: An array where each row is a time series and each column
is a date.
start: Smallest eps that is tested.
end: Largest eps that is tested.
min_clusters: The minimum number of clusters that should be
produced by dbscan.
Returns:
A list of tuples where each tuple has num_outliers, num_clusters,
and eps.
"""
ops = []
while start <= end:
dbscan_result = DBSCAN(eps=start, min_samples=2).fit(data)
num_outliers = len(np.where(dbscan_result.labels_ == -1)[0])
num_clusters = len(np.unique(dbscan_result.labels_))
if num_clusters > min_clusters:
ops.append((num_outliers, num_clusters, start))
start += 0.1
ops.sort()
return ops
def kmeans(data, outlier):
"""Generates clusters using kmeans.
Args:
data: A timeSeries object.
outlier: Indicates whether outliers are labeled as outliers.
Returns:
A list of cluster labels such that the nth element in the list
represents the cluster the nth element was placed in. Cluster
labels are integers.
"""
data = scale(data)
tuning_ratio = len(data) // KMEANS_RATIO
kmeans_result = KMeans(n_clusters=tuning_ratio + KMEANS_MIN,
random_state=0).fit(data)
labels = np.copy(kmeans_result.labels_) + 1
if outlier == "on":
outliers_kmeans(data, labels, kmeans_result.cluster_centers_)
return labels
def dbscan(data, similarity, encoding, outlier):
"""Generates clusters using DBSCAN.
Args:
data: A timeSeries object.
similarity: The similarity measure used for scaling the data
before clustering. Must be "proximity" or "correlation".
        encoding: The method used for encoding the labels. Must
be "none" or "one-hot".
outlier: Indicates whether outliers are labeled as outliers.
Returns:
A list of cluster labels such that the nth element in the list
represents the cluster the nth element was placed in. Cluster
labels are integers.
"""
if similarity == "correlation" and encoding == "none":
eps_tuned = EPS_CORRELATION_NONE
elif similarity == "correlation" and encoding == "one-hot":
eps_tuned = EPS_CORRELATION_ONE_HOT
elif encoding == "none":
eps_tuned = EPS_PROXIMITY_NONE
else:
        eps_tuned = EPS_PROXIMITY_ONE_HOT
dbscan_result = DBSCAN(eps=eps_tuned, min_samples=2).fit(data)
cluster_assignment = np.copy(dbscan_result.labels_)
medians, _ = cluster_medians(data, cluster_assignment)
outlier_indexes = np.where(cluster_assignment == -1)[0]
cluster_assignment += 1
closest = pairwise_distances_argmin(data[outlier_indexes, :], medians)
for index, index_ts in enumerate(outlier_indexes):
if outlier == "on":
cluster_assignment[index_ts] = - (closest[index] + 1)
else:
cluster_assignment[index_ts] = closest[index] + 1
return cluster_assignment
def cluster_medians(data, cluster_assignment):
"""Calculates the cluster medians based on the cluster_assignment.
Args:
data: Array where each row is a time series and each column is
a date.
cluster_assignment: An array of cluster labels where the nth
element is the cluster the nth time series was placed in.
Returns:
        A tuple (medians, valid) where medians is an array whose nth
        element is the median of the nth cluster when valid is True,
        meaning there was a valid assignment; otherwise returns an
        empty list and False. """
clusters = {}
for index in np.where(cluster_assignment >= 0)[0]:
label = cluster_assignment[index]
if label not in clusters:
clusters[label] = [data[index]]
else:
clusters[label] = np.append(clusters[label], [data[index]], axis=0)
medians = []
for i in range(len(clusters)):
if i not in clusters:
return [], False
medians.append(np.median(clusters[i], axis=0))
return np.array(medians), True
def cluster_to_labels(cluster_labels, resource_to_label):
"""Returns a list of the percentage of elements in a cluster that
share the same label.
Args:
cluster_labels: An array where the ith element indicates what
cluster the ith time series was assigned to.
        resource_to_label: An array where entry [i][j] is 1 if time
            series i has the label with index j and 0 otherwise.
Returns:
A 2d list where each entry [i][j] represents the percentage of
time series in cluster i that have label j.
"""
cluster_count = {}
for label in cluster_labels:
if label not in cluster_count:
cluster_count[label] = 1
else:
cluster_count[label] += 1
ordered_cluster_count = sorted(cluster_count.items(), key=lambda x: x[0])
values = [x[1] for x in ordered_cluster_count]
cluster_to_label = np.zeros((len(cluster_count), len(resource_to_label[0])))
for index, element in enumerate(cluster_labels):
cluster_to_label[element] += resource_to_label[index]
return np.multiply(cluster_to_label.T, 1/np.array(values)).T
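# For example (assumed counts): if cluster 0 holds 4 time series and 3 of them
# carry label j, the returned entry [0][j] is 3/4 = 0.75.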
def sort_labels(label_dict, cluster_labels, ts_to_labels):
"""Returns a list of the sorted labels, an np array of clusters to
labels, and an np array of time series to labels. In the np arrays,
the columns are sorted according to the sorted labels.
Args:
cluster_labels: An array where each row is a cluster and each
column is a label.
label_dict: A dictionary where each key is a system label and
each value is the index of the label in cluster_labels and
ts_to_labels.
ts_to_labels: An array where each row is a time series and each
column is a label.
"""
system_labels = list(label_dict.keys())
ordered = np.argsort(np.array(system_labels))
ordered_labels = [0]*len(system_labels)
ordered_ts_labels = np.zeros(ts_to_labels.shape)
ordered_cluster_labels = np.zeros(cluster_labels.shape)
for i, elt in enumerate(ordered):
ordered_ts_labels[:, i] = ts_to_labels[:, elt]
ordered_cluster_labels[:, i] = cluster_labels[:, elt]
ordered_labels[i] = system_labels[elt]
return ordered_labels, ordered_cluster_labels, ordered_ts_labels
def outliers_kmeans(data, ts_cluster_labels, cluster_centers):
"""Updates ts_cluster_labels to reflect whether a time series is an
outlier in the cluster it was assigned to. '-n' indicates an outlier
in cluster n.
Args:
data: Array where each row is a time series and each column is
a date.
ts_cluster_labels: Array where the ith element is the cluster
the ith time series was placed in.
cluster_centers: The centroids that were outputted when the
clustering algorithm was run.
"""
for index, label in enumerate(ts_cluster_labels):
euc_dist = distance.euclidean(data[index], cluster_centers[label - 1])
if euc_dist > 6.75:
ts_cluster_labels[index] = -label
def kmeans_kmedians(data, label_dict, ts_to_labels, algorithm, outlier):
"""Runs k-means with constraints or k-medians based on algorithm.
Uses a k-means++ initialization.
Args:
data: An np array where each row is a time series and each
column is a time.
label_dict: A dictionary where the keys are labels and the
values are the indexes of the labels in data.
ts_to_labels: An array where each row is a timeSeries and each
column is a label.
        algorithm: The algorithm run on data, must be
            "k-means-constrained" or "k-medians".
outlier: Indicates whether outliers are labeled as outliers.
Returns:
An np array where the ith element is the cluster the ith time
series was placed in.
"""
must_link, can_not_link = {}, {}
if algorithm == "k-means-constrained":
must_link, can_not_link = make_constraints(ts_to_labels)
num_clusters = (len(data) // KMEANS_RATIO) + KMEANS_MIN
clusters_distances = []
for run_num in range(NUM_RUNS):
centroids = k_means_init(data, num_clusters, run_num)
while True:
old_centroids = centroids.copy()
clusters, ts_to_cluster = update_clusters(data, centroids,
must_link, can_not_link)
if algorithm == "k-means-constrained":
valid_clusters = update_centroids(data, clusters, centroids)
if algorithm == "k-medians":
ts_cluster_ar = np.array([v for k, v in sorted(
ts_to_cluster.items(), key=lambda item: item[0])])
centroids, valid_clusters = cluster_medians(data, ts_cluster_ar)
if not valid_clusters:
break
if np.array_equal(old_centroids, centroids):
assignment = [v for k, v in sorted(ts_to_cluster.items(),
key=lambda item: item[0])]
center_dist = 0
for index, cluster in enumerate(assignment):
center_dist += distance.euclidean(data[index],
centroids[cluster])
clusters_distances.append([center_dist, assignment, centroids])
break
clusters_distances.sort()
result = np.array(clusters_distances[0][1]) + 1
if outlier == "on":
outliers_kmeans(data, result, clusters_distances[0][2])
return result
def update_clusters(data, centroids, must_link, can_not_link):
"""Updates the cluster assignments based on the centroids and the
must_link and can_not_link constraints. Assigns each time series
to the closest centroid which does not violate the constraints.
Args:
data: An np array where each row is a time series and each
column is a time.
centroids: A list of the centroids.
must_link: A dictionary mapping time series that must link.
can_not_link: A dictionary mapping time series that can't link.
Returns:
clusters: A list where the ith element is a list of the indexes
of the elements in the ith cluster.
ts_to_cluster: A dictionary mapping each time series to its
cluster assignment.
"""
distances_ts_to_cluster = pairwise_distances(data, centroids)
clusters = [[] for i in range(len(centroids))]
ts_to_cluster = {}
for ts_index, dist in enumerate(distances_ts_to_cluster):
options = np.argsort(dist)
for option in options:
invalid = violates_cons(option, ts_index, ts_to_cluster,
must_link, can_not_link)
if not invalid:
ts_to_cluster[ts_index] = option
clusters[option].append(ts_index)
break
return clusters, ts_to_cluster
def update_centroids(data, clusters, centroids):
"""Updates the centroids based on the elements in each cluster.
Args:
data: An np array where each row is a time series and each
column is a time.
clusters: A list where the ith element is a list of the indexes
of the elements in the ith cluster.
centroids: A list of the centroids.
Returns:
True if the centroids were updated without an error, False
otherwise.
"""
for cluster_ind, _ in enumerate(centroids):
if len(clusters[cluster_ind]) == 0:
return False
total = np.zeros((centroids.shape[1]))
for ts_index in clusters[cluster_ind]:
total += data[ts_index]
centroids[cluster_ind] = total / len(clusters[cluster_ind])
return True
def k_means_init(data, num_clusters, run_num):
"""Runs k-means++ initialization which aims to spread out the
cluster centroids.
Args:
data: An np array where each row is a time series and each
column is a time.
num_clusters: The number of clusters.
run_num: The number of times k-means has been reinitialized. It
is used as the seed for np.random.seed.
Returns:
An np array where the ith element is the ith cluster centroid.
"""
np.random.seed(run_num)
num_ts = len(data)
first = int(num_ts * .2)
centroids = np.array([data[first]])
for _ in range(num_clusters -1):
_, distances = pairwise_distances_argmin_min(data, centroids)
choices = np.random.choice(num_ts, 1, p=distances/np.sum(distances))
picked = choices[0]
centroids = np.append(centroids, [data[picked]], axis=0)
return centroids
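# Note (illustrative commentary): because the sampling probability above is
# proportional to each point's distance from its nearest already-chosen
# centroid, points far from all current centroids are more likely to be picked,
# which spreads the initial centroids out. The textbook k-means++ weights by the
# squared distance; this implementation uses the distance itself.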
def violates_cons(option, ts_1, ts_to_cluster, must_link, can_not_link):
"""Checks if any constraint is violated by placing ts_1 in the
cluster option.
Args:
option: The index of the cluster where the time series ts_1
may be placed.
ts_1: The time series being placed.
ts_to_cluster: A dictionary where each key is a time series
index and each value is the index of the cluster the time
series is assigned to.
must_link: A dictionary where each key is a time series index
and each value is a list of time series indexes which must
be in the same cluster.
can_not_link: A dictionary where each key is a time series index
and each value is a list of time series indexes which can
not be in the same cluster as the key.
Returns:
True if a constraint was violated and False otherwise.
"""
if ts_1 in must_link:
for ts_2 in must_link[ts_1]:
if ts_2 in ts_to_cluster and ts_to_cluster[ts_2] != option:
return True
if ts_1 in can_not_link:
for ts_2 in can_not_link[ts_1]:
if ts_2 in ts_to_cluster and ts_to_cluster[ts_2] == option:
return True
return False
def make_constraints(ts_to_labels):
"""Makes must link and can not link constraints. Uses ts_to_labels
to get the unique time series label combinations and makes the
constraints based on the label similarity of the time series.
Args:
ts_to_labels: An array where each row is a time series and each
column is a label.
Returns:
must_link: A dictionary mapping time series that must link.
can_not_link: A dictionary mapping time series that can't link.
"""
np.random.seed(0)
must_link, can_not_link = {}, {}
limit = len(ts_to_labels) * .03
unique_label_patterns = np.unique(ts_to_labels, axis=0)
pattern_to_rows = {}
greater_than_limit = []
for index, row in enumerate(unique_label_patterns):
ts_indexes = np.where(np.all(ts_to_labels == row, axis=1))[0]
pattern_to_rows[index] = ts_indexes
if len(ts_indexes) > limit:
greater_than_limit.append(index)
for pattern, indexes in pattern_to_rows.items():
index_1 = np.random.choice(indexes, 1)[0]
sec_pattern = np.random.choice(greater_than_limit, 1)[0]
index_2 = np.random.choice(pattern_to_rows[sec_pattern], 1)[0]
if pattern != sec_pattern:
add_link(index_1, index_2, can_not_link)
else:
add_link(index_1, index_2, must_link)
return must_link, can_not_link
def add_link(index_1, index_2, link_dict):
"""Adds a link from index_1 to index_2 and index_2 to index_1.
Args:
index_1: Index of the first element.
index_2: Index of the second element.
link_dict: Dictionary where each key is an element index and
each value is a list of element indexes for which the key
has a link.
"""
if index_1 in link_dict:
link_dict[index_1].append(index_2)
if index_2 in link_dict:
link_dict[index_2].append(index_1)
if index_1 not in link_dict:
link_dict[index_1] = [index_2]
if index_2 not in link_dict:
link_dict[index_2] = [index_1]
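# For example (assumed indexes): starting from an empty dict d, add_link(0, 1, d)
# leaves d == {0: [1], 1: [0]}, so each constraint is stored symmetrically.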
def cluster_zone(label_dict, ts_to_labels):
"""Clusters the time series based on their zone label.
Args:
label_dict: A dictionary where each key is a system label and
each value is the index of the label (column) in
ts_to_labels. All keys are zone keys.
ts_to_labels: An array where each row is a time series and each
column is a label.
Returns:
A list where the ith entry is the name of the cluster the ith
time series was placed in."""
index_to_label = dict((v, k) for k, v in label_dict.items())
labels = [0] * ts_to_labels.shape[0]
zone_label = np.argwhere(ts_to_labels)
for ts_index, zone_index in zone_label:
labels[ts_index] = index_to_label[zone_index]
return labels
def clusters_min_max(data, assignment, date_to_index, old_range, outlier):
"""Calculates the min and max value at each point for each cluster.
Args:
data: Array where each row is a time series and each column is
a date.
assignment: Array where the ith element is the cluster the ith
time series was placed in.
date_to_index: A dictionary mapping each date to its index.
old_range: The original range for the values in data.
outlier: Whether outliers are identified, must be "on" or "off".
Returns:
        A tuple (min_max, dates, outlier_indexes) where min_max[i] holds
        the minimum and maximum values of cluster i+1 at each date, dates
        has the corresponding dates, and outlier_indexes lists the indexes
        of the time series marked as outliers.
"""
sorted_dates = sorted(date_to_index.items(), key=lambda x: x[1])
dates = [date for date, index in sorted_dates]
    # Rescales num to its original scale; returns nan for missing values (-1).
def original_scale(num):
if num != -1:
return scale_to_range([0, 10], num, old_range)
return np.nan
clusters, outlier_indexes = {}, []
    # Each iteration rescales the time series at index to its original scale
    # and appends it to the list of time series for its cluster. If
    # outlier == "on" and the label marks the time series as an outlier
    # (label <= 0), the time series is not appended; its index is added to
    # outlier_indexes instead.
for index, label in enumerate(assignment):
if outlier == "off" or label > 0:
abs_label = abs(label)
time_series = np.array(list(map(original_scale, data[index])))
if abs_label not in clusters:
clusters[abs_label] = np.expand_dims(time_series, axis=0)
else:
clusters[abs_label] = np.append(clusters[abs_label],
[time_series], axis=0)
else:
outlier_indexes.append(index)
entries = data.shape[1]
min_max = np.full((max(clusters.keys()), 2, entries), old_range[0])
for label in clusters:
for col in range(entries):
if not np.all(np.isnan(np.array(clusters[label])[:, col])):
min_max[label -1][0][col] = np.nanmin(clusters[label][:, col])
min_max[label -1][1][col] = np.nanmax(clusters[label][:, col])
return min_max.tolist(), dates, outlier_indexes
| apache-2.0 |