repo_name | path | copies | size | content | license
---|---|---|---|---|---
gotomypc/scikit-learn
|
benchmarks/bench_sparsify.py
|
323
|
3372
|
"""
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse import csr_matrix
import numpy as np
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.metrics import r2_score
np.random.seed(42)
def sparsity_ratio(X):
return np.count_nonzero(X) / float(n_samples * n_features)
n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
X[inds[int(n_features / 1.2):]] = 0 # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[n_features // 2:]] = 0  # sparsify coef
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
print("test data sparsity: %f" % sparsity_ratio(X_test))
###############################################################################
clf = SGDRegressor(penalty='l1', alpha=.2, fit_intercept=True, n_iter=2000)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
for _ in range(300):
clf.predict(X_test)
def benchmark_sparse_predict():
X_test_sparse = csr_matrix(X_test)
for _ in range(300):
clf.predict(X_test_sparse)
def score(y_test, y_pred, case):
r2 = r2_score(y_test, y_pred)
print("r^2 on test data (%s) : %f" % (case, r2))
score(y_test, clf.predict(X_test), 'dense model')
benchmark_dense_predict()
clf.sparsify()
score(y_test, clf.predict(X_test), 'sparse model')
benchmark_sparse_predict()
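# A minimal follow-up check (a sketch, not part of the original benchmark):
# SGDRegressor.sparsify() replaces the dense coef_ array with a scipy.sparse
# matrix, which is what makes the sparse prediction path above cheaper.
from scipy.sparse import issparse

if issparse(clf.coef_):
    # nnz counts the stored non-zero coefficients of the sparse matrix
    print("coef_ stored as %s with %d non-zero entries out of %d"
          % (type(clf.coef_).__name__, clf.coef_.nnz, n_features))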
|
bsd-3-clause
|
MarianThull/BattleshipAI
|
battleship.py
|
1
|
8122
|
from __future__ import division
__author__ = 'marian'
import copy
import random
import numpy as np
import matplotlib.pyplot as plt
class Battleship:
WATER = 0
HIT = 1
SUNKEN = 2
WON = 3
def __init__(self, size, ships=[], random=True):
self.size = size
self.positions = []
self.single_ships = []
self.ships = ships
self.shoot_count = 0
if random:
self.gen_random_positions()
def gen_random_positions(self):
forbidden = [(i, -1) for i in range(self.size)] + [(i, self.size) for i in range(self.size)] + \
[(-1, j) for j in range(self.size)] + [(self.size, j) for j in range(self.size)]
for ship in self.ships:
horizontal = random.randint(0, 1)
legal = False
while not legal:
if horizontal:
                    row = random.randint(0, self.size - 1)
left = random.randint(0, self.size - ship)
s = [(row, j) for j in range(left, left + ship)]
else:
                    column = random.randint(0, self.size - 1)
top = random.randint(0, self.size - ship)
s = [(i, column) for i in range(top, top + ship)]
legal = True
for pos in s:
if pos in forbidden:
legal = False
break
if horizontal:
for i in range(row - 1, row + 2):
for j in range(left - 1, left + ship + 1):
forbidden.append((i, j))
else:
for i in range(top - 1, top + ship + 1):
                        for j in range(column - 1, column + 2):
forbidden.append((i, j))
self.positions += s
self.single_ships.append(s)
def print_field(self):
f = np.zeros((self.size, self.size))
for i, j in self.positions:
f[i][j] = 1
        print(f)
def shoot(self, i, j):
self.shoot_count += 1
if (i, j) not in self.positions:
return Battleship.WATER
for ship in self.single_ships:
if (i, j) in ship:
ship.remove((i, j))
if len(ship) == 0:
self.single_ships.remove(ship)
if len(self.single_ships) == 0:
return Battleship.WON
return Battleship.SUNKEN
return Battleship.HIT
class BattleshipAI:
def __init__(self, size, ships, game):
self.size = size
self.ships = ships
self.game = game
self.shots = []
self.sunken = []
self.hits = []
self.heat_matrix = np.zeros((size, size))
self.feel_the_heat()
def feel_the_heat(self):
# reset heat_matrix
self.heat_matrix = np.zeros((self.size, self.size))
# place every ship in all positions
for ship in self.ships:
# iterate over rows
for i in range(self.size):
# check all horizontal positions
for j_left in range(0, self.size - ship + 1):
js = range(j_left, j_left + ship)
# if there is a shot at one of the squares it is not a legal position
legal = True
for j in js:
if (i, j) in self.shots:
legal = False
if legal:
for j in js:
self.heat_matrix[i][j] += 1
# iterate over columns
for j in range(self.size):
# check all vertical positions
for i_top in range(0, self.size - ship + 1):
i_s = range(i_top, i_top + ship)
# if there is a shot at one of the squares it is not a legal position
legal = True
for i in i_s:
if (i, j) in self.shots:
legal = False
if legal:
for i in i_s:
self.heat_matrix[i][j] += 1
def local_heat_check(self):
rows = list(set([hit[0] for hit in self.hits]))
columns = list(set([hit[1] for hit in self.hits]))
# reset heat_matrix
self.heat_matrix = np.zeros((self.size, self.size))
# place every ship in all positions
for ship in self.ships:
# if horizontal or unknown check row
if len(rows) == 1:
i = rows[0]
# check all horizontal positions
for j_left in range(0, self.size - ship + 1):
js = range(j_left, j_left + ship)
# if there is a shot at one of the squares it is not a legal position
legal = True
for j in js:
if (i, j) in self.shots:
legal = False
if legal:
for j in js:
if min(columns) - j == 1 or j - max(columns) == 1:
self.heat_matrix[i][j] += 1
# iterate over columns
if len(columns) == 1:
j = columns[0]
# check all vertical positions
for i_top in range(0, self.size - ship + 1):
i_s = range(i_top, i_top + ship)
# if there is a shot at one of the squares it is not a legal position
legal = True
for i in i_s:
if (i, j) in self.shots:
legal = False
if legal:
for i in i_s:
if min(rows) - i == 1 or i - max(rows) == 1:
self.heat_matrix[i][j] += 1
def hot_shot(self):
a = np.argmax(self.heat_matrix)
j = a % self.size
i = int((a - j) / self.size)
v = self.game.shoot(i, j)
if v == Battleship.HIT:
self.hits.append((i, j))
self.sunken.append((i, j))
elif v == Battleship.SUNKEN:
self.hits.append((i, j))
rows = [h[0] for h in self.hits]
columns = [h[1] for h in self.hits]
for k in range(min(rows) - 1, max(rows) + 2):
for l in range(min(columns) - 1, max(columns) + 2):
self.shots.append((k, l))
self.hits = []
self.sunken.append((i, j))
elif v == Battleship.WON:
self.shots += self.hits
self.sunken.append((i, j))
else:
self.shots.append((i, j))
if len(self.hits) > 0:
self.local_heat_check()
else:
self.feel_the_heat()
return self.heat_matrix, copy.deepcopy(self.sunken)
def plot_heat(states, cmap, vmin, vmax):
subplts = np.ceil(np.sqrt(len(states)))
subx = int(1.7 * subplts)
    suby = int(np.ceil(len(states) / subx))
for i in range(len(states)):
heat_m, sunken = states[i]
for k, l in sunken:
heat_m[k][l] = -1
plt.subplot(suby, subx, i + 1, aspect=1)
n = plt.pcolor(heat_m, cmap=cmap, vmin=vmin, vmax=vmax, edgecolor='k')
n.axes.set_ylim(0, heat_m.shape[1])
n.axes.set_xlim(0, heat_m.shape[0])
if __name__ == '__main__':
size = 12
ships = [5, 4, 3, 3, 2]
game = Battleship(size, ships, True)
ai = BattleshipAI(size, ships, game)
vmin = 0
vmax = ai.heat_matrix.max()
custom_cmap = plt.cm.hot
custom_cmap.set_under('b')
states = []
count = 0
while True:
count += 1
state = ai.hot_shot()
# print 'Shot {}. {} fields hit.'.format(count, len(state[1]))
states.append(state)
if len(state[1]) == sum(ships) or count == 100:
break
    print('Won with {} shots.'.format(count))
plot_heat(states, custom_cmap, vmin, vmax)
plt.show()
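    # Illustration only (a sketch, not part of the original game loop): the
    # index arithmetic in hot_shot(), j = a % size; i = (a - j) / size,
    # recovers (row, column) from np.argmax on the flattened row-major heat
    # matrix and is equivalent to np.unravel_index.
    demo_heat = np.zeros((size, size))
    demo_heat[3, 7] = 5.0
    demo_i, demo_j = np.unravel_index(np.argmax(demo_heat), demo_heat.shape)
    assert (demo_i, demo_j) == (3, 7)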
|
mit
|
wangdegang/RWD
|
code/degang.py
|
1
|
3718
|
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 25 18:56:59 2018
@author: Wang_Degang
Module contains handy functions to get the object size, running time,
submit Impala queries, convert parquet data to pandas data frame, etc.
Functions
---------
hsize - get human readable size of an object
rtime - get running time of a process
impa - submit Impala query and run in Hadoop system
todf - submit Impala query and get pandas data frame of the query result
nullCheck - get numbers and %s of null values for each column in a data frame
"""
import sys
import pyodbc
import pandas as pd
from datetime import datetime
#=======================================================================
# object size human readable
#=======================================================================
def hsize(obj):
"""
    Get the human readable size of an object and print it.
Parameters
----------
obj : obj
Any python objects
Returns
-------
    None
        Prints a string stating how big the object is, e.g. '1.23 MB'.
"""
num = sys.getsizeof(obj)
for unit in ['bytes','KB','MB','GB','TB','PB','EB','ZB']:
if num < 1024.0:
print( '%.2f %s' % (num, unit))
break
num /= 1024.0
#=======================================================================
# processing time
#=======================================================================
'''
from datetime import datetime
t0 = datetime.now()
process()
t1 = datetime.now()
d = t1-t0
rtime(d)
'''
def rtime(obj):
m, s = divmod(obj.seconds, 60)
#h, m = divmod(m, 60)
return print(f'Used {m} mins {s} secs')
#=======================================================================
# pyodbc to pandas
#=======================================================================
'''
def as_pandas(cursor):
names = [metadata[0] for metadata in cursor.description]
return pd.DataFrame.from_records(cursor.fetchall(), columns=names)
import pyodbc
conn = pyodbc.connect(dsn='myrwe', autocommit=True)
cursor = conn.cursor()
cursor.execute("show databases")
as_pandas(cursor)
cursor.close()
conn.close()
'''
# impala operations in hadoop
def impa(*args):
t0 = datetime.now()
conn = pyodbc.connect(dsn='myrwe', autocommit=True)
cursor = conn.cursor()
if len(args) == 1:
cursor.execute(args[0])
else:
q = f'''create table if not exists {args[0]}.{args[1]}
stored as parquet as {args[2]}
'''
cursor.execute(q)
cursor.close()
conn.close()
t1 = datetime.now()
return rtime(t1-t0)
# save query to a pandas data frame
def todf(obj):
conn = pyodbc.connect(dsn='myrwe', autocommit=True)
cursor = conn.cursor()
cursor.execute(obj)
names = [metadata[0] for metadata in cursor.description]
df = pd.DataFrame.from_records(cursor.fetchall(), columns=names)
for col in df:
if df[col].dtype == 'datetime64[ns]':
df[col] = df[col].apply(lambda x: x.date())
    cursor.close()
    conn.close()
    return df
#=======================================================================
# count the number of missing records
#=======================================================================
def nullCheck(obj):
dfnon = pd.DataFrame(obj.count(), columns=['nonNull'])
dfnul = pd.DataFrame(obj.isnull().sum(), columns=['Null'])
df = dfnon.join(dfnul)
df['NullPct'] = df['Null']/obj.shape[0]*100
fmt = {'nonNull' : '{:,}',
'Null' : '{:,}',
'NullPct' : '{:.2f}%'}
for key, value in fmt.items():
df[key] = df[key].apply(value.format)
return print(df)
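#=======================================================================
# usage sketch for nullCheck (illustrative only; 'demo' is a made-up frame)
#=======================================================================
'''
import numpy as np
import pandas as pd
demo = pd.DataFrame({'a': [1, 2, np.nan], 'b': ['x', None, 'z']})
nullCheck(demo)
# prints, per column, the non-null count, the null count and the null %
'''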
|
mit
|
amitkaps/machine-learning
|
check_env.py
|
1
|
2767
|
# Authors: Amit Kapoor and Bargava Subramanian
# Copyright (c) 2016 Amit Kapoor
# License: MIT License
"""
This script will check if the environment setup is correct for the workshop.
To run, please execute the following command from the command prompt
>>> python check_env.py
The output will indicate if any of the libraries are missing or need to be updated.
This script is inspired from https://github.com/fonnesbeck/scipy2015_tutorial/blob/master/check_env.py
"""
from __future__ import print_function
try:
import curses
curses.setupterm()
assert curses.tigetnum("colors") > 2
OK = "\x1b[1;%dm[ OK ]\x1b[0m" % (30 + curses.COLOR_GREEN)
FAIL = "\x1b[1;%dm[FAIL]\x1b[0m" % (30 + curses.COLOR_RED)
except:
OK = '[ OK ]'
FAIL = '[FAIL]'
import sys
try:
import importlib
except ImportError:
print(FAIL, "Python version 2.7 is required, but %s is installed." % sys.version)
from distutils.version import LooseVersion as Version
def import_version(pkg, min_ver, fail_msg=""):
mod = None
try:
mod = importlib.import_module(pkg)
        if (pkg == "spacy" or pkg == "wordcloud") and mod is not None:
            # these two are only checked for importability, not for version
            print(OK, '%s ' % (pkg))
        else:
            version = getattr(mod, "__version__", 0) or getattr(mod, "VERSION", 0)
            if Version(version) < min_ver:
                print(FAIL, "%s version %s or higher required, but %s installed."
                      % (pkg, min_ver, version))
            else:
                print(OK, '%s version %s' % (pkg, version))
except ImportError:
print(FAIL, '%s not installed. %s' % (pkg, fail_msg))
return mod
# first check the python version
print('Using python in', sys.prefix)
print(sys.version)
pyversion = Version(sys.version)
if pyversion < "3":
print(FAIL, "Python version 3 is required, but %s is installed." % sys.version)
elif pyversion >= "2":
if pyversion == "2.7":
print(FAIL, "Python version 2.7 is installed. Please upgrade to version 3." )
else:
print(FAIL, "Unknown Python version: %s" % sys.version)
print()
requirements = {
'gensim' :'0.12.4',
'IPython' : '4.0.3',
'jupyter' :'1.0.0',
'lda' : '1.0.3',
'networkx' : '1.11',
'nltk' : '3.1',
'matplotlib' :'1.5.0',
'numpy' : '1.10.4',
'pandas' : '0.17.1',
'PIL' : '1.1.7',
'scipy' : '0.17.0',
'sklearn' : '0.17',
'seaborn' :'0.6.0',
'spacy' :'0.100.6',
'statsmodels':'0.6.1',
'wordcloud' :'0.1',
'xgboost' :'0.4'
}
# now the dependencies
for lib, required_version in list(requirements.items()):
import_version(lib, required_version)
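# Illustrative note (not part of the original script): the same helper can be
# called ad hoc for a single package, e.g.
#     import_version('numpy', '1.10.4', fail_msg='conda/pip install numpy')
# which prints the same [ OK ]/[FAIL] line as the loop above.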
|
mit
|
nborggren/zipline
|
zipline/history/history.py
|
1
|
9719
|
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import pandas as pd
import re
from zipline.errors import IncompatibleHistoryFrequency
def parse_freq_str(freq_str):
# TODO: Wish we were more aligned with pandas here.
num_str, unit_str = re.match('([0-9]+)([A-Za-z]+)', freq_str).groups()
return int(num_str), unit_str
class Frequency(object):
"""
Represents how the data is sampled, as specified by the algoscript
via units like "1d", "1m", etc.
Currently only two frequencies are supported, "1d" and "1m"
- "1d" provides data at daily frequency, with the latest bar aggregating
the elapsed minutes of the (incomplete) current day
- "1m" provides data at minute frequency
"""
SUPPORTED_FREQUENCIES = frozenset({'1d', '1m'})
MAX_MINUTES = {'m': 1, 'd': 390}
MAX_DAYS = {'d': 1}
def __init__(self, freq_str, data_frequency, env):
if freq_str not in self.SUPPORTED_FREQUENCIES:
raise ValueError(
"history frequency must be in {supported}".format(
supported=self.SUPPORTED_FREQUENCIES,
))
        # The string that the algoscript specifies.
        # Hold onto it to use as a key for caching.
self.freq_str = freq_str
# num - The number of units of the frequency.
# unit_str - The unit type, e.g. 'd'
self.num, self.unit_str = parse_freq_str(freq_str)
self.data_frequency = data_frequency
self.env = env
def next_window_start(self, previous_window_close):
"""
Get the first minute of the window starting after a window that
finished on @previous_window_close.
"""
if self.unit_str == 'd':
return self.next_day_window_start(previous_window_close, self.env,
self.data_frequency)
elif self.unit_str == 'm':
return self.env.next_market_minute(previous_window_close)
@staticmethod
def next_day_window_start(previous_window_close, env,
data_frequency='minute'):
"""
Get the next day window start after @previous_window_close. This is
defined as the first market open strictly greater than
@previous_window_close.
"""
if data_frequency == 'daily':
next_open = env.next_trading_day(previous_window_close)
else:
next_open = env.next_market_minute(previous_window_close)
return next_open
def window_open(self, window_close):
"""
For a period ending on `window_end`, calculate the date of the first
minute bar that should be used to roll a digest for this frequency.
"""
if self.unit_str == 'd':
return self.day_window_open(window_close, self.num)
elif self.unit_str == 'm':
return self.minute_window_open(window_close, self.num)
def window_close(self, window_start):
"""
For a period starting on `window_start`, calculate the date of the last
minute bar that should be used to roll a digest for this frequency.
"""
if self.unit_str == 'd':
return self.day_window_close(window_start, self.num)
elif self.unit_str == 'm':
return self.minute_window_close(window_start, self.num)
def day_window_open(self, window_close, num_days):
"""
Get the first minute for a daily window of length @num_days with last
minute @window_close. This is calculated by searching backward until
@num_days market_closes are encountered.
"""
open_ = self.env.open_close_window(
window_close,
1,
offset=-(num_days - 1)
).market_open.iloc[0]
if self.data_frequency == 'daily':
open_ = pd.tslib.normalize_date(open_)
return open_
def minute_window_open(self, window_close, num_minutes):
"""
Get the first minute for a minutely window of length @num_minutes with
last minute @window_close.
        This is defined as window_close if num_minutes == 1, and otherwise as
        the (N-1)-th market minute before @window_close.
"""
if num_minutes == 1:
# Short circuit this case.
return window_close
return self.env.market_minute_window(
window_close, count=-num_minutes
)[-1]
def day_window_close(self, window_start, num_days):
"""
Get the window close for a daily frequency.
        If the data_frequency is minute, then this will be the last minute of
        the last day of the window.
        If the data_frequency is daily, this will be midnight UTC of the last
        day of the window.
"""
if self.data_frequency != 'daily':
return self.env.get_open_and_close(
self.env.add_trading_days(num_days - 1, window_start),
)[1]
return pd.tslib.normalize_date(
self.env.add_trading_days(num_days - 1, window_start),
)
def minute_window_close(self, window_start, num_minutes):
"""
Get the last minute for a minutely window of length @num_minutes with
first minute @window_start.
This is defined as window_start if num_minutes == 1, and otherwise as
the N-1st market minute after @window_start.
"""
if num_minutes == 1:
# Short circuit this case.
return window_start
return self.env.market_minute_window(
window_start, count=num_minutes
)[-1]
def prev_bar(self, dt):
"""
Returns the previous bar for dt.
"""
if self.unit_str == 'd':
if self.data_frequency == 'minute':
def func(dt):
return self.env.get_open_and_close(
self.env.previous_trading_day(dt))[1]
else:
func = self.env.previous_trading_day
else:
func = self.env.previous_market_minute
# Cache the function dispatch.
self.prev_bar = func
return func(dt)
@property
def max_bars(self):
if self.data_frequency == 'daily':
return self.max_days
else:
return self.max_minutes
@property
def max_days(self):
if self.data_frequency != 'daily':
raise ValueError('max_days requested in minute mode')
return self.MAX_DAYS[self.unit_str] * self.num
@property
def max_minutes(self):
"""
The maximum number of minutes required to roll a bar at this frequency.
"""
if self.data_frequency != 'minute':
raise ValueError('max_minutes requested in daily mode')
return self.MAX_MINUTES[self.unit_str] * self.num
def normalize(self, dt):
if self.data_frequency != 'daily':
return dt
return pd.tslib.normalize_date(dt)
def __eq__(self, other):
return self.freq_str == other.freq_str
def __hash__(self):
return hash(self.freq_str)
def __repr__(self):
return ''.join([str(self.__class__.__name__),
"('", self.freq_str, "')"])
class HistorySpec(object):
"""
    Maps to the parameters of the history() call made by the algoscript.
    An object is used here so that get_history calls are not constantly
    parsing the parameters, and to provide values for caching and indexing
    into result frames.
"""
FORWARD_FILLABLE = frozenset({'price'})
@classmethod
def spec_key(cls, bar_count, freq_str, field, ffill):
"""
Used as a hash/key value for the HistorySpec.
"""
return "{0}:{1}:{2}:{3}".format(
bar_count, freq_str, field, ffill)
def __init__(self, bar_count, frequency, field, ffill, env,
data_frequency='daily'):
# Number of bars to look back.
self.bar_count = bar_count
if isinstance(frequency, str):
frequency = Frequency(frequency, data_frequency, env)
if frequency.unit_str == 'm' and data_frequency == 'daily':
raise IncompatibleHistoryFrequency(
frequency=frequency.unit_str,
data_frequency=data_frequency,
)
# The frequency at which the data is sampled.
self.frequency = frequency
# The field, e.g. 'price', 'volume', etc.
self.field = field
# Whether or not to forward fill nan data. Only has an effect if this
# spec's field is in FORWARD_FILLABLE.
self._ffill = ffill
# Calculate the cache key string once.
self.key_str = self.spec_key(
bar_count, frequency.freq_str, field, ffill)
@property
def ffill(self):
"""
Wrapper around self._ffill that returns False for fields which are not
forward-fillable.
"""
return self._ffill and self.field in self.FORWARD_FILLABLE
def __repr__(self):
return ''.join([self.__class__.__name__, "('", self.key_str, "')"])
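# Illustrative sketch (not part of the zipline source): how the small helpers
# above behave. parse_freq_str splits a frequency string into its count and
# unit, and HistorySpec.spec_key builds the cache key string.
#     >>> parse_freq_str('1d')
#     (1, 'd')
#     >>> parse_freq_str('5m')
#     (5, 'm')
#     >>> HistorySpec.spec_key(20, '1d', 'price', True)
#     '20:1d:price:True'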
|
apache-2.0
|
ishank08/scikit-learn
|
benchmarks/bench_glmnet.py
|
111
|
3890
|
"""
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
Does two benchmarks.
First, we fix a training set and increase the number of
samples. Then we plot the computation time as a function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as a function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of matplotlib.pyplot
import matplotlib.pyplot as plt
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
    n_informative = n_features // 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
plt.clf()
xx = range(0, n * step, step)
plt.title('Lasso regression on sample dataset (%d features)' % n_features)
plt.plot(xx, scikit_results, 'b-', label='scikit-learn')
plt.plot(xx, glmnet_results, 'r-', label='glmnet')
plt.legend()
plt.xlabel('number of samples to classify')
plt.ylabel('Time (s)')
plt.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
        n_informative = n_features // 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
plt.figure('scikit-learn vs. glmnet benchmark results')
plt.title('Regression in high dimensional spaces (%d samples)' % n_samples)
plt.plot(xx, scikit_results, 'b-', label='scikit-learn')
plt.plot(xx, glmnet_results, 'r-', label='glmnet')
plt.legend()
plt.xlabel('number of features')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
yonglehou/scikit-learn
|
sklearn/metrics/cluster/supervised.py
|
207
|
27395
|
"""Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
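    Examples
    --------
    A small illustration, where true class 1 is split across two predicted
    clusters::
    >>> contingency_matrix([0, 0, 1, 1], [0, 0, 1, 2])
    array([[2, 0, 0],
           [0, 1, 1]])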
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1 even
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
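# Worked illustration of the formula above (added sketch, not from the
# scikit-learn source), for the docstring case labels_true=[0, 0, 1, 2],
# labels_pred=[0, 0, 1, 1]:
#   contingency = [[2, 0], [0, 1], [0, 1]]
#   sum_comb_c = C(2,2) = 1,  sum_comb_k = C(2,2) + C(2,2) = 2,  sum_comb = 1
#   prod_comb = 1 * 2 / C(4,2) = 1/3,  mean_comb = (1 + 2) / 2 = 3/2
#   ARI = (1 - 1/3) / (3/2 - 1/3) = 0.571..., matching the 0.57 doctest above.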
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all classes members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If classes members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
    classes are homogeneous, but unnecessary splits harm completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
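# Illustrative check (a sketch, not from the scikit-learn source): for two
# identical balanced labelings the MI equals the label entropy, e.g.
#     >>> round(mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1]), 3)
#     0.693
# which is log(2) in nats.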
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
    ami: float (upper bound of 1.0)
        The AMI returns a value of 1 when the two partitions are identical
        (i.e. perfectly matched). Random partitions (independent labellings)
        have an expected AMI around 0 on average and hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
    mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
    Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
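# Illustrative note (a sketch, not from the scikit-learn source): for a
# balanced two-class labeling, pi = [2, 2] and pi_sum = 4, so the expression
# above reduces to -(log(2) - log(4)) = log(2), the natural-log entropy of a
# fair coin:
#     >>> round(entropy([0, 0, 1, 1]), 3)
#     0.693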
|
bsd-3-clause
|
CforED/Machine-Learning
|
sklearn/linear_model/tests/test_ridge.py
|
19
|
26553
|
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from itertools import product
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.datasets import make_regression
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.utils import check_random_state
from sklearn.datasets import make_multilabel_classification
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
# Ridge regression convergence test using score
# TODO: for this test to be robust, we should use a dataset instead
# of np.random.
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "cholesky", "lsqr", "sag"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_regression_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
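# Why the rescaling in the test above works (illustrative note, not from the
# original file): for the squared loss,
#     sum_i w_i * (y_i - x_i . theta)**2
#   = sum_i (sqrt(w_i) * y_i - (sqrt(w_i) * x_i) . theta)**2,
# so fitting on sqrt(w)-scaled rows and targets reproduces the weighted
# solution; the L2 penalty term is unchanged because it does not involve the
# samples.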
def test_ridge_sample_weights():
# TODO: loop over sparse data as well
rng = np.random.RandomState(0)
param_grid = product((1.0, 1e-2), (True, False),
('svd', 'cholesky', 'lsqr', 'sparse_cg'))
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
for (alpha, intercept, solver) in param_grid:
# Ridge with explicit sample_weight
est = Ridge(alpha=alpha, fit_intercept=intercept, solver=solver)
est.fit(X, y, sample_weight=sample_weight)
coefs = est.coef_
inter = est.intercept_
# Closed form of the weighted regularized least square
# theta = (X^T W X + alpha I)^(-1) * X^T W y
W = np.diag(sample_weight)
if intercept is False:
X_aug = X
I = np.eye(n_features)
else:
dummy_column = np.ones(shape=(n_samples, 1))
X_aug = np.concatenate((dummy_column, X), axis=1)
I = np.eye(n_features + 1)
I[0, 0] = 0
cf_coefs = linalg.solve(X_aug.T.dot(W).dot(X_aug) + alpha * I,
X_aug.T.dot(W).dot(y))
if intercept is False:
assert_array_almost_equal(coefs, cf_coefs)
else:
assert_array_almost_equal(coefs, cf_coefs[1:])
assert_almost_equal(inter, cf_coefs[0])
def test_ridge_shapes():
# Test shape of coef_ and intercept_
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
# Test intercept with multiple targets GH issue #708
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
# Test BayesianRegression ridge classifier
# TODO: test also n_samples > n_features
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
# On alpha=0., Ridge and OLS yield the same solution.
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
# Tests the ridge object using individual penalties
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-8).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky', 'sag']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:-1])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
    # test that it can work with both dense and sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(alpha=1.0, fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = get_scorer('mean_squared_error')
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
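# Editor's note (hedged): the efficient leave-one-out check above relies on the
# standard identity for linear smoothers such as ridge. With the hat matrix
#   H(alpha) = X (X^T X + alpha I)^{-1} X^T,
# the leave-one-out residual of sample i equals the in-sample residual divided
# by (1 - H_ii):
#   y_i - yhat_i^{(-i)} = (y_i - yhat_i) / (1 - H_ii),
# which is why a single fit reproduces the brute-force loop above.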
def _test_ridge_cv(filter_):
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
cv = KFold(5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5, fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3, fit_intercept=False)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
# ignore warning that solvers are changed to SAG for
# temporary fix
@ignore_warnings
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd', fit_intercept=False)
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
    # we give a small weight to class 1
clf = RidgeClassifier(class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'balanced' can handle negative labels.
clf = RidgeClassifier(class_weight='balanced')
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# class_weight = 'balanced', and class_weight = None should return
# same values when y has equal number of all labels
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
clfa = RidgeClassifier(class_weight='balanced')
clfa.fit(X, y)
assert_equal(len(clfa.classes_), 2)
assert_array_almost_equal(clf.coef_, clfa.coef_)
assert_array_almost_equal(clf.intercept_, clfa.intercept_)
def test_class_weight_vs_sample_weight():
"""Check class_weights resemble sample_weights behavior."""
for clf in (RidgeClassifier, RidgeClassifierCV):
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = clf()
clf1.fit(iris.data, iris.target)
clf2 = clf(class_weight='balanced')
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Check that sample_weight and class_weight are multiplicative
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_class_weights_cv():
# Test class weights for cross validated ridge classifier.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
clf.fit(X, y)
    # we give a small weight to class 1
clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
clf.fit(X, y)
assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
# Test _RidgeCV's store_cv_values attribute.
    rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridgecv_sample_weight():
rng = np.random.RandomState(0)
alphas = (0.1, 1.0, 10.0)
# There are different algorithms for n_samples > n_features
# and the opposite, so test them both.
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
cv = KFold(5)
ridgecv = RidgeCV(alphas=alphas, cv=cv)
ridgecv.fit(X, y, sample_weight=sample_weight)
# Check using GridSearchCV directly
parameters = {'alpha': alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(), parameters, fit_params=fit_params,
cv=cv)
gs.fit(X, y)
assert_equal(ridgecv.alpha_, gs.best_estimator_.alpha)
assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
# Sample weights must work with sparse matrices
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
def test_raises_value_error_if_solver_not_supported():
# Tests whether a ValueError is raised if a non-identified solver
# is passed to ridge_regression
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
@ignore_warnings
def test_n_iter():
# Test that self.n_iter_ is correct.
n_targets = 2
X, y = X_diabetes, y_diabetes
y_n = np.tile(y, (n_targets, 1)).T
for max_iter in range(1, 4):
for solver in ('sag', 'lsqr'):
reg = Ridge(solver=solver, max_iter=max_iter, tol=1e-12)
reg.fit(X, y_n)
assert_array_equal(reg.n_iter_, np.tile(max_iter, n_targets))
for solver in ('sparse_cg', 'svd', 'cholesky'):
reg = Ridge(solver=solver, max_iter=1, tol=1e-1)
reg.fit(X, y_n)
assert_equal(reg.n_iter_, None)
def test_ridge_fit_intercept_sparse():
X, y = make_regression(n_samples=1000, n_features=2, n_informative=2,
bias=10., random_state=42)
X_csr = sp.csr_matrix(X)
dense = Ridge(alpha=1., tol=1.e-15, solver='sag', fit_intercept=True)
sparse = Ridge(alpha=1., tol=1.e-15, solver='sag', fit_intercept=True)
dense.fit(X, y)
sparse.fit(X_csr, y)
assert_almost_equal(dense.intercept_, sparse.intercept_)
assert_array_almost_equal(dense.coef_, sparse.coef_)
# test the solver switch and the corresponding warning
sparse = Ridge(alpha=1., tol=1.e-15, solver='lsqr', fit_intercept=True)
assert_warns(UserWarning, sparse.fit, X_csr, y)
assert_almost_equal(dense.intercept_, sparse.intercept_)
assert_array_almost_equal(dense.coef_, sparse.coef_)
def test_errors_and_values_helper():
ridgecv = _RidgeGCV()
rng = check_random_state(42)
alpha = 1.
n = 5
y = rng.randn(n)
v = rng.randn(n)
Q = rng.randn(len(v), len(v))
QT_y = Q.T.dot(y)
G_diag, c = ridgecv._errors_and_values_helper(alpha, y, v, Q, QT_y)
# test that helper function behaves as expected
out, c_ = ridgecv._errors(alpha, y, v, Q, QT_y)
np.testing.assert_array_equal(out, (c / G_diag) ** 2)
    np.testing.assert_array_equal(c_, c)
out, c_ = ridgecv._values(alpha, y, v, Q, QT_y)
np.testing.assert_array_equal(out, y - (c / G_diag))
np.testing.assert_array_equal(c_, c)
def test_errors_and_values_svd_helper():
ridgecv = _RidgeGCV()
rng = check_random_state(42)
alpha = 1.
for n, p in zip((5, 10), (12, 6)):
y = rng.randn(n)
v = rng.randn(p)
U = rng.randn(n, p)
UT_y = U.T.dot(y)
G_diag, c = ridgecv._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
# test that helper function behaves as expected
out, c_ = ridgecv._errors_svd(alpha, y, v, U, UT_y)
np.testing.assert_array_equal(out, (c / G_diag) ** 2)
        np.testing.assert_array_equal(c_, c)
out, c_ = ridgecv._values_svd(alpha, y, v, U, UT_y)
np.testing.assert_array_equal(out, y - (c / G_diag))
np.testing.assert_array_equal(c_, c)
def test_ridge_classifier_no_support_multilabel():
X, y = make_multilabel_classification(n_samples=10, random_state=0)
assert_raises(ValueError, RidgeClassifier().fit, X, y)
|
bsd-3-clause
|
leal26/AeroPy
|
examples/morphing/flight_conditions/nonmorphed/range_constant_velocity.py
|
2
|
6505
|
import aeropy.xfoil_module as xf
from aeropy.geometry.airfoil import CST, create_x
from aeropy.morphing.camber_2D import *
from aeropy.aero_module import air_properties, Reynolds, LLT_calculator
from scipy.interpolate import griddata, RegularGridInterpolator
import numpy as np
import matplotlib.pyplot as plt
import pandas
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin_min
import scipy
import scipy.io
import scipy.optimize
import scipy.integrate
def aircraft_range_varying_AOA(f_L, f_LD, velocity):
def to_integrate(weight):
# velocity = 0.514444*108 # m/s (113 KTAS)
def calculate_AOA(velocity):
def residual(AOA):
CL = f_L([velocity, AOA[0]])[0]
span = 11
AR = 7.32
chord_root = span/AR
dynamic_pressure = .5*density*velocity**2*(span*chord_root)
return abs(CL - weight/dynamic_pressure)
if len(AOA_list) == 0:
x0 = 0
else:
x0 = AOA_list[-1]
res = scipy.optimize.minimize(residual, x0, bounds = [[0, 12],])#, options={'ftol':1e-9})
return res.x[0]
AOA = calculate_AOA(velocity)
# print(AOA)
AOA_list.append(AOA)
lift_to_drag = f_LD([velocity, AOA])
span = 10.9728
RPM = 1800
        a = 0.3089  # (lb/hr)/BHP
b = 0.008*RPM+19.607 # lb/hr
lbhr_to_kgs = 0.000125998
BHP_to_watt = 745.7
eta = 0.85
thrust = weight/lift_to_drag
power_SI = thrust*velocity/eta
power_BHP = power_SI/BHP_to_watt
mass_flow = (a*power_BHP + b)
mass_flow_SI = mass_flow*lbhr_to_kgs
SFC = mass_flow_SI/thrust
dR = velocity/g/SFC*lift_to_drag/weight
return dR*0.001 # *0.0005399
AOA_list = []
    g = 9.81  # m/s^2
fuel = 56*6.01*0.4535*g
initial_weight = 1111*g
final_weight = initial_weight-fuel
x = np.linspace(final_weight, initial_weight, 100)
y = []
for x_i in x:
y.append(to_integrate(x_i)[0])
range = scipy.integrate.simps(y, x)
return range, AOA_list
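# Editor's note (hedged): aircraft_range_varying_AOA integrates a Breguet-style
# range equation at constant velocity,
#   R = integral_{W_final}^{W_initial} V * (L/D) / (g * SFC * W) dW,
# where SFC is the thrust-specific fuel consumption in kg/(N*s) derived from the
# BHP fuel-flow model above, and the 0.001 factor converts metres to km.
# Illustrative call (f_L and f_LD are the interpolants built further below):
#   range_km, aoa_history = aircraft_range_varying_AOA(f_L, f_LD, 40.)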
# ==============================================================================
# Inputs
# ==============================================================================
altitude = 10000 # ft
air_props = air_properties(altitude, unit='feet')
density = air_props['Density']
# data = pandas.read_csv('performance_grid.csv')
# psi_spars = [0.1, 0.3, 0.6, 0.8]
# c_P = 1.0
# ranges = []
# for i in range(len(data.values)):
# AC = data.values[i,0:4]
# velocity = data.values[i,-4]
# AOA = data.values[i,-5]
# cl= data.values[i,-3]
# cd = data.values[i,-2]
# CL, CD = coefficient_LLT(AC, velocity, AOA)
# data.values[i, -3] = CL
# data.values[i, -2] = CD
# data.values[i, -1] = CL/CD
# print(i, CL, CD)
# data = data.drop_duplicates()
import pickle
# f = open('wing.p', 'wb')
# pickle.dump(data, f)
# f.close()
state = 'nonmorphed'
concepts = ['NACA0012', 'NACA4415', 'NACA641212', 'glider']
#
# plt.figure()
# for concept in concepts:
# mat = scipy.io.loadmat(state + '_' + concept)
# aoa = mat['aoa'][0]
# velocity = mat['V'][0]
# cl = mat['CL'].T
# LD_ratio = mat['lift_to_drag']
# # print(aoa)
# # print(velocity)
# # print(cl)
# f_LD = RegularGridInterpolator((velocity, aoa), LD_ratio, fill_value = 0, bounds_error = False)
# f_L = RegularGridInterpolator((velocity, aoa), cl, fill_value = 0, bounds_error = False)
# velocity = [20]
# aoas = np.linspace(0,12,1000)
# for i in range(len(velocity)):
# data_i = np.array([velocity[i]*np.ones(np.shape(aoas)), aoas]).T
# plt.plot(aoas, f_L(data_i), label = concept)
# # plt.scatter(aoas, f_L((aoas, velocity[i]*np.ones(np.shape(aoas)))))
# plt.legend()
# plt.ylabel('cl')
# plt.show()
# plt.figure()
# for concept in concepts:
# mat = scipy.io.loadmat(state + '_' + concept)
# aoa = mat['aoa'][0]
# velocity = mat['V'][0]
# cl = mat['CL'].T
# LD_ratio = mat['lift_to_drag']
# f_LD = RegularGridInterpolator((velocity, aoa), LD_ratio, fill_value = 0, bounds_error = False)
# f_L = RegularGridInterpolator((velocity, aoa), cl, fill_value = 0, bounds_error = False)
# velocity = [20]
# aoas = np.linspace(0,12,100)
# for i in range(len(velocity)):
# data_i = np.array([velocity[i]*np.ones(np.shape(aoas)), aoas]).T
# plt.plot(aoas, f_LD(data_i), label = concept)
# # plt.scatter(aoas, f_LD((aoas, velocity[i]*np.ones(np.shape(aoas)))))
# plt.legend()
# plt.ylabel('Lift-to-drag ratio')
# plt.show()
range_data = {}
plt.figure()
for concept in concepts:
mat = scipy.io.loadmat(state + '_' + concept)
aoa = mat['aoa'][0]
velocity = mat['V'][0]
cl = mat['CL'].T
LD_ratio = mat['lift_to_drag']
f_LD = RegularGridInterpolator((velocity, aoa), LD_ratio, fill_value = 0, bounds_error = False)
f_L = RegularGridInterpolator((velocity, aoa), cl, fill_value = 0, bounds_error = False)
# velocity = np.linspace(20, 65, 7)
# plt.figure()
# aoas = np.linspace(0,12,1000)
# for i in range(len(velocity)):
# data_i = np.array([velocity[i]*np.ones(np.shape(aoas)), aoas]).T
# plt.plot(aoas, f_L(data_i), label = velocity[i])
# # plt.scatter(aoas, f_L((aoas, velocity[i]*np.ones(np.shape(aoas)))))
# plt.legend()
# plt.show()
# plt.figure()
# aoas = np.linspace(0,12,1000)
# for i in range(len(velocity)):
# data_i = np.array([velocity[i]*np.ones(np.shape(aoas)), aoas]).T
# plt.plot(aoas, f_LD(data_i), label = velocity[i])
# # plt.scatter(aoas, f_LD((aoas, velocity[i]*np.ones(np.shape(aoas)))))
# plt.legend()
# plt.show()
ranges = []
# velocity = np.linspace(20, 60, 5)
for i in range(len(velocity)):
range_i, AOA_i = aircraft_range_varying_AOA(f_L, f_LD, velocity[i])
# plt.plot(np.arange(len(AOA_i)), AOA_i, label=velocity[i])
# plt.scatter(np.arange(len(AOA_i)),AOA_i)
print(i, velocity[i], range_i)
ranges.append(range_i)
# print(velocity[36])
range_data[concept] = ranges
plt.plot(velocity, ranges, lw=2, label=concept)
f = open('ranges_velocity.p', 'wb')
pickle.dump(range_data, f)
f.close()
# plt.xlim(min(velocity), max(velocity))
# plt.ylim(min(ranges), max(ranges))
plt.xlabel('Velocity (m/s)')
plt.ylabel('Range (km)')
plt.legend()
plt.show()
|
mit
|
lazywei/scikit-learn
|
examples/bicluster/plot_spectral_biclustering.py
|
403
|
2011
|
"""
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
|
bsd-3-clause
|
BorisJeremic/Real-ESSI-Examples
|
education_examples/_Chapter_Material_Behaviour_Examples/Multi_Yield_Surface_von_Mises_GGmax/plot_GGmax.py
|
1
|
2914
|
import numpy as np
import matplotlib.pyplot as plt
# target
userInput1 = [0, 1E-6, 1E-5, 5E-5, 1E-4, 0.0005, 0.001, 0.005, 0.01]
userInput2 = [1, 0.99563892, 0.96674888, 0.87318337, 0.78735192, 0.46719464, 0.32043423, 0.10940113, 0.06347752]
Gmax = 3E8
poisson = 0.0
# #################################
gamma = userInput1
GGmax = userInput2
G=[Gmax * item for item in GGmax]
tau = np.zeros(len(gamma))
for it in range(1, len(gamma)):
    tau[it] = gamma[it] * G[it]
print(tau)
epsilon = [item/2. for item in gamma]
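# Editor's note (hedged): the block above rebuilds the target backbone curve
# from the modulus-reduction input, i.e.
#   G(gamma_i) = Gmax * (G/Gmax)_i   and   tau_i = G(gamma_i) * gamma_i,
# while epsilon = gamma / 2 converts engineering shear strain to tensorial
# shear strain for comparison with the ESSI output.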
# # ============================================
# # Figure 1
# # Plot the stress-strain
# # ============================================
strain = np.loadtxt("strain.feioutput")
stress = np.loadtxt("stress.feioutput")
essiGamma = [2*item for item in strain]
# plt.plot(gamma, tau, 'b-^', label='Input' )
# plt.plot(essiGamma, stress, 'r-*', label=' ESSI')
# plt.legend(loc=2)
# plt.xlabel('Strain / (unitless)')
# plt.ylabel('Stress / (Pa)')
# plt.title('Material Behavior: Stress-Strain')
# plt.grid()
# plt.box()
# plt.savefig('backbone.pdf', transparent=True, bbox_inches='tight')
# plt.show()
# # ============================================
# # Figure 3
# # Plot the G/Gmax
# # ============================================
# avoid the divide by zero
fontSIZE = 21
import matplotlib as mpl
label_size = fontSIZE
mpl.rcParams['xtick.labelsize'] = label_size
mpl.rcParams['ytick.labelsize'] = label_size
stress[0]=stress[1]
strain[0]=strain[1]
essiG = [a/b/2. for a,b in zip(stress, strain)]
essiGGmax = [item/Gmax for item in essiG]
# strain_plot = [100* x for x in strain]
# stress_plot = [1./1000* x for x in stress]
plt.semilogx(essiGamma, essiGGmax, 'b-', label='ESSI', linewidth = 5.0 )
plt.semilogx(gamma , GGmax , 'r--', label='Input', linewidth = 5.0 )
# plt.legend(loc=3)
plt.legend(loc=3, prop={'size':fontSIZE})
plt.title('Multi-Yield-Surface vonMises G/Gmax',fontsize=fontSIZE)
plt.xlabel('Strain / (unitless)',fontsize=fontSIZE)
plt.ylabel('G/Gmax / (unitless)',fontsize=fontSIZE)
plt.grid()
plt.box()
plt.savefig('GGmax.pdf', transparent=True, bbox_inches='tight')
plt.show()
# strain_stress = np.loadtxt('strain_stress.txt')
# strain = strain_stress[:,0]
# stress = strain_stress[:,1]
# # vol_s = strain_stress[:,3]
# strain_plot = [100* x for x in strain]
# stress_plot = [1./1000* x for x in stress]
# plt.plot(strain_plot,stress_plot,linewidth=3.0)
# # plt.plot(vol_s, stress)
# minY = min(stress_plot)*1.05
# maxY = max(stress_plot)*1.05
# minX = min(strain_plot)*1.05
# maxX = max(strain_plot)*1.05
# plt.ylim([minY, maxY])
# plt.xlim([minX, maxX])
# plt.xlabel('Shear Strain (%) ',fontsize=fontSIZE)
# plt.ylabel('Shear Stress (kPa)',fontsize=fontSIZE)
# plt.grid()
# # plt.show()
# plt.savefig('multiSurface.pdf', dpi=1200, transparent=True, bbox_inches='tight')
# # plt.savefig('multiSurface', format='svg', dpi=1200, transparent=True, bbox_inches='tight')
|
cc0-1.0
|
kyleabeauchamp/PMTStuff
|
code/NSD1_gmm.py
|
1
|
1788
|
import mixtape.featurizer, mixtape.tica, mixtape.cluster, mixtape.markovstatemodel, mixtape.ghmm
import numpy as np
import mdtraj as md
from parameters import load_trajectories, build_full_featurizer
import sklearn.pipeline, sklearn.externals.joblib
import mixtape.utils
n_choose = 100
stride = 1
lag_time = 1
trj0, trajectories, filenames = load_trajectories(stride=stride)
train = trajectories[0::2]
test = trajectories[1::2]
featurizer = sklearn.externals.joblib.load("./featurizer-%d.job" % n_choose)
n_components = 3
n_states = 5
tica = mixtape.tica.tICA(n_components=n_components, lag_time=lag_time)
subsampler = mixtape.utils.Subsampler(lag_time=lag_time)
msm = mixtape.markovstatemodel.MarkovStateModel()
cluster = mixtape.cluster.GMM(n_components=n_states, covariance_type='full')
feature_pipeline = sklearn.pipeline.Pipeline([("features", featurizer), ('tica', tica)])
cluster_pipeline = sklearn.pipeline.Pipeline([("features", featurizer), ('tica', tica), ("cluster", cluster)])
pipeline = sklearn.pipeline.Pipeline([("features", featurizer), ('tica', tica), ("subsampler", subsampler), ("cluster", cluster), ("msm", msm)])
pipeline.fit(train)
X_all = feature_pipeline.transform(trajectories)
q = np.concatenate(X_all)
covars_ = cluster.covars_
covars_ = cluster.covars_.diagonal(axis1=1, axis2=2)
import matplotlib.pyplot as plt  # needed for the plotting calls below
for i, j in [(0, 1)]:
    plt.figure()
    plt.hexbin(q[:, i], q[:, j], bins='log')
    plt.errorbar(cluster.means_[:, i], cluster.means_[:, j], xerr=covars_[:, i] ** 0.5,
                 yerr=covars_[:, j] ** 0.5, fmt='kx', linewidth=4)
states = cluster_pipeline.transform(trajectories)
ind = msm.draw_samples(states, 3)
samples = mixtape.utils.map_drawn_samples(ind, trajectories)
for i in range(n_states):
for k, t in enumerate(samples[i]):
t.save("pdbs/state%d-%d.pdb" % (i, k))
|
gpl-2.0
|
devanshdalal/scikit-learn
|
sklearn/ensemble/weight_boosting.py
|
29
|
41090
|
"""Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <[email protected]>
# Gilles Louppe <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# Arnaud Joly <[email protected]>
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin, is_regressor
from ..externals import six
from ..externals.six.moves import zip
from ..externals.six.moves import xrange as range
from .forest import BaseForest
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..tree._tree import DTYPE
from ..utils import check_array, check_X_y, check_random_state
from ..utils.extmath import stable_cumsum
from ..metrics import accuracy_score, r2_score
from sklearn.utils.validation import has_fit_parameter, check_is_fitted
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for AdaBoost estimators.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None,
n_estimators=50,
estimator_params=tuple(),
learning_rate=1.,
random_state=None):
super(BaseWeightBoosting, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.learning_rate = learning_rate
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
forced to DTYPE from tree._tree if the base classifier of this
ensemble weighted boosting classifier is a tree or forest.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check parameters
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
if (self.base_estimator is None or
isinstance(self.base_estimator, (BaseDecisionTree,
BaseForest))):
dtype = DTYPE
accept_sparse = 'csc'
else:
dtype = None
accept_sparse = ['csr', 'csc']
X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype,
y_numeric=is_regressor(self))
if sample_weight is None:
# Initialize weights to 1 / n_samples
sample_weight = np.empty(X.shape[0], dtype=np.float64)
sample_weight[:] = 1. / X.shape[0]
else:
sample_weight = check_array(sample_weight, ensure_2d=False)
# Normalize existing weights
sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
# Check that the sample weights sum is positive
if sample_weight.sum() <= 0:
raise ValueError(
"Attempting to fit with a non-positive "
"weighted number of samples.")
# Check parameters
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float64)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float64)
random_state = check_random_state(self.random_state)
for iboost in range(self.n_estimators):
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost,
X, y,
sample_weight,
random_state)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
@abstractmethod
def _boost(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost.
Warning: This method needs to be overridden by subclasses.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
random_state : numpy.RandomState
The current random number generator
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
pass
def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like, shape = [n_samples]
Labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
z : float
"""
for y_pred in self.staged_predict(X):
if isinstance(self, ClassifierMixin):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
try:
norm = self.estimator_weights_.sum()
return (sum(weight * clf.feature_importances_ for weight, clf
in zip(self.estimator_weights_, self.estimators_))
/ norm)
except AttributeError:
raise AttributeError(
"Unable to compute feature importances "
"since base_estimator does not have a "
"feature_importances_ attribute")
def _validate_X_predict(self, X):
"""Ensure that X is in the proper format"""
if (self.base_estimator is None or
isinstance(self.base_estimator,
(BaseDecisionTree, BaseForest))):
X = check_array(X, accept_sparse='csr', dtype=DTYPE)
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return X
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
log_proba = np.log(proba)
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
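# Editor's note (hedged): for K = n_classes the expression returned above is
#   h_k(x) = (K - 1) * (log p_k(x) - (1/K) * sum_j log p_j(x)),
# i.e. algorithm 4, step 2, equation c) of Zhu et al., evaluated row-wise on
# the clipped probability estimates of a single weak learner.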
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
"""An AdaBoost classifier.
An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm known as AdaBoost-SAMME [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeClassifier)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper `classes_`
and `n_classes_` attributes.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
    classes_ : array of shape = [n_classes]
        The class labels.
n_classes_ : int
The number of classes.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
algorithm='SAMME.R',
random_state=None):
super(AdaBoostClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.algorithm = algorithm
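    # Editor's note (illustrative usage sketch; ``X_train`` and ``y_train`` are
    # hypothetical arrays, not defined in this module):
    #
    #     clf = AdaBoostClassifier(n_estimators=100, learning_rate=0.5)
    #     clf.fit(X_train, y_train)
    #     proba = clf.predict_proba(X_train[:5])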
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Returns self.
"""
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
# Fit
return super(AdaBoostClassifier, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostClassifier, self)._validate_estimator(
default=DecisionTreeClassifier(max_depth=1))
# SAMME-R requires predict_proba-enabled base estimators
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator_, 'predict_proba'):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead.")
if not has_fit_parameter(self.base_estimator_, "sample_weight"):
raise ValueError("%s doesn't support sample_weight."
% self.base_estimator_.__class__.__name__)
def _boost(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
random_state : numpy.RandomState
The current random number generator
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == 'SAMME.R':
return self._boost_real(iboost, X, y, sample_weight, random_state)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight,
random_state)
def _boost_real(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator = self._make_estimator(random_state=random_state)
estimator.fit(X, y, sample_weight=sample_weight)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1. / (n_classes - 1), 1.])
y_coding = y_codes.take(classes == y[:, np.newaxis])
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba = y_predict_proba # alias for readability
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
# Boost weight using multi-class AdaBoost SAMME.R alg
estimator_weight = (-1. * self.learning_rate
* (((n_classes - 1.) / n_classes) *
inner1d(y_coding, np.log(y_predict_proba))))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, 1., estimator_error
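    # Editor's note (hedged): in SAMME.R the per-sample boost term computed
    # above is
    #   -learning_rate * ((K - 1) / K) * sum_k y_ik * log p_k(x_i),
    # with the symmetric coding y_ik in {1, -1/(K-1)}, and the sample weights
    # are multiplied by exp(...) of that term; the estimator itself always
    # receives weight 1 in the final combination.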
def _boost_discrete(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator(random_state=random_state)
estimator.fit(X, y, sample_weight=sample_weight)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1. - (1. / n_classes):
self.estimators_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError('BaseClassifier in AdaBoostClassifier '
'ensemble is worse than random, ensemble '
'can not be fit.')
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log((1. - estimator_error) / estimator_error) +
np.log(n_classes - 1.))
        # Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight * incorrect *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, estimator_weight, estimator_error
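    # Editor's note (hedged): the discrete SAMME weight computed above is
    #   alpha_m = learning_rate * (log((1 - err_m) / err_m) + log(K - 1)),
    # and misclassified samples get their weights multiplied by exp(alpha_m);
    # for K == 2 the log(K - 1) term vanishes and this reduces to classical
    # discrete AdaBoost.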
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted classes.
"""
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(
np.argmax(pred, axis=1), axis=0))
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        score : array, shape = [n_samples, k]
            The decision function of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
            otherwise ``k==n_classes``. For binary classification,
            values closer to -1 or 1 mean more like the first or second
            class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
pred = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
pred = sum((estimator.predict(X) == classes).T * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
pred /= self.estimator_weights_.sum()
if n_classes == 2:
pred[:, 0] *= -1
return pred.sum(axis=1)
return pred
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
This method allows monitoring (i.e. determine error on testing set)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        score : generator of array, shape = [n_samples, k]
            The decision function of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
            otherwise ``k==n_classes``. For binary classification,
            values closer to -1 or 1 mean more like the first or second
            class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_pred = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_pred = estimator.predict(X)
current_pred = (current_pred == classes).T * weight
if pred is None:
pred = current_pred
else:
pred += current_pred
if n_classes == 2:
tmp_pred = np.copy(pred)
tmp_pred[:, 0] *= -1
yield (tmp_pred / norm).sum(axis=1)
else:
yield pred / norm
def predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
check_is_fitted(self, "n_classes_")
n_classes = self.n_classes_
X = self._validate_X_predict(X)
if n_classes == 1:
return np.ones((X.shape[0], 1))
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
proba = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
proba = sum(estimator.predict_proba(X) * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
proba /= self.estimator_weights_.sum()
proba = np.exp((1. / (n_classes - 1)) * proba)
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : generator of array, shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
X = self._validate_X_predict(X)
n_classes = self.n_classes_
proba = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_proba = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_proba = estimator.predict_proba(X) * weight
if proba is None:
proba = current_proba
else:
proba += current_proba
real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
normalizer = real_proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
real_proba /= normalizer
yield real_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample are computed as
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
            The class log-probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
return np.log(self.predict_proba(X))
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeRegressor)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each regressor by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
loss : {'linear', 'square', 'exponential'}, optional (default='linear')
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
loss='linear',
random_state=None):
super(AdaBoostRegressor, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.loss = loss
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (real numbers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check loss
if self.loss not in ('linear', 'square', 'exponential'):
raise ValueError(
"loss must be 'linear', 'square', or 'exponential'")
# Fit
return super(AdaBoostRegressor, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostRegressor, self)._validate_estimator(
default=DecisionTreeRegressor(max_depth=3))
def _boost(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
random_state : numpy.RandomState
The current random number generator
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator(random_state=random_state)
# Weighted sampling of the training set with replacement
# For NumPy >= 1.7.0 use np.random.choice
cdf = stable_cumsum(sample_weight)
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(X.shape[0])
bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
bootstrap_idx = np.array(bootstrap_idx, copy=False)
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
error_max = error_vect.max()
if error_max != 0.:
error_vect /= error_max
if self.loss == 'square':
error_vect **= 2
elif self.loss == 'exponential':
error_vect = 1. - np.exp(- error_vect)
# Calculate the average loss
estimator_error = (sample_weight * error_vect).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1., 0.
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
beta = estimator_error / (1. - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1. / beta)
if not iboost == self.n_estimators - 1:
sample_weight *= np.power(
beta,
(1. - error_vect) * self.learning_rate)
return sample_weight, estimator_weight, estimator_error
def _get_median_predict(self, X, limit):
# Evaluate predictions of all estimators
predictions = np.array([
est.predict(X) for est in self.estimators_[:limit]]).T
# Sort the predictions
sorted_idx = np.argsort(predictions, axis=1)
# Find index of median prediction for each sample
weight_cdf = stable_cumsum(self.estimator_weights_[sorted_idx], axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)
median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx]
# Return median predictions
return predictions[np.arange(X.shape[0]), median_estimators]
def predict(self, X):
"""Predict regression value for X.
The predicted regression value of an input sample is computed
        as the weighted median prediction of the regressors in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
return self._get_median_predict(X, len(self.estimators_))
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted regression value of an input sample is computed
        as the weighted median prediction of the regressors in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
for i, _ in enumerate(self.estimators_, 1):
yield self._get_median_predict(X, limit=i)
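# A minimal usage sketch (illustrative, not part of the original module):
# boost shallow regression trees on a noisy sine curve and inspect the fitted
# ensemble. The data, tree depth and ensemble size are arbitrary choices for
# this demonstration; it relies on the module-level numpy (np) and
# DecisionTreeRegressor imports already used above.
def _example_adaboost_r2():
    rng = np.random.RandomState(0)
    X = np.sort(5 * rng.rand(200, 1), axis=0)
    y = np.sin(X).ravel() + 0.1 * rng.randn(200)
    model = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
                              n_estimators=50, learning_rate=1.0,
                              loss='linear', random_state=0)
    model.fit(X, y)
    # per-estimator weights come from the AdaBoost.R2 update log(1. / beta)
    return model.estimator_weights_, model.score(X, y)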
|
bsd-3-clause
|
JPFrancoia/scikit-learn
|
examples/neighbors/plot_species_kde.py
|
39
|
4039
|
"""
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <http://matplotlib.org/basemap>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']
Xtrain = np.vstack([data['train']['dd lat'],
data['train']['dd long']]).T
ytrain = np.array([d.decode('ascii').startswith('micro')
for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180. # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(bandwidth=0.04, metric='haversine',
kernel='gaussian', algorithm='ball_tree')
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = -9999 + np.zeros(land_mask.shape[0])
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
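# A minimal follow-up sketch (illustrative, not part of the original example):
# query the KDE fitted in the last loop iteration at a single point. The
# coordinates below are an arbitrary location; like the training data, the
# query must be given as [latitude, longitude] in radians because of the
# haversine metric.
query_deg = np.array([[-10.0, -60.0]])
log_density = kde.score_samples(query_deg * np.pi / 180.)
print(" - log-density at lat/long %s deg: %.3f"
      % (query_deg[0], log_density[0]))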
|
bsd-3-clause
|
apdjustino/DRCOG_Urbansim
|
src/opus_gui/results_manager/run/indicator_framework/reporter/indicator_results.py
|
1
|
15139
|
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
import os
from opus_gui.results_manager.run.indicator_framework.utilities.indicator_meta_data import IndicatorMetaData
from opus_gui.results_manager.run.indicator_framework.utilities.indicator_data_manager import IndicatorDataManager
from opus_gui.results_manager.run.indicator_framework.maker.source_data import SourceData
class IndicatorResults(object):
""" Takes the descriptions and locations of precomputed indicators
and generates an html file to browse them.
The purpose of IndicatorResults is to take in a description
of the indicators that were requested through the GUI and output
an html file that can be used to browse the indicator results.
There is a single entry point and all the other functions are private.
"""
def __init__(self, show_error_dialog = False):
self.show_error_dialog = show_error_dialog
self.data_manager = IndicatorDataManager()
def create_page(self,
source_data,
indicators,
output_directory = None,
page_name = 'indicator_results.html'):
""" Generates the html page based on information about precomputed indicators.
The file path to the generated html page is returned.
Parameters:
source_data -- information about where the indicators were computed from
page_name -- the filename of the outputted html file
indicators -- a list of the generated indicators
"""
#stores the html that is built up over the course of execution
html = []
self.indicator_documentation_mapping = {}
html.append( self._output_header() )
html.append( self._start_body() )
#generate html for configuration info
html.append( self._output_configuration_info(source_data) )
html.append( self._start_table() )
#load previously computed indicators
indicator_dirs = []
for i in indicators.values():
#if i.write_to_file:
dir = i.storage_location
if not dir in indicator_dirs:
indicator_dirs.append(dir)
indicators = []
for dir in indicator_dirs:
try:
indicators += self.data_manager.import_indicators(
indicator_directory = dir)
except:
pass
rows = []
self._output_body(source_data, indicators, rows)
unique_rows = dict([(' '.join(x), x) for x in rows])
#rows_by_date_dict = dict([(x[4],x) for x in unique_rows.itervalues()])
sorted_rows = unique_rows.items()
sorted_rows.sort(reverse = True)
sorted_rows = [row[1] for row in sorted_rows]
for row in sorted_rows:
html.append(self._output_row(row))
html.append( self._end_table() )
html.append( self._end_page() )
if output_directory is None:
output_directory = indicator_dirs[0]
file = open(os.path.join(
output_directory,
page_name),
'w')
file.write(''.join(html))
file.close()
return file.name
def _output_body(self, source_data, indicators, rows, test = False):
""" Generates the html for the indicators.
Finds the indicator file for every year each of the indicators
were run and formats this into a table.
test is only used for unit testing
"""
for indicator in indicators:
years = indicator.years
if years == []: continue
if indicator.is_single_year_indicator_image_type():
links = []
for year in years:
path = indicator.get_file_path(year)
if os.path.exists(path) or test:
link = self._get_link(indicator.get_file_name(year),str(year))
links.append(link)
image_paths = ','.join(links)
else:
#aggregate multiyear run data so it is outputted nicely...
path = indicator.get_file_path()
if os.path.exists(path) or test:
year_aggregation = self._get_year_aggregation(years)
image_paths = self._get_link(indicator.get_file_name(),year_aggregation)
doc_link = self._get_doc_link(indicator)
row = [
doc_link,
indicator.dataset_name,
indicator.get_visualization_shorthand(),
image_paths
]
rows.append(row)
def _get_year_aggregation(self, years):
"""Given a sequence of years, outputs a string that represents
the years with dashes between consecutive years
(e.g. "1983,1985-1987,1999")
"""
years = list(years) #traits funniness
years.sort()
if years == []: return ''
year_aggregation = []
(first, last) = (years[0], years[0])
for i in range(1,len(years)):
if years[i] == last + 1: last = years[i]
else:
if first == last: year_aggregation.append('%i' % first)
else: year_aggregation.append('%i-%i' % (first, last))
(first, last) = (years[i], years[i])
if first == last: year_aggregation.append('%i' % first)
else: year_aggregation.append('%i-%i' % (first, last))
return ','.join(year_aggregation)
#private HTML output functions. None of the above functions directly outputs any HTML.
def _output_configuration_info(self, source_data):
html = []
config_fields = {
'Cache directory: ' : source_data.cache_directory,
}
for title,value in config_fields.iteritems():
html.append('<b>%s</b>%s<br><br>\n'%(title,value))
return ''.join(html)
def _get_doc_link(self, indicator):
urls = []
for attribute in indicator.attributes:
try:
attribute_alias = indicator.get_attribute_alias(attribute)
url = self._get_documentation_url(attribute_alias)
except:
url = None
if url is None:
url = indicator.get_file_name(suppress_extension_addition=True)
else:
url = self._get_link(url,
indicator.get_file_name(suppress_extension_addition=True))
urls.append(url)
return '<br>'.join(urls)
def _get_link(self,url,name):
url = url.replace('\\\\','/////').replace('\\','/')
return '<A HREF="%s">%s</A>' % (url, name)
def _get_documentation_url(self, attribute):
""" A mapping between attribute name and documentation"""
if self.indicator_documentation_mapping == {}:
indicator_info = IndicatorMetaData.get_indicator_info()
self.indicator_documentation_mapping = {}
for (name, path, variable, documentation) in indicator_info:
self.indicator_documentation_mapping[variable] = documentation
try:
doc_file = self.indicator_documentation_mapping[attribute]
prefix = IndicatorMetaData.get_indicator_documentation_URL()
except: return None
return os.path.join(prefix,doc_file)
#HTML outputting methods
def _output_header(self):
return '<head><title>Indicator Results</title></head>\n'
def _output_section(self, title):
return '<h2>%s</h2>\n' % title
def _start_body(self):
return '<body>\n' + '<h2>Indicator Results</h2>\n'
def _start_table(self):
return (
'<table border=1 cellspacing="0" cellpadding="5" style="border-style: solid; border-color: black">\n'
'\t<tr>\n'
'\t\t<td><b>Indicator name</b></td>\n'
'\t\t<td><b>Dataset</b></td>\n'
'\t\t<td><b>Type</b></td>\n'
'\t\t<td><b>Years</b></td>\n'
'\t</tr>\n')
def _end_table(self):
return '</table>\n'
def _output_row(self, row):
html = ['\t<tr>\n']
for col in row:
html.append( '\t\t<td>%s</td>\n' % col )
html.append( '\t</tr>\n' )
return ''.join(html)
def _end_page(self):
return '</body>'
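# A minimal sketch (illustrative, not part of the original module) of the year
# aggregation format documented in _get_year_aggregation: consecutive years
# collapse into dash-separated ranges while isolated years stay single.
def _example_year_aggregation():
    ir = IndicatorResults()
    # expected result: '1983,1985-1987,1999'
    return ir._get_year_aggregation([1983, 1985, 1986, 1987, 1999])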
from opus_core.tests import opus_unittest
import tempfile
import shutil
from opus_core.configurations.dataset_pool_configuration import DatasetPoolConfiguration
from opus_gui.results_manager.run.indicator_framework.test_classes.test_with_attribute_data import TestWithAttributeData
class IndicatorResultsTests(TestWithAttributeData):
def setUp(self):
TestWithAttributeData.setUp(self)
self.i_results = IndicatorResults()
self.i_results.indicator_documentation_mapping = {}
self.source_data = SourceData(
cache_directory = self.temp_cache_path,
run_id = -1,
name = 'test',
run_description = '(opus_core)',
dataset_pool_configuration = DatasetPoolConfiguration(
package_order=['opus_core'],
))
def test__year_aggregation(self):
year_aggregation = self.i_results._get_year_aggregation([2001,2002,2004,2006,2007])
self.assertEqual(year_aggregation, "2001-2002,2004,2006-2007")
year_aggregation = self.i_results._get_year_aggregation([2001,2002,2004,2006])
self.assertEqual(year_aggregation, "2001-2002,2004,2006")
year_aggregation = self.i_results._get_year_aggregation([2000,2002])
self.assertEqual(year_aggregation, "2000,2002")
def test__output_configuration_info(self):
output = (
'<b>Cache directory: </b>%s<br><br>\n'%self.temp_cache_path
)
html = self.i_results._output_configuration_info(self.source_data)
self.assertEqual(output, html)
def test___get_documentation_url(self):
output = 'http://www.urbansim.org/docs/indicators/population.xml'
result = self.i_results._get_documentation_url('population')
self.assertEqual(result, output)
def test__output_indicators(self):
try:
from opus_gui.results_manager.run.indicator_framework.image_types.mapnik_map import Map
from opus_gui.results_manager.run.indicator_framework.image_types.matplotlib_chart import Chart
from opus_gui.results_manager.run.indicator_framework.image_types.matplotlib_lorenzcurve import LorenzCurve
except: pass
else:
self.source_data.years = [1980, 1982]
requests = [
Map(
source_data = self.source_data,
attribute = 'opus_core.test.population',
scale = [1,75000],
dataset_name = 'test'
),
Chart(
source_data = self.source_data,
attribute = 'opus_core.test.population',
dataset_name = 'test',
name = 'my_name',
),
Chart(
source_data = self.source_data,
attribute = 'opus_core.test.population',
dataset_name = 'test',
years = [1981]
),
LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.population',
dataset_name = 'test'
),
]
image_type = requests[0].get_visualization_shorthand()
(dataset,name) = (requests[0].dataset_name,
requests[0].name)
image_type2 = requests[1].get_visualization_shorthand()
(dataset2,name2) = (requests[1].dataset_name,
requests[1].name)
image_type3 = requests[2].get_visualization_shorthand()
(dataset3,name3) = (requests[2].dataset_name,
requests[2].name)
image_type4 = requests[3].get_visualization_shorthand()
(dataset4,name4) = (requests[3].dataset_name,
requests[3].name)
doc_link = '<A HREF="http://www.urbansim.org/docs/indicators/population.xml">%s__%s__population</A>'
doc_link2 = '<A HREF="http://www.urbansim.org/docs/indicators/population.xml">%s__%s__my_name</A>'
output = [
[ doc_link%('test',image_type),
dataset,
image_type,
('<A HREF="%s__%s__%s__1980.png">1980</A>,'
'<A HREF="%s__%s__%s__1982.png">1982</A>')%
(dataset, image_type, name, dataset, image_type, name)
],
[ doc_link2%('test',image_type2),
dataset2,
image_type2,
'<A HREF="%s__%s__%s.png">1980,1982</A>'%(dataset2,image_type2,name2)
],
[ doc_link%('test',image_type3),
dataset3,
image_type3,
'<A HREF="%s__%s__%s.png">1981</A>'%(dataset3,image_type3,name3)
],
[ doc_link%('test',image_type4),
dataset4,
image_type4,
('<A HREF="%s__%s__%s__1980.png">1980</A>,'
'<A HREF="%s__%s__%s__1982.png">1982</A>')%
(dataset4,image_type4,name4,dataset4,image_type4,name4)
]
]
for rqst in requests:
rqst.source_data = self.source_data
rows = []
self.i_results._output_body(self.source_data, requests, rows, test = True)
for i in range(len(output)):
if output[i] != rows[i]:
print output[i]
print rows[i]
#print ''
#for l in output: print l
#for l in rows: print l
self.assertEqual(output, rows)
if __name__=='__main__':
opus_unittest.main()
|
agpl-3.0
|
glneo/gnuradio
|
gr-digital/examples/example_costas.py
|
49
|
5316
|
#!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, digital, filter
from gnuradio import blocks
from gnuradio import channels
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_costas(gr.top_block):
def __init__(self, N, sps, rolloff, ntaps, bw, noise, foffset, toffset, poffset):
gr.top_block.__init__(self)
rrc_taps = filter.firdes.root_raised_cosine(
sps, sps, 1.0, rolloff, ntaps)
data = 2.0*scipy.random.randint(0, 2, N) - 1.0
data = scipy.exp(1j*poffset) * data
self.src = blocks.vector_source_c(data.tolist(), False)
self.rrc = filter.interp_fir_filter_ccf(sps, rrc_taps)
self.chn = channels.channel_model(noise, foffset, toffset)
self.cst = digital.costas_loop_cc(bw, 2)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_cst = blocks.vector_sink_c()
self.vsnk_frq = blocks.vector_sink_f()
self.connect(self.src, self.rrc, self.chn, self.cst, self.vsnk_cst)
self.connect(self.rrc, self.vsnk_src)
self.connect((self.cst,1), self.vsnk_frq)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=2000,
help="Set the number of samples to process [default=%default]")
parser.add_option("-S", "--sps", type="int", default=4,
help="Set the samples per symbol [default=%default]")
parser.add_option("-r", "--rolloff", type="eng_float", default=0.35,
help="Set the rolloff factor [default=%default]")
parser.add_option("-W", "--bandwidth", type="eng_float", default=2*scipy.pi/100.0,
help="Set the loop bandwidth [default=%default]")
parser.add_option("-n", "--ntaps", type="int", default=45,
help="Set the number of taps in the filters [default=%default]")
parser.add_option("", "--noise", type="eng_float", default=0.0,
help="Set the simulation noise voltage [default=%default]")
parser.add_option("-f", "--foffset", type="eng_float", default=0.0,
help="Set the simulation's normalized frequency offset (in Hz) [default=%default]")
parser.add_option("-t", "--toffset", type="eng_float", default=1.0,
help="Set the simulation's timing offset [default=%default]")
parser.add_option("-p", "--poffset", type="eng_float", default=0.707,
help="Set the simulation's phase offset [default=%default]")
(options, args) = parser.parse_args ()
# Adjust N for the interpolation by sps
options.nsamples = options.nsamples // options.sps
# Set up the program-under-test
put = example_costas(options.nsamples, options.sps, options.rolloff,
options.ntaps, options.bandwidth, options.noise,
options.foffset, options.toffset, options.poffset)
put.run()
data_src = scipy.array(put.vsnk_src.data())
    # Convert the Costas loop's LO frequency from rads/sec to Hz
data_frq = scipy.array(put.vsnk_frq.data()) / (2.0*scipy.pi)
# adjust this to align with the data.
data_cst = scipy.array(3*[0,]+list(put.vsnk_cst.data()))
# Plot the Costas loop's LO frequency
f1 = pylab.figure(1, figsize=(12,10), facecolor='w')
s1 = f1.add_subplot(2,2,1)
s1.plot(data_frq)
s1.set_title("Costas LO")
s1.set_xlabel("Samples")
s1.set_ylabel("Frequency (normalized Hz)")
# Plot the IQ symbols
s3 = f1.add_subplot(2,2,2)
s3.plot(data_src.real, data_src.imag, "o")
s3.plot(data_cst.real, data_cst.imag, "rx")
s3.set_title("IQ")
s3.set_xlabel("Real part")
s3.set_ylabel("Imag part")
s3.set_xlim([-2, 2])
s3.set_ylim([-2, 2])
# Plot the symbols in time
s4 = f1.add_subplot(2,2,3)
s4.set_position([0.125, 0.05, 0.775, 0.4])
s4.plot(data_src.real, "o-")
s4.plot(data_cst.real, "rx-")
s4.set_title("Symbols")
s4.set_xlabel("Samples")
s4.set_ylabel("Real Part of Signals")
pylab.show()
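# A minimal sketch (illustrative, not part of the original example): run the
# same flowgraph without the command-line front end and without plotting. All
# default values below are assumptions chosen only for this demonstration.
def run_headless(nsamples=500, sps=4, rolloff=0.35, ntaps=45,
                 bw=2*scipy.pi/100.0, noise=0.1,
                 foffset=0.001, toffset=1.0, poffset=0.707):
    tb = example_costas(nsamples // sps, sps, rolloff, ntaps,
                        bw, noise, foffset, toffset, poffset)
    tb.run()
    # return the Costas loop's tracked LO frequency in normalized Hz
    return scipy.array(tb.vsnk_frq.data()) / (2.0 * scipy.pi)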
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
|
gpl-3.0
|
pradyu1993/scikit-learn
|
examples/applications/plot_species_distribution_modeling.py
|
7
|
7349
|
"""
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two South American
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD Style.
from time import time
import numpy as np
import pylab as pl
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print __doc__
def create_species_bunch(species_name,
train, test,
coverages, xgrid, ygrid):
"""
create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
points = dict(test=test, train=train)
for label, pts in points.iteritems():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=["bradypus_variegatus_0",
"microryzomys_minutus_0"]):
"""
Plot the species distribution.
"""
if len(species) > 2:
print ("Note: when more than two species are provided, only "
"the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print "_" * 80
print "Modeling distribution of species '%s'" % species.name
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print " - fit OneClassSVM ... ",
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print "done. "
# Plot map of South America
pl.subplot(1, 2, i + 1)
if basemap:
print " - plot coastlines using basemap"
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print " - plot coastlines from coverage"
pl.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
pl.xticks([])
pl.yticks([])
print " - predict species distribution"
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
pl.contourf(X, Y, Z, levels=levels, cmap=pl.cm.Reds)
pl.colorbar(format='%.2f')
# scatter training/testing points
pl.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
pl.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
pl.legend()
pl.title(species.name)
pl.axis('equal')
# Compute AUC w.r.t. background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
pl.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print "\n Area under the ROC curve : %f" % roc_auc
print "\ntime elapsed: %.2fs" % (time() - t0)
plot_species_distribution()
pl.show()
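# A minimal follow-up sketch (illustrative, not part of the original example):
# threshold the last species' decision values into a crude presence map. The
# median-over-land cut-off is an arbitrary choice for the demonstration and
# reuses Z, land_reference and species from the final loop iteration.
threshold = np.median(Z[land_reference > -9999])
presence = Z > threshold
print "Cells above threshold for '%s': %d" % (species.name, presence.sum())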
|
bsd-3-clause
|
MartinSavc/scikit-learn
|
sklearn/covariance/graph_lasso_.py
|
127
|
25626
|
"""GraphLasso: sparse inverse covariance estimation with an l1-penalized
estimator.
"""
# Author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import warnings
import operator
import sys
import time
import numpy as np
from scipy import linalg
from .empirical_covariance_ import (empirical_covariance, EmpiricalCovariance,
log_likelihood)
from ..utils import ConvergenceWarning
from ..utils.extmath import pinvh
from ..utils.validation import check_random_state, check_array
from ..linear_model import lars_path
from ..linear_model import cd_fast
from ..cross_validation import check_cv, cross_val_score
from ..externals.joblib import Parallel, delayed
import collections
# Helper functions to compute the objective and dual objective functions
# of the l1-penalized estimator
def _objective(mle, precision_, alpha):
"""Evaluation of the graph-lasso objective function
the objective function is made of a shifted scaled version of the
normalized log-likelihood (i.e. its empirical mean over the samples) and a
penalisation term to promote sparsity
"""
p = precision_.shape[0]
cost = - 2. * log_likelihood(mle, precision_) + p * np.log(2 * np.pi)
cost += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return cost
def _dual_gap(emp_cov, precision_, alpha):
"""Expression of the dual gap convergence criterion
The specific definition is given in Duchi "Projected Subgradient Methods
for Learning Sparse Gaussians".
"""
gap = np.sum(emp_cov * precision_)
gap -= precision_.shape[0]
gap += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return gap
def alpha_max(emp_cov):
"""Find the maximum alpha for which there are some non-zeros off-diagonal.
Parameters
----------
emp_cov : 2D array, (n_features, n_features)
The sample covariance matrix
Notes
-----
    This results from the bound for all the Lasso problems that are solved
in GraphLasso: each time, the row of cov corresponds to Xy. As the
bound for alpha is given by `max(abs(Xy))`, the result follows.
"""
A = np.copy(emp_cov)
A.flat[::A.shape[0] + 1] = 0
return np.max(np.abs(A))
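# A minimal sketch (illustrative, not part of the original module): for any
# alpha >= alpha_max(emp_cov) the l1 penalty drives every off-diagonal
# coefficient to zero, so the estimate is diagonal. The toy data below is an
# arbitrary choice for the demonstration.
def _alpha_max_example():
    rng = np.random.RandomState(0)
    X = rng.randn(60, 5)
    emp_cov = empirical_covariance(X)
    # largest absolute off-diagonal entry of the empirical covariance
    return alpha_max(emp_cov)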
# The g-lasso algorithm
def graph_lasso(emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4,
enet_tol=1e-4, max_iter=100, verbose=False,
return_costs=False, eps=np.finfo(np.float64).eps,
return_n_iter=False):
"""l1-penalized covariance estimator
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
emp_cov : 2D ndarray, shape (n_features, n_features)
Empirical covariance from which to compute the covariance estimate.
alpha : positive float
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
cov_init : 2D array (n_features, n_features), optional
The initial guess for the covariance.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : boolean, optional
If verbose is True, the objective function and dual gap are
printed at each iteration.
return_costs : boolean, optional
If return_costs is True, the objective function and dual gap
at each iteration are returned.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
The estimated covariance matrix.
precision : 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrix.
costs : list of (objective, dual_gap) pairs
The list of values of the objective function and the dual gap at
each iteration. Returned only if return_costs is True.
n_iter : int
Number of iterations. Returned only if `return_n_iter` is set to True.
See Also
--------
GraphLasso, GraphLassoCV
Notes
-----
The algorithm employed to solve this problem is the GLasso algorithm,
from the Friedman 2008 Biostatistics paper. It is the same algorithm
as in the R `glasso` package.
One possible difference with the `glasso` R package is that the
diagonal coefficients are not penalized.
"""
_, n_features = emp_cov.shape
if alpha == 0:
if return_costs:
precision_ = linalg.inv(emp_cov)
cost = - 2. * log_likelihood(emp_cov, precision_)
cost += n_features * np.log(2 * np.pi)
d_gap = np.sum(emp_cov * precision_) - n_features
if return_n_iter:
return emp_cov, precision_, (cost, d_gap), 0
else:
return emp_cov, precision_, (cost, d_gap)
else:
if return_n_iter:
return emp_cov, linalg.inv(emp_cov), 0
else:
return emp_cov, linalg.inv(emp_cov)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init.copy()
# As a trivial regularization (Tikhonov like), we scale down the
# off-diagonal coefficients of our starting point: This is needed, as
# in the cross-validation the cov_init can easily be
    # ill-conditioned, and the CV loop blows up. Besides, this takes a
    # conservative standpoint on the initial conditions, and it tends to
# make the convergence go faster.
covariance_ *= 0.95
diagonal = emp_cov.flat[::n_features + 1]
covariance_.flat[::n_features + 1] = diagonal
precision_ = pinvh(covariance_)
indices = np.arange(n_features)
costs = list()
    # The different l1 regression solvers have different numerical errors
if mode == 'cd':
errors = dict(over='raise', invalid='ignore')
else:
errors = dict(invalid='raise')
try:
# be robust to the max_iter=0 edge case, see:
# https://github.com/scikit-learn/scikit-learn/issues/4134
d_gap = np.inf
for i in range(max_iter):
for idx in range(n_features):
sub_covariance = covariance_[indices != idx].T[indices != idx]
row = emp_cov[idx, indices != idx]
with np.errstate(**errors):
if mode == 'cd':
# Use coordinate descent
coefs = -(precision_[indices != idx, idx]
/ (precision_[idx, idx] + 1000 * eps))
coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
coefs, alpha, 0, sub_covariance, row, row,
max_iter, enet_tol, check_random_state(None), False)
else:
# Use LARS
_, _, coefs = lars_path(
sub_covariance, row, Xy=row, Gram=sub_covariance,
alpha_min=alpha / (n_features - 1), copy_Gram=True,
method='lars', return_path=False)
# Update the precision matrix
precision_[idx, idx] = (
1. / (covariance_[idx, idx]
- np.dot(covariance_[indices != idx, idx], coefs)))
precision_[indices != idx, idx] = (- precision_[idx, idx]
* coefs)
precision_[idx, indices != idx] = (- precision_[idx, idx]
* coefs)
coefs = np.dot(sub_covariance, coefs)
covariance_[idx, indices != idx] = coefs
covariance_[indices != idx, idx] = coefs
d_gap = _dual_gap(emp_cov, precision_, alpha)
cost = _objective(emp_cov, precision_, alpha)
if verbose:
print(
'[graph_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e'
% (i, cost, d_gap))
if return_costs:
costs.append((cost, d_gap))
if np.abs(d_gap) < tol:
break
if not np.isfinite(cost) and i > 0:
raise FloatingPointError('Non SPD result: the system is '
'too ill-conditioned for this solver')
else:
            warnings.warn('graph_lasso: did not converge after %i iterations:'
' dual gap: %.3e' % (max_iter, d_gap),
ConvergenceWarning)
except FloatingPointError as e:
e.args = (e.args[0]
+ '. The system is too ill-conditioned for this solver',)
raise e
if return_costs:
if return_n_iter:
return covariance_, precision_, costs, i + 1
else:
return covariance_, precision_, costs
else:
if return_n_iter:
return covariance_, precision_, i + 1
else:
return covariance_, precision_
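# A minimal usage sketch (illustrative, not part of the original module):
# run the solver on the empirical covariance of random Gaussian data. The
# sample size and penalty below are arbitrary choices for the demonstration.
def _graph_lasso_example(alpha=0.2):
    rng = np.random.RandomState(0)
    X = rng.randn(100, 4)
    emp_cov = empirical_covariance(X)
    # with the defaults the function returns (covariance_, precision_)
    covariance, precision = graph_lasso(emp_cov, alpha=alpha, mode='cd')
    return covariance, precision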
class GraphLasso(EmpiricalCovariance):
"""Sparse inverse covariance estimation with an l1-penalized estimator.
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
alpha : positive float, default 0.01
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
mode : {'cd', 'lars'}, default 'cd'
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, default 1e-4
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, default 100
The maximum number of iterations.
verbose : boolean, default False
If verbose is True, the objective function and dual gap are
        printed at each iteration.
assume_centered : boolean, default False
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : array-like, shape (n_features, n_features)
Estimated covariance matrix
precision_ : array-like, shape (n_features, n_features)
        Estimated precision matrix (inverse covariance).
n_iter_ : int
Number of iterations run.
See Also
--------
graph_lasso, GraphLassoCV
"""
def __init__(self, alpha=.01, mode='cd', tol=1e-4, enet_tol=1e-4,
max_iter=100, verbose=False, assume_centered=False):
self.alpha = alpha
self.mode = mode
self.tol = tol
self.enet_tol = enet_tol
self.max_iter = max_iter
self.verbose = verbose
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=self.alpha, mode=self.mode, tol=self.tol,
enet_tol=self.enet_tol, max_iter=self.max_iter,
verbose=self.verbose, return_n_iter=True)
return self
# Cross-validation with GraphLasso
def graph_lasso_path(X, alphas, cov_init=None, X_test=None, mode='cd',
tol=1e-4, enet_tol=1e-4, max_iter=100, verbose=False):
"""l1-penalized covariance estimator along a path of decreasing alphas
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
X : 2D ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate.
alphas : list of positive floats
        The list of regularization parameters, in decreasing order.
X_test : 2D array, shape (n_test_samples, n_features), optional
Optional test matrix to measure generalisation error.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : integer, optional
The higher the verbosity flag, the more information is printed
during the fitting.
Returns
-------
covariances_ : List of 2D ndarray, shape (n_features, n_features)
The estimated covariance matrices.
precisions_ : List of 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrices.
scores_ : List of float
The generalisation error (log-likelihood) on the test data.
Returned only if test data is passed.
"""
inner_verbose = max(0, verbose - 1)
emp_cov = empirical_covariance(X)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init
covariances_ = list()
precisions_ = list()
scores_ = list()
if X_test is not None:
test_emp_cov = empirical_covariance(X_test)
for alpha in alphas:
try:
# Capture the errors, and move on
covariance_, precision_ = graph_lasso(
emp_cov, alpha=alpha, cov_init=covariance_, mode=mode, tol=tol,
enet_tol=enet_tol, max_iter=max_iter, verbose=inner_verbose)
covariances_.append(covariance_)
precisions_.append(precision_)
if X_test is not None:
this_score = log_likelihood(test_emp_cov, precision_)
except FloatingPointError:
this_score = -np.inf
covariances_.append(np.nan)
precisions_.append(np.nan)
if X_test is not None:
if not np.isfinite(this_score):
this_score = -np.inf
scores_.append(this_score)
if verbose == 1:
sys.stderr.write('.')
elif verbose > 1:
if X_test is not None:
print('[graph_lasso_path] alpha: %.2e, score: %.2e'
% (alpha, this_score))
else:
print('[graph_lasso_path] alpha: %.2e' % alpha)
if X_test is not None:
return covariances_, precisions_, scores_
return covariances_, precisions_
class GraphLassoCV(GraphLasso):
"""Sparse inverse covariance w/ cross-validated choice of the l1 penalty
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
    alphas : integer, or list of positive floats, optional
If an integer is given, it fixes the number of points on the
grids of alpha to be used. If a list is given, it gives the
grid to be used. See the notes in the class docstring for
more details.
    n_refinements : strictly positive integer
The number of times the grid is refined. Not used if explicit
values of alphas are passed.
cv : cross-validation generator, optional
see sklearn.cross_validation module. If None is passed, defaults to
a 3-fold strategy
    tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
    max_iter : integer, optional
Maximum number of iterations.
    mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where number of features is greater
than number of samples. Elsewhere prefer cd which is more numerically
stable.
    n_jobs : int, optional
        Number of jobs to run in parallel (default 1).
    verbose : boolean, optional
If verbose is True, the objective function and duality gap are
printed at each iteration.
assume_centered : Boolean
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : numpy.ndarray, shape (n_features, n_features)
Estimated covariance matrix.
precision_ : numpy.ndarray, shape (n_features, n_features)
Estimated precision matrix (inverse covariance).
alpha_ : float
Penalization parameter selected.
cv_alphas_ : list of float
All penalization parameters explored.
    grid_scores : 2D numpy.ndarray, shape (n_alphas, n_folds)
Log-likelihood score on left-out data across folds.
n_iter_ : int
Number of iterations run for the optimal alpha.
See Also
--------
graph_lasso, GraphLasso
Notes
-----
The search for the optimal penalization parameter (alpha) is done on an
iteratively refined grid: first the cross-validated scores on a grid are
computed, then a new refined grid is centered around the maximum, and so
on.
One of the challenges which is faced here is that the solvers can
fail to converge to a well-conditioned estimate. The corresponding
values of alpha then come out as missing values, but the optimum may
be close to these missing values.
"""
def __init__(self, alphas=4, n_refinements=4, cv=None, tol=1e-4,
enet_tol=1e-4, max_iter=100, mode='cd', n_jobs=1,
verbose=False, assume_centered=False):
self.alphas = alphas
self.n_refinements = n_refinements
self.mode = mode
self.tol = tol
self.enet_tol = enet_tol
self.max_iter = max_iter
self.verbose = verbose
self.cv = cv
self.n_jobs = n_jobs
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
"""Fits the GraphLasso covariance model to X.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
"""
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
cv = check_cv(self.cv, X, y, classifier=False)
# List of (alpha, scores, covs)
path = list()
n_alphas = self.alphas
inner_verbose = max(0, self.verbose - 1)
if isinstance(n_alphas, collections.Sequence):
alphas = self.alphas
n_refinements = 1
else:
n_refinements = self.n_refinements
alpha_1 = alpha_max(emp_cov)
alpha_0 = 1e-2 * alpha_1
alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1),
n_alphas)[::-1]
t0 = time.time()
for i in range(n_refinements):
with warnings.catch_warnings():
# No need to see the convergence warnings on this grid:
# they will always be points that will not converge
# during the cross-validation
warnings.simplefilter('ignore', ConvergenceWarning)
# Compute the cross-validated loss on the current grid
                # NOTE: Warm-restarting graph_lasso_path has been tried, and
                # it did not yield any gain (same execution time with
                # or without warm restart).
this_path = Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose
)(
delayed(graph_lasso_path)(
X[train], alphas=alphas,
X_test=X[test], mode=self.mode,
tol=self.tol, enet_tol=self.enet_tol,
max_iter=int(.1 * self.max_iter),
verbose=inner_verbose)
for train, test in cv)
            # Little dance to transform the list into what we need
covs, _, scores = zip(*this_path)
covs = zip(*covs)
scores = zip(*scores)
path.extend(zip(alphas, scores, covs))
path = sorted(path, key=operator.itemgetter(0), reverse=True)
# Find the maximum (avoid using built in 'max' function to
# have a fully-reproducible selection of the smallest alpha
# in case of equality)
best_score = -np.inf
last_finite_idx = 0
for index, (alpha, scores, _) in enumerate(path):
this_score = np.mean(scores)
if this_score >= .1 / np.finfo(np.float64).eps:
this_score = np.nan
if np.isfinite(this_score):
last_finite_idx = index
if this_score >= best_score:
best_score = this_score
best_index = index
# Refine the grid
if best_index == 0:
# We do not need to go back: we have chosen
# the highest value of alpha for which there are
# non-zero coefficients
alpha_1 = path[0][0]
alpha_0 = path[1][0]
elif (best_index == last_finite_idx
and not best_index == len(path) - 1):
# We have non-converged models on the upper bound of the
# grid, we need to refine the grid there
alpha_1 = path[best_index][0]
alpha_0 = path[best_index + 1][0]
elif best_index == len(path) - 1:
alpha_1 = path[best_index][0]
alpha_0 = 0.01 * path[best_index][0]
else:
alpha_1 = path[best_index - 1][0]
alpha_0 = path[best_index + 1][0]
if not isinstance(n_alphas, collections.Sequence):
alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0),
n_alphas + 2)
alphas = alphas[1:-1]
if self.verbose and n_refinements > 1:
print('[GraphLassoCV] Done refinement % 2i out of %i: % 3is'
% (i + 1, n_refinements, time.time() - t0))
path = list(zip(*path))
grid_scores = list(path[1])
alphas = list(path[0])
# Finally, compute the score with alpha = 0
alphas.append(0)
grid_scores.append(cross_val_score(EmpiricalCovariance(), X,
cv=cv, n_jobs=self.n_jobs,
verbose=inner_verbose))
self.grid_scores = np.array(grid_scores)
best_alpha = alphas[best_index]
self.alpha_ = best_alpha
self.cv_alphas_ = alphas
# Finally fit the model with the selected alpha
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=best_alpha, mode=self.mode, tol=self.tol,
enet_tol=self.enet_tol, max_iter=self.max_iter,
verbose=inner_verbose, return_n_iter=True)
return self
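# A minimal usage sketch (illustrative, not part of the original module):
# cross-validate the penalty on samples drawn from a known sparse Gaussian
# model. The generating precision matrix and sample size are arbitrary
# choices for the demonstration.
def _graph_lasso_cv_example():
    rng = np.random.RandomState(0)
    true_prec = np.array([[1.0, 0.4, 0.0],
                          [0.4, 1.0, 0.0],
                          [0.0, 0.0, 1.0]])
    X = rng.multivariate_normal(np.zeros(3), linalg.inv(true_prec), size=200)
    model = GraphLassoCV(alphas=4, n_refinements=4, cv=3)
    model.fit(X)
    # alpha_ is the selected penalty, precision_ the sparse inverse covariance
    return model.alpha_, model.precision_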
|
bsd-3-clause
|
petosegan/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
72
|
47440
|
"""
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import raises
from sklearn.utils.validation import check_random_state
from sklearn.utils.validation import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.preprocessing._weights import _balance_weights
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
splitter="presort-best"),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
splitter="presort-best"),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = [name for name, Tree in ALL_TREES.items()
if Tree().splitter in SPARSE_SPLITTERS]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, return_indicator=True, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
assert_equal(s.node_count, d.node_count,
"{0}: inequal number of node ({1} != {2})"
"".format(message, s.node_count, d.node_count))
assert_array_equal(d.children_right, s.children_right,
message + ": inequal children_right")
assert_array_equal(d.children_left, s.children_left,
message + ": inequal children_left")
external = d.children_right == TREE_LEAF
internal = np.logical_not(external)
assert_array_equal(d.feature[internal], s.feature[internal],
message + ": inequal features")
assert_array_equal(d.threshold[internal], s.threshold[internal],
message + ": inequal threshold")
assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
message + ": inequal sum(n_node_samples)")
assert_array_equal(d.n_node_samples, s.n_node_samples,
message + ": inequal n_node_samples")
assert_almost_equal(d.impurity, s.impurity,
err_msg=message + ": inequal impurity")
assert_array_almost_equal(d.value[external], s.value[external],
err_msg=message + ": inequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
        reg = Tree(max_features=1, random_state=1)
        reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
        assert_almost_equal(reg.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = clf.transform(X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
# to numerical instability. Since those instabilities mainly occurs at
# high tree depth, we restrict this maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
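# Hedged addition (not part of the original test suite): a minimal sketch of the
# identity the test above relies on. For a binary 0/1 node with positive-class
# fraction p, gini = 1 - p**2 - (1 - p)**2 = 2 * p * (1 - p), while the variance
# used by the "mse" criterion is p * (1 - p); the impurities differ only by a
# constant factor, so both criteria rank candidate splits identically.
def _binary_gini_and_variance(p):
    """Return (gini, variance) impurity of a binary node with positive rate p."""
    return 2.0 * p * (1.0 - p), p * (1.0 - p)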
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [-2, -1, 1] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_leaf():
    # Test that leaves contain at least min_samples_leaf training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
    # Check that tree estimators are picklable
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
serialized_object = pickle.dumps(clf)
clf2 = pickle.loads(serialized_object)
assert_equal(type(clf2), clf.__class__)
score2 = clf2.score(iris.data, iris.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (classification) "
"with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
serialized_object = pickle.dumps(reg)
reg2 = pickle.loads(serialized_object)
assert_equal(type(reg2), reg.__class__)
score2 = reg2.score(boston.data, boston.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (regression) "
"with {0}".format(name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = _balance_weights(unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 200)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
    # Check against multi-output 'balanced' which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
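# Hedged addition (illustrative only, not used by the tests): the multiplicative
# relationship checked above can be written out explicitly -- each sample's
# effective weight is its sample_weight times the class_weight of its label.
def _effective_sample_weight(sample_weight, y, class_weight):
    """Sketch of how sample_weight and class_weight combine multiplicatively."""
    return np.asarray(sample_weight) * np.array([class_weight[label]
                                                 for label in y])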
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-2 <= value.flat[0] < 2,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
# Test if the warning for too large inputs is appropriate.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._tree import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = int(platform.architecture()[0].rstrip('bit'))
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Gain testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
    # Due to the numerical instability of MSE and an overly strict test, we
    # limit the maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # Set n_samples equal to n_features to ease the simultaneous construction
    # of a csr and a csc matrix
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, X)
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if TreeEstimator().splitter in SPARSE_SPLITTERS:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
|
bsd-3-clause
|
YihaoLu/statsmodels
|
statsmodels/sandbox/tsa/diffusion2.py
|
38
|
13366
|
""" Diffusion 2: jump diffusion, stochastic volatility, stochastic time
Created on Tue Dec 08 15:03:49 2009
Author: josef-pktd following Meucci
License: BSD
contains:
CIRSubordinatedBrownian
Heston
IG
JumpDiffusionKou
JumpDiffusionMerton
NIG
VG
References
----------
Attilio Meucci, Review of Discrete and Continuous Processes in Finance: Theory and Applications
Bloomberg Portfolio Research Paper No. 2009-02-CLASSROOM July 1, 2009
http://papers.ssrn.com/sol3/papers.cfm?abstract_id=1373102
this is currently mostly a translation from matlab of
http://www.mathworks.com/matlabcentral/fileexchange/23554-review-of-discrete-and-continuous-processes-in-finance
license BSD:
Copyright (c) 2008, Attilio Meucci
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
TODO:
* vectorize where possible
* which processes are exactly simulated by finite differences ?
* include or exclude (now) the initial observation ?
* convert to and merge with diffusion.py (part 1 of diffusions)
* which processes can be easily estimated ?
loglike or characteristic function ?
* tests ? check for possible index errors (random indices), graphs look ok
* adjust notation, variable names, more consistent, more pythonic
* delete a few unused lines, cleanup
* docstrings
random bug (showed up only once, need fuzz-testing to replicate)
File "...\diffusion2.py", line 375, in <module>
x = jd.simulate(mu,sigma,lambd,a,D,ts,nrepl)
File "...\diffusion2.py", line 129, in simulate
jumps_ts[n] = CumS[Events]
IndexError: index out of bounds
CumS is empty array, Events == -1
"""
import numpy as np
#from scipy import stats # currently only uses np.random
import matplotlib.pyplot as plt
class JumpDiffusionMerton(object):
'''
Example
-------
mu=.00 # deterministic drift
sig=.20 # Gaussian component
    lambd=3.45 # Poisson process arrival rate
a=0 # drift of log-jump
D=.2 # st.dev of log-jump
X = JumpDiffusionMerton().simulate(mu,sig,lambd,a,D,ts,nrepl)
plt.figure()
plt.plot(X.T)
plt.title('Merton jump-diffusion')
'''
def __init__(self):
pass
def simulate(self, m,s,lambd,a,D,ts,nrepl):
T = ts[-1] # time points
# simulate number of jumps
n_jumps = np.random.poisson(lambd*T, size=(nrepl, 1))
jumps=[]
nobs=len(ts)
jumps=np.zeros((nrepl,nobs))
for j in range(nrepl):
# simulate jump arrival time
t = T*np.random.rand(n_jumps[j])#,1) #uniform
t = np.sort(t,0)
# simulate jump size
S = a + D*np.random.randn(n_jumps[j],1)
# put things together
CumS = np.cumsum(S)
jumps_ts = np.zeros(nobs)
for n in range(nobs):
Events = np.sum(t<=ts[n])-1
#print n, Events, CumS.shape, jumps_ts.shape
jumps_ts[n]=0
                if Events >= 0:  # -1 means no jump yet; guards the out-of-bounds noted in the module docstring
                    jumps_ts[n] = CumS[Events]
#jumps = np.column_stack((jumps, jumps_ts)) #maybe wrong transl
jumps[j,:] = jumps_ts
D_Diff = np.zeros((nrepl,nobs))
for k in range(nobs):
Dt=ts[k]
            if k>0:
Dt=ts[k]-ts[k-1]
D_Diff[:,k]=m*Dt + s*np.sqrt(Dt)*np.random.randn(nrepl)
x = np.hstack((np.zeros((nrepl,1)),np.cumsum(D_Diff,1)+jumps))
return x
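# Hedged addition (a sketch, not part of the original module): with lambd == 0
# the Poisson draw above yields zero jumps, so the simulated path reduces to an
# arithmetic Brownian motion x_t = m*t + s*W_t. The helper below builds that
# jump-free reference path directly; it is illustrative only and never called.
def _pure_drift_diffusion(m, s, ts, nrepl):
    '''Reference arithmetic Brownian motion on the grid ts (the no-jump limit).'''
    dt = np.diff(np.concatenate(([0.0], np.asarray(ts, dtype=float))))
    increments = m * dt + s * np.sqrt(dt) * np.random.randn(nrepl, len(dt))
    return np.hstack((np.zeros((nrepl, 1)), np.cumsum(increments, 1)))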
class JumpDiffusionKou(object):
def __init__(self):
pass
def simulate(self, m,s,lambd,p,e1,e2,ts,nrepl):
T=ts[-1]
# simulate number of jumps
N = np.random.poisson(lambd*T,size =(nrepl,1))
jumps=[]
nobs=len(ts)
jumps=np.zeros((nrepl,nobs))
for j in range(nrepl):
# simulate jump arrival time
t=T*np.random.rand(N[j])
t=np.sort(t)
# simulate jump size
ww = np.random.binomial(1, p, size=(N[j]))
S = ww * np.random.exponential(e1, size=(N[j])) - \
(1-ww) * np.random.exponential(e2, N[j])
# put things together
CumS = np.cumsum(S)
jumps_ts = np.zeros(nobs)
for n in range(nobs):
Events = sum(t<=ts[n])-1
jumps_ts[n]=0
                if Events >= 0:  # -1 means no jump yet (see bug note in the module docstring)
                    jumps_ts[n] = CumS[Events]
jumps[j,:] = jumps_ts
D_Diff = np.zeros((nrepl,nobs))
for k in range(nobs):
Dt=ts[k]
            if k>0:
Dt=ts[k]-ts[k-1]
D_Diff[:,k]=m*Dt + s*np.sqrt(Dt)*np.random.normal(size=nrepl)
x = np.hstack((np.zeros((nrepl,1)),np.cumsum(D_Diff,1)+jumps))
return x
class VG(object):
'''variance gamma process
'''
def __init__(self):
pass
def simulate(self, m,s,kappa,ts,nrepl):
T=len(ts)
dXs = np.zeros((nrepl,T))
for t in range(T):
            dt=ts[0]-0
            if t>0:
dt = ts[t]-ts[t-1]
#print dt/kappa
#TODO: check parameterization of gamrnd, checked looks same as np
d_tau = kappa * np.random.gamma(dt/kappa,1.,size=(nrepl))
#print s*np.sqrt(d_tau)
# this raises exception:
#dX = stats.norm.rvs(m*d_tau,(s*np.sqrt(d_tau)))
# np.random.normal requires scale >0
dX = np.random.normal(loc=m*d_tau, scale=1e-6+s*np.sqrt(d_tau))
dXs[:,t] = dX
x = np.cumsum(dXs,1)
return x
class IG(object):
    '''inverse Gaussian random variate sampler, used by NIG
'''
def __init__(self):
pass
def simulate(self, l,m,nrepl):
N = np.random.randn(nrepl,1)
Y = N**2
X = m + (.5*m*m/l)*Y - (.5*m/l)*np.sqrt(4*m*l*Y+m*m*(Y**2))
U = np.random.rand(nrepl,1)
ind = U>m/(X+m)
X[ind] = m*m/X[ind]
return X.ravel()
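# Hedged note (added): the sampler above appears to be the standard
# Michael-Schucany-Haas transformation method for the inverse Gaussian
# distribution IG(mu=m, lambda=l), whose theoretical mean is m. The helper
# below is an optional sanity check; its name is illustrative only.
def _ig_mean_check(l, m, nrepl=100000):
    '''Compare the empirical mean of IG().simulate with the theoretical mean m.'''
    return IG().simulate(l, m, nrepl).mean(), m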
class NIG(object):
'''normal-inverse-Gaussian
'''
def __init__(self):
pass
def simulate(self, th,k,s,ts,nrepl):
T = len(ts)
DXs = np.zeros((nrepl,T))
for t in range(T):
            Dt=ts[0]-0
            if t>0:
Dt=ts[t]-ts[t-1]
l = 1/k*(Dt**2)
m = Dt
DS = IG().simulate(l,m,nrepl)
N = np.random.randn(nrepl)
DX = s*N*np.sqrt(DS) + th*DS
#print DS.shape, DX.shape, DXs.shape
DXs[:,t] = DX
x = np.cumsum(DXs,1)
return x
class Heston(object):
'''Heston Stochastic Volatility
'''
def __init__(self):
pass
def simulate(self, m, kappa, eta,lambd,r, ts, nrepl,tratio=1.):
T = ts[-1]
nobs = len(ts)
dt = np.zeros(nobs) #/tratio
dt[0] = ts[0]-0
dt[1:] = np.diff(ts)
DXs = np.zeros((nrepl,nobs))
dB_1 = np.sqrt(dt) * np.random.randn(nrepl,nobs)
dB_2u = np.sqrt(dt) * np.random.randn(nrepl,nobs)
dB_2 = r*dB_1 + np.sqrt(1-r**2)*dB_2u
vt = eta*np.ones(nrepl)
v=[]
dXs = np.zeros((nrepl,nobs))
vts = np.zeros((nrepl,nobs))
for t in range(nobs):
dv = kappa*(eta-vt)*dt[t]+ lambd*np.sqrt(vt)*dB_2[:,t]
dX = m*dt[t] + np.sqrt(vt*dt[t]) * dB_1[:,t]
vt = vt + dv
vts[:,t] = vt
dXs[:,t] = dX
x = np.cumsum(dXs,1)
return x, vts
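# Hedged addition (illustrative): the Euler scheme above can drive vt negative
# when the Feller condition 2*kappa*eta > lambd**2 is violated; the __main__
# parameters below satisfy it (2*0.6*0.09 = 0.108 > 0.25**2 = 0.0625). This
# helper only documents that check and is not called by the original code.
def _heston_feller_condition_ok(kappa, eta, lambd):
    '''True when the CIR variance process stays strictly positive in theory.'''
    return 2.0 * kappa * eta > lambd ** 2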
class CIRSubordinatedBrownian(object):
'''CIR subordinated Brownian Motion
'''
def __init__(self):
pass
def simulate(self, m, kappa, T_dot,lambd,sigma, ts, nrepl):
T = ts[-1]
nobs = len(ts)
dtarr = np.zeros(nobs) #/tratio
dtarr[0] = ts[0]-0
dtarr[1:] = np.diff(ts)
DXs = np.zeros((nrepl,nobs))
dB = np.sqrt(dtarr) * np.random.randn(nrepl,nobs)
yt = 1.
dXs = np.zeros((nrepl,nobs))
dtaus = np.zeros((nrepl,nobs))
y = np.zeros((nrepl,nobs))
for t in range(nobs):
dt = dtarr[t]
dy = kappa*(T_dot-yt)*dt + lambd*np.sqrt(yt)*dB[:,t]
yt = np.maximum(yt+dy,1e-10) # keep away from zero ?
dtau = np.maximum(yt*dt, 1e-6)
dX = np.random.normal(loc=m*dtau, scale=sigma*np.sqrt(dtau))
y[:,t] = yt
dtaus[:,t] = dtau
dXs[:,t] = dX
tau = np.cumsum(dtaus,1)
x = np.cumsum(dXs,1)
return x, tau, y
def schout2contank(a,b,d):
th = d*b/np.sqrt(a**2-b**2)
k = 1/(d*np.sqrt(a**2-b**2))
s = np.sqrt(d/np.sqrt(a**2-b**2))
return th,k,s
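# Hedged worked example (added for clarity; parameter values taken from the
# __main__ block below): with Schoutens parameters a=2.1, b=0, d=1 we have
# sqrt(a**2 - b**2) = 2.1, hence th = 0.0, k = 1/2.1 ~= 0.476 and
# s = sqrt(1/2.1) ~= 0.690 in the Cont-Tankov parameterisation.
def _schout2contank_example():
    '''Illustrative call of schout2contank with the parameters used below.'''
    return schout2contank(2.1, 0., 1.)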
if __name__ == '__main__':
#Merton Jump Diffusion
#^^^^^^^^^^^^^^^^^^^^^
# grid of time values at which the process is evaluated
#("0" will be added, too)
    nobs = 252  # 1000 # 252
ts = np.linspace(1./nobs, 1., nobs)
nrepl=5 # number of simulations
mu=.010 # deterministic drift
sigma = .020 # Gaussian component
lambd = 3.45 *10 # Poisson process arrival rate
a=0 # drift of log-jump
D=.2 # st.dev of log-jump
jd = JumpDiffusionMerton()
x = jd.simulate(mu,sigma,lambd,a,D,ts,nrepl)
plt.figure()
plt.plot(x.T) #Todo
plt.title('Merton jump-diffusion')
sigma = 0.2
lambd = 3.45
x = jd.simulate(mu,sigma,lambd,a,D,ts,nrepl)
plt.figure()
plt.plot(x.T) #Todo
plt.title('Merton jump-diffusion')
#Kou jump diffusion
#^^^^^^^^^^^^^^^^^^
mu=.0 # deterministic drift
lambd=4.25 # Poisson process arrival rate
p=.5 # prob. of up-jump
e1=.2 # parameter of up-jump
e2=.3 # parameter of down-jump
sig=.2 # Gaussian component
x = JumpDiffusionKou().simulate(mu,sig,lambd,p,e1,e2,ts,nrepl)
plt.figure()
plt.plot(x.T) #Todo
plt.title('double exponential (Kou jump diffusion)')
#variance-gamma
#^^^^^^^^^^^^^^
mu = .1 # deterministic drift in subordinated Brownian motion
kappa = 1. #10. #1 # inverse for gamma shape parameter
sig = 0.5 #.2 # s.dev in subordinated Brownian motion
x = VG().simulate(mu,sig,kappa,ts,nrepl)
plt.figure()
plt.plot(x.T) #Todo
plt.title('variance gamma')
#normal-inverse-Gaussian
#^^^^^^^^^^^^^^^^^^^^^^^
# (Schoutens notation)
al = 2.1
be = 0
de = 1
# convert parameters to Cont-Tankov notation
th,k,s = schout2contank(al,be,de)
x = NIG().simulate(th,k,s,ts,nrepl)
plt.figure()
plt.plot(x.T) #Todo x-axis
plt.title('normal-inverse-Gaussian')
#Heston Stochastic Volatility
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^
m=.0
kappa = .6 # 2*Kappa*Eta>Lambda^2
eta = .3**2
lambd =.25
r = -.7
T = 20.
    nobs = int(252 * T)  # 1000 # 252
tsh = np.linspace(T/nobs, T, nobs)
x, vts = Heston().simulate(m,kappa, eta,lambd,r, tsh, nrepl, tratio=20.)
plt.figure()
plt.plot(x.T)
plt.title('Heston Stochastic Volatility')
plt.figure()
plt.plot(np.sqrt(vts).T)
plt.title('Heston Stochastic Volatility - CIR Vol.')
plt.figure()
plt.subplot(2,1,1)
plt.plot(x[0])
plt.title('Heston Stochastic Volatility process')
plt.subplot(2,1,2)
plt.plot(np.sqrt(vts[0]))
plt.title('CIR Volatility')
#CIR subordinated Brownian
#^^^^^^^^^^^^^^^^^^^^^^^^^
m=.1
sigma=.4
kappa=.6 # 2*Kappa*T_dot>Lambda^2
T_dot=1
lambd=1
#T=252*10
#dt=1/252
#nrepl=2
T = 10.
    nobs = int(252 * T)  # 1000 # 252
tsh = np.linspace(T/nobs, T, nobs)
x, tau, y = CIRSubordinatedBrownian().simulate(m, kappa, T_dot,lambd,sigma, tsh, nrepl)
plt.figure()
plt.plot(tsh, x.T)
plt.title('CIRSubordinatedBrownian process')
plt.figure()
plt.plot(tsh, y.T)
plt.title('CIRSubordinatedBrownian - CIR')
plt.figure()
plt.plot(tsh, tau.T)
plt.title('CIRSubordinatedBrownian - stochastic time ')
plt.figure()
plt.subplot(2,1,1)
plt.plot(tsh, x[0])
plt.title('CIRSubordinatedBrownian process')
plt.subplot(2,1,2)
plt.plot(tsh, y[0], label='CIR')
plt.plot(tsh, tau[0], label='stoch. time')
plt.legend(loc='upper left')
plt.title('CIRSubordinatedBrownian')
#plt.show()
|
bsd-3-clause
|
ch3ll0v3k/scikit-learn
|
examples/neighbors/plot_regression.py
|
349
|
1402
|
"""
============================
Nearest Neighbors regression
============================
Demonstrate the resolution of a regression problem
using a k-Nearest Neighbor and the interpolation of the
target using both barycenter and constant weights.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause (C) INRIA
###############################################################################
# Generate sample data
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
np.random.seed(0)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
T = np.linspace(0, 5, 500)[:, np.newaxis]
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 1 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
n_neighbors = 5
for i, weights in enumerate(['uniform', 'distance']):
knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
y_ = knn.fit(X, y).predict(T)
plt.subplot(2, 1, i + 1)
plt.scatter(X, y, c='k', label='data')
plt.plot(T, y_, c='g', label='prediction')
plt.axis('tight')
plt.legend()
plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
weights))
plt.show()
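# Hedged addition (not part of the original example): a quick quantitative
# complement to the plots above -- the in-sample R^2 of both weighting schemes
# on the noisy training data. The exact numbers depend on the random noise.
for weights in ['uniform', 'distance']:
    knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
    print("R^2 (weights=%r): %.3f" % (weights, knn.fit(X, y).score(X, y)))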
|
bsd-3-clause
|
zaxtax/scikit-learn
|
sklearn/model_selection/tests/test_search.py
|
23
|
30837
|
"""Test the search module"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.fixes import sp_version
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import LeaveOneLabelOut
from sklearn.model_selection import LeavePLabelOut
from sklearn.model_selection import LabelKFold
from sklearn.model_selection import LabelShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import ParameterSampler
# TODO Import from sklearn.exceptions once merged.
from sklearn.base import ChangedBehaviorWarning
from sklearn.model_selection._validation import FitFailedWarning
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherits from BaseEstimator;
# they are used to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the parameter search algorithms"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
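# Hedged sketch (added, not an original test): ParameterSampler, imported above,
# is the randomized counterpart of ParameterGrid -- it draws n_iter parameter
# dicts from lists or scipy distributions. A minimal, self-contained usage:
def _parameter_sampler_sketch():
    sampled = list(ParameterSampler({'C': [0.1, 1, 10], 'gamma': uniform(0, 1)},
                                    n_iter=4, random_state=0))
    return sampled  # four dicts, each with a 'C' and a 'gamma' key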
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_grid_search_labels():
# Check if ValueError (when labels is None) propagates to GridSearchCV
# And also check if labels is correctly passed to the cv object
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
labels = rng.randint(0, 3, 15)
clf = LinearSVC(random_state=0)
grid = {'C': [1]}
label_cvs = [LeaveOneLabelOut(), LeavePLabelOut(2), LabelKFold(),
LabelShuffleSplit()]
for cv in label_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
assert_raise_message(ValueError,
"The labels parameter should not be None",
gs.fit, X, y)
gs.fit(X, y, labels)
non_label_cvs = [StratifiedKFold(), StratifiedShuffleSplit()]
for cv in non_label_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
# Should not raise an error
gs.fit(X, y)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
@ignore_warnings
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
# test that repeated calls yield identical parameters
param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=3, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
if sp_version >= (0, 16):
param_distributions = {"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv.split(X, y):
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with on_error != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with on_error == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter is the same as the grid size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
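# Illustrative sketch (not part of the original test module): the basic
# GridSearchCV workflow that most tests above exercise -- fit an estimator
# over a small parameter grid, then inspect best_params_ / best_score_.
# Only names already used elsewhere in this module are assumed.
def _example_grid_search_usage():  # hypothetical helper, not collected as a test
    X_, y_ = make_classification(n_samples=100, n_features=10, random_state=0)
    search = GridSearchCV(LinearSVC(random_state=0), {'C': [0.1, 1.0, 10.0]})
    search.fit(X_, y_)
    return search.best_params_, search.best_score_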
|
bsd-3-clause
|
metpy/MetPy
|
metpy/plots/station_plot.py
|
1
|
25830
|
# Copyright (c) 2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Create Station-model plots."""
try:
from enum import Enum
except ImportError:
from enum34 import Enum
import matplotlib
import numpy as np
from .wx_symbols import (current_weather, high_clouds, low_clouds, mid_clouds,
pressure_tendency, sky_cover, wx_symbol_font)
from ..cbook import is_string_like
from ..package_tools import Exporter
from ..units import atleast_1d
exporter = Exporter(globals())
@exporter.export
class StationPlot(object):
"""Make a standard meteorological station plot.
Plots values, symbols, or text spaced around a central location. Can also plot wind
barbs as the center of the location.
"""
location_names = {'C': (0, 0), 'N': (0, 1), 'NE': (1, 1), 'E': (1, 0), 'SE': (1, -1),
'S': (0, -1), 'SW': (-1, -1), 'W': (-1, 0), 'NW': (-1, 1)}
def __init__(self, ax, x, y, fontsize=10, spacing=None, transform=None, **kwargs):
"""Initialize the StationPlot with items that do not change.
This sets up the axes and station locations. The `fontsize` and `spacing`
are also specified here to ensure that they are consistent between individual
station elements.
Parameters
----------
ax : matplotlib.axes.Axes
The :class:`~matplotlib.axes.Axes` for plotting
x : array_like
The x location of the stations in the plot
y : array_like
The y location of the stations in the plot
fontsize : int
The fontsize to use for drawing text
spacing : int
The spacing, in points, that corresponds to a single increment between
station plot elements.
transform : matplotlib.transforms.Transform (or compatible)
The default transform to apply to the x and y positions when plotting.
kwargs
Additional keyword arguments to use for matplotlib's plotting functions.
These will be passed to all the plotting methods, and thus need to be valid
for all plot types, such as `clip_on`.
"""
self.ax = ax
self.x = atleast_1d(x)
self.y = atleast_1d(y)
self.fontsize = fontsize
self.spacing = fontsize if spacing is None else spacing
self.transform = transform
self.items = {}
self.barbs = None
self.default_kwargs = kwargs
def plot_symbol(self, location, codes, symbol_mapper, **kwargs):
"""At the specified location in the station model plot a set of symbols.
This specifies that at the offset `location`, the data in `codes` should be
converted to unicode characters (for our :data:`wx_symbol_font`) using `symbol_mapper`,
and plotted.
Additional keyword arguments given will be passed onto the actual plotting
code; this is useful for specifying things like color or font properties.
If something has already been plotted at this location, it will be replaced.
Parameters
----------
location : str or tuple[float, float]
The offset (relative to center) to plot this parameter. If str, should be one of
'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
specifying the number of increments in the x and y directions; increments
are multiplied by `spacing` to give offsets in x and y relative to the center.
codes : array_like
The numeric values that should be converted to unicode characters for plotting.
symbol_mapper : callable
Controls converting data values to unicode code points for the
:data:`wx_symbol_font` font. This should take a value and return a single unicode
character. See :mod:`metpy.plots.wx_symbols` for included mappers.
kwargs
Additional keyword arguments to use for matplotlib's plotting functions.
.. plot::
import matplotlib.pyplot as plt
import numpy as np
from math import ceil
from metpy.plots import StationPlot
from metpy.plots.wx_symbols import current_weather, current_weather_auto
from metpy.plots.wx_symbols import low_clouds, mid_clouds, high_clouds
from metpy.plots.wx_symbols import sky_cover, pressure_tendency
def plot_symbols(mapper, name, nwrap=12, figsize=(10, 1.4)):
# Determine how many symbols there are and layout in rows of nwrap
# if there are more than nwrap symbols
num_symbols = len(mapper)
codes = np.arange(len(mapper))
ncols = nwrap
if num_symbols <= nwrap:
nrows = 1
x = np.linspace(0, 1, len(mapper))
y = np.ones_like(x)
ax_height = 0.8
else:
nrows = int(ceil(num_symbols / ncols))
x = np.tile(np.linspace(0, 1, ncols), nrows)[:num_symbols]
y = np.repeat(np.arange(nrows, 0, -1), ncols)[:num_symbols]
figsize = (10, 1 * nrows + 0.4)
ax_height = 0.8 + 0.018 * nrows
fig = plt.figure(figsize=figsize, dpi=300)
ax = fig.add_axes([0, 0, 1, ax_height])
ax.set_title(name, size=20)
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
ax.set_frame_on(False)
# Plot
sp = StationPlot(ax, x, y, fontsize=36)
sp.plot_symbol('C', codes, mapper)
sp.plot_parameter((0, -1), codes, fontsize=18)
ax.set_ylim(-0.05, nrows + 0.5)
plt.show()
plot_symbols(current_weather, "Current Weather Symbols")
plot_symbols(current_weather_auto, "Current Weather Auto Reported Symbols")
plot_symbols(low_clouds, "Low Cloud Symbols")
plot_symbols(mid_clouds, "Mid Cloud Symbols")
plot_symbols(high_clouds, "High Cloud Symbols")
plot_symbols(sky_cover, "Sky Cover Symbols")
plot_symbols(pressure_tendency, "Pressure Tendency Symbols")
See Also
--------
plot_barb, plot_parameter, plot_text
"""
# Make sure we use our font for symbols
kwargs['fontproperties'] = wx_symbol_font.copy()
return self.plot_parameter(location, codes, symbol_mapper, **kwargs)
def plot_parameter(self, location, parameter, formatter='.0f', **kwargs):
"""At the specified location in the station model plot a set of values.
This specifies that at the offset `location`, the data in `parameter` should be
plotted. The conversion of the data values to a string is controlled by `formatter`.
Additional keyword arguments given will be passed onto the actual plotting
code; this is useful for specifying things like color or font properties.
If something has already been plotted at this location, it will be replaced.
Parameters
----------
location : str or tuple[float, float]
The offset (relative to center) to plot this parameter. If str, should be one of
'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
specifying the number of increments in the x and y directions; increments
are multiplied by `spacing` to give offsets in x and y relative to the center.
parameter : array_like
The numeric values that should be plotted
formatter : str or callable, optional
How to format the data as a string for plotting. If a string, it should be
compatible with the :func:`format` builtin. If a callable, this should take a
value and return a string. Defaults to '.0f'.
kwargs
Additional keyword arguments to use for matplotlib's plotting functions.
See Also
--------
plot_barb, plot_symbol, plot_text
"""
text = self._to_string_list(parameter, formatter)
return self.plot_text(location, text, **kwargs)
def plot_text(self, location, text, **kwargs):
"""At the specified location in the station model plot a collection of text.
This specifies that at the offset `location`, the strings in `text` should be
plotted.
Additional keyword arguments given will be passed onto the actual plotting
code; this is useful for specifying things like color or font properties.
If something has already been plotted at this location, it will be replaced.
Parameters
----------
location : str or tuple[float, float]
The offset (relative to center) to plot this parameter. If str, should be one of
'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
specifying the number of increments in the x and y directions; increments
are multiplied by `spacing` to give offsets in x and y relative to the center.
text : list (or array) of strings
The strings that should be plotted
kwargs
Additional keyword arguments to use for matplotlib's plotting functions.
See Also
--------
plot_barb, plot_parameter, plot_symbol
"""
location = self._handle_location(location)
kwargs = self._make_kwargs(kwargs)
text_collection = self.ax.scattertext(self.x, self.y, text, loc=location,
size=kwargs.pop('fontsize', self.fontsize),
**kwargs)
if location in self.items:
self.items[location].remove()
self.items[location] = text_collection
return text_collection
def plot_barb(self, u, v, **kwargs):
r"""At the center of the station model plot wind barbs.
Additional keyword arguments given will be passed onto matplotlib's
:meth:`~matplotlib.axes.Axes.barbs` function; this is useful for specifying things
like color or line width.
Parameters
----------
u : array-like
The data to use for the u-component of the barbs.
v : array-like
The data to use for the v-component of the barbs.
plot_units: `pint.unit`
Units to plot in (performing conversion if necessary). Defaults to given units.
kwargs
Additional keyword arguments to pass to matplotlib's
:meth:`~matplotlib.axes.Axes.barbs` function.
See Also
--------
plot_parameter, plot_symbol, plot_text
"""
kwargs = self._make_kwargs(kwargs)
# If plot_units specified, convert the data to those units
plotting_units = kwargs.pop('plot_units', None)
if plotting_units:
if hasattr(u, 'units') and hasattr(v, 'units'):
u = u.to(plotting_units)
v = v.to(plotting_units)
else:
raise ValueError('To convert to plotting units, units must be attached to '
'u and v wind components.')
# Strip units; the CartoPy transform cannot handle unit-aware arrays
u = np.array(u)
v = np.array(v)
# Empirically determined
pivot = 0.51 * np.sqrt(self.fontsize)
length = 1.95 * np.sqrt(self.fontsize)
defaults = {'sizes': {'spacing': .15, 'height': 0.5, 'emptybarb': 0.35},
'length': length, 'pivot': pivot}
defaults.update(kwargs)
# Remove old barbs
if self.barbs:
self.barbs.remove()
# Handle transforming our center points. CartoPy doesn't like 1D barbs
# TODO: This can be removed for cartopy > 0.14.3
if hasattr(self.ax, 'projection') and 'transform' in kwargs:
trans = kwargs['transform']
try:
kwargs['transform'] = trans._as_mpl_transform(self.ax)
except AttributeError:
pass
u, v = self.ax.projection.transform_vectors(trans, self.x, self.y, u, v)
# Since we've re-implemented CartoPy's barbs, we need to skip calling it here
self.barbs = matplotlib.axes.Axes.barbs(self.ax, self.x, self.y, u, v, **defaults)
else:
self.barbs = self.ax.barbs(self.x, self.y, u, v, **defaults)
def _make_kwargs(self, kwargs):
"""Assemble kwargs as necessary.
Inserts our defaults as well as ensures transform is present when appropriate.
"""
# Use default kwargs and update with additional ones
all_kw = self.default_kwargs.copy()
all_kw.update(kwargs)
# Pass transform if necessary
if 'transform' not in all_kw and self.transform:
all_kw['transform'] = self.transform
return all_kw
@staticmethod
def _to_string_list(vals, fmt):
"""Convert a sequence of values to a list of strings."""
if not callable(fmt):
def formatter(s):
"""Turn a format string into a callable."""
if hasattr(s, 'units'):
s = s.item()
return format(s, fmt)
else:
formatter = fmt
return [formatter(v) if np.isfinite(v) else '' for v in vals]
def _handle_location(self, location):
"""Process locations to get a consistent set of tuples for location."""
if is_string_like(location):
location = self.location_names[location]
xoff, yoff = location
return xoff * self.spacing, yoff * self.spacing
@exporter.export
class StationPlotLayout(dict):
r"""make a layout to encapsulate plotting using :class:`StationPlot`.
This class keeps a collection of offsets, plot formats, etc. for a parameter based
on its name. This then allows a dictionary of data (or any object that allows looking
up of arrays based on a name) to be passed to :meth:`plot()` to plot the data all at once.
See Also
--------
StationPlot
"""
class PlotTypes(Enum):
r"""Different plotting types for the layout.
Controls how items are displayed (e.g. converting values to symbols).
"""
value = 1
symbol = 2
text = 3
barb = 4
def add_value(self, location, name, fmt='.0f', units=None, **kwargs):
r"""Add a numeric value to the station layout.
This specifies that at the offset `location`, data should be pulled from the data
container using the key `name` and plotted. The conversion of the data values to
a string is controlled by `fmt`. The units required for plotting can also
be passed in using `units`, which will cause the data to be converted before
plotting.
Additional keyword arguments given will be passed onto the actual plotting
code; this is useful for specifying things like color or font properties.
Parameters
----------
location : str or tuple[float, float]
The offset (relative to center) to plot this value. If str, should be one of
'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
specifying the number of increments in the x and y directions.
name : str
The name of the parameter, which is used as a key to pull data out of the
data container passed to :meth:`plot`.
fmt : str or callable, optional
How to format the data as a string for plotting. If a string, it should be
compatible with the :func:`format` builtin. If a callable, this should take a
value and return a string. Defaults to '.0f'.
units : pint-compatible unit, optional
The units to use for plotting. Data will be converted to this unit before
conversion to a string. If not specified, no conversion is done.
kwargs
Additional keyword arguments to use for matplotlib's plotting functions.
See Also
--------
add_barb, add_symbol, add_text
"""
self[location] = (self.PlotTypes.value, name, (fmt, units, kwargs))
def add_symbol(self, location, name, symbol_mapper, **kwargs):
r"""Add a symbol to the station layout.
This specifies that at the offset `location`, data should be pulled from the data
container using the key `name` and plotted. Data values will be converted to glyphs
appropriate for MetPy's symbol font using the callable `symbol_mapper`.
Additional keyword arguments given will be passed onto the actual plotting
code; this is useful for specifying things like color or font properties.
Parameters
----------
location : str or tuple[float, float]
The offset (relative to center) to plot this value. If str, should be one of
'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
specifying the number of increments in the x and y directions.
name : str
The name of the parameter, which is used as a key to pull data out of the
data container passed to :meth:`plot`.
symbol_mapper : callable
Controls converting data values to unicode code points for the
:data:`wx_symbol_font` font. This should take a value and return a single unicode
character. See :mod:`metpy.plots.wx_symbols` for included mappers.
kwargs
Additional keyword arguments to use for matplotlib's plotting functions.
See Also
--------
add_barb, add_text, add_value
"""
self[location] = (self.PlotTypes.symbol, name, (symbol_mapper, kwargs))
def add_text(self, location, name, **kwargs):
r"""Add a text field to the station layout.
This specifies that at the offset `location`, data should be pulled from the data
container using the key `name` and plotted directly as text with no conversion
applied.
Additional keyword arguments given will be passed onto the actual plotting
code; this is useful for specifying things like color or font properties.
Parameters
----------
location : str or tuple(float, float)
The offset (relative to center) to plot this value. If str, should be one of
'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
specifying the number of increments in the x and y directions.
name : str
The name of the parameter, which is used as a key to pull data out of the
data container passed to :meth:`plot`.
kwargs
Additional keyword arguments to use for matplotlib's plotting functions.
See Also
--------
add_barb, add_symbol, add_value
"""
self[location] = (self.PlotTypes.text, name, kwargs)
def add_barb(self, u_name, v_name, units=None, **kwargs):
r"""Add a wind barb to the center of the station layout.
This specifies that u- and v-component data should be pulled from the data
container using the keys `u_name` and `v_name`, respectively, and plotted as
a wind barb at the center of the station plot. If `units` are given, both
components will be converted to these units.
Additional keyword arguments given will be passed onto the actual plotting
code; this is useful for specifying things like color or line width.
Parameters
----------
u_name : str
The name of the parameter for the u-component for `barbs`, which is used as
a key to pull data out of the data container passed to :meth:`plot`.
v_name : str
The name of the parameter for the v-component for `barbs`, which is used as
a key to pull data out of the data container passed to :meth:`plot`.
units : pint-compatible unit, optional
The units to use for plotting. Data will be converted to this unit before
conversion to a string. If not specified, no conversion is done.
kwargs
Additional keyword arguments to use for matplotlib's
:meth:`~matplotlib.axes.Axes.barbs` function.
See Also
--------
add_symbol, add_text, add_value
"""
# Not sure if storing v_name as a plot-specific option is appropriate,
# but it seems simpler than making the name-handling code in plot() deal with tuples
self['barb'] = (self.PlotTypes.barb, (u_name, v_name), (units, kwargs))
def names(self):
"""Get the list of names used by the layout.
Returns
-------
list[str]
the list of names of variables used by the layout
"""
ret = []
for item in self.values():
if item[0] == self.PlotTypes.barb:
ret.extend(item[1])
else:
ret.append(item[1])
return ret
def plot(self, plotter, data_dict):
"""Plot a collection of data using this layout for a station plot.
This function iterates through the entire specified layout, pulling the fields named
in the layout from `data_dict` and plotting them using `plotter` as specified
in the layout. Fields present in the layout, but not in `data_dict`, are ignored.
Parameters
----------
plotter : StationPlot
:class:`StationPlot` to use to plot the data. This controls the axes,
spacing, station locations, etc.
data_dict : dict[str, array-like]
Data container that maps a name to an array of data. Data from this object
will be used to fill out the station plot.
"""
def coerce_data(dat, u):
try:
return dat.to(u).magnitude
except AttributeError:
return dat
for loc, info in self.items():
typ, name, args = info
if typ == self.PlotTypes.barb:
# Try getting the data
u_name, v_name = name
u_data = data_dict.get(u_name)
v_data = data_dict.get(v_name)
# Plot if we have the data
if not (v_data is None or u_data is None):
units, kwargs = args
plotter.plot_barb(coerce_data(u_data, units), coerce_data(v_data, units),
**kwargs)
else:
# Check that we have the data for this location
data = data_dict.get(name)
if data is not None:
# If we have it, hand it to the appropriate method
if typ == self.PlotTypes.value:
fmt, units, kwargs = args
plotter.plot_parameter(loc, coerce_data(data, units), fmt, **kwargs)
elif typ == self.PlotTypes.symbol:
mapper, kwargs = args
plotter.plot_symbol(loc, data, mapper, **kwargs)
elif typ == self.PlotTypes.text:
plotter.plot_text(loc, data, **args)
def __repr__(self):
"""Return string representation of layout."""
return ('{'
+ ', '.join('{0}: ({1[0].name}, {1[1]}, ...)'.format(loc, info)
for loc, info in sorted(self.items()))
+ '}')
with exporter:
#: :desc: Simple station plot layout
simple_layout = StationPlotLayout()
simple_layout.add_barb('eastward_wind', 'northward_wind', 'knots')
simple_layout.add_value('NW', 'air_temperature', units='degC')
simple_layout.add_value('SW', 'dew_point_temperature', units='degC')
simple_layout.add_value('NE', 'air_pressure_at_sea_level', units='mbar',
fmt=lambda v: format(10 * v, '03.0f')[-3:])
simple_layout.add_symbol('C', 'cloud_coverage', sky_cover)
simple_layout.add_symbol('W', 'present_weather', current_weather)
#: Full NWS station plot `layout`__
#:
#: __ http://oceanservice.noaa.gov/education/yos/resource/JetStream/synoptic/wxmaps.htm
nws_layout = StationPlotLayout()
nws_layout.add_value((-1, 1), 'air_temperature', units='degF')
nws_layout.add_symbol((0, 2), 'high_cloud_type', high_clouds)
nws_layout.add_symbol((0, 1), 'medium_cloud_type', mid_clouds)
nws_layout.add_symbol((0, -1), 'low_cloud_type', low_clouds)
nws_layout.add_value((1, 1), 'air_pressure_at_sea_level', units='mbar',
fmt=lambda v: format(10 * v, '03.0f')[-3:])
nws_layout.add_value((-2, 0), 'visibility_in_air', fmt='.0f', units='miles')
nws_layout.add_symbol((-1, 0), 'present_weather', current_weather)
nws_layout.add_symbol((0, 0), 'cloud_coverage', sky_cover)
nws_layout.add_value((1, 0), 'tendency_of_air_pressure', units='mbar',
fmt=lambda v: ('-' if v < 0 else '') + format(10 * abs(v), '02.0f'))
nws_layout.add_symbol((2, 0), 'tendency_of_air_pressure_symbol', pressure_tendency)
nws_layout.add_barb('eastward_wind', 'northward_wind', units='knots')
nws_layout.add_value((-1, -1), 'dew_point_temperature', units='degF')
# TODO: Fix once we have the past weather symbols converted
nws_layout.add_symbol((1, -1), 'past_weather', current_weather)
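# Illustrative sketch (not part of the original module): how the pieces above
# are typically combined -- a StationPlot anchored on a Matplotlib axes and
# filled either by calling the plot_* methods directly or by handing a data
# container to one of the layouts defined above. The `data` keys and values
# here are assumptions for illustration only.
def _example_station_plot_usage(ax, data):  # hypothetical helper
    """Plot ``data`` (a dict of arrays keyed by the simple_layout names) on ``ax``."""
    sp = StationPlot(ax, data['longitude'], data['latitude'], fontsize=12)
    # Drive individual elements manually ...
    sp.plot_parameter('NW', data['air_temperature'], color='red')
    sp.plot_symbol('C', data['cloud_coverage'], sky_cover)
    # ... or let a predefined layout pull everything it knows about.
    simple_layout.plot(sp, data)
    return sp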
|
bsd-3-clause
|
tomlof/scikit-learn
|
sklearn/feature_extraction/tests/test_feature_hasher.py
|
41
|
3668
|
from __future__ import unicode_literals
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.feature_extraction import FeatureHasher
from sklearn.utils.testing import assert_raises, assert_true, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"foo": "bar", "dada": 42, "tzara": 37},
{"foo": "baz", "gaga": u"string1"}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_feature_hasher_pairs_with_string_values():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": "a"},
{"baz": u"abc", "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 1], x1_nz)
assert_equal([1, 1, 4], x2_nz)
raw_X = (iter(d.items()) for d in [{"bax": "abc"},
{"bax": "abc"}])
x1, x2 = h.transform(raw_X).toarray()
x1_nz = np.abs(x1[x1 != 0])
x2_nz = np.abs(x2[x2 != 0])
assert_equal([1], x1_nz)
assert_equal([1], x2_nz)
assert_array_equal(x1, x2)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
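# Illustrative sketch (not part of the original test module): minimal
# FeatureHasher usage on dict inputs, relying only on the public API the
# tests above exercise (n_features, input_type, transform).
def _example_feature_hasher_usage():  # hypothetical helper, not a test
    hasher = FeatureHasher(n_features=8, input_type="dict")
    X = hasher.transform([{"cat": 1, "dog": 2}, {"cat": -1, "fish": 0.5}])
    return X.toarray()  # each row hashes its named features into 8 columns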
|
bsd-3-clause
|
dhermes/gcloud-python
|
bigquery_storage/google/cloud/bigquery_storage_v1beta1/reader.py
|
2
|
9025
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import itertools
import json
try:
import fastavro
except ImportError: # pragma: NO COVER
fastavro = None
import google.api_core.exceptions
try:
import pandas
except ImportError: # pragma: NO COVER
pandas = None
import six
from google.cloud.bigquery_storage_v1beta1 import types
_STREAM_RESUMPTION_EXCEPTIONS = (
google.api_core.exceptions.DeadlineExceeded,
google.api_core.exceptions.ServiceUnavailable,
)
_FASTAVRO_REQUIRED = "fastavro is required to parse Avro blocks"
class ReadRowsStream(object):
"""A stream of results from a read rows request.
This stream is an iterable of
:class:`~google.cloud.bigquery_storage_v1beta1.types.ReadRowsResponse`.
Iterate over it to fetch all row blocks.
If the fastavro library is installed, use the
:func:`~google.cloud.bigquery_storage_v1beta1.reader.ReadRowsStream.rows()`
method to parse all blocks into a stream of row dictionaries.
If the pandas and fastavro libraries are installed, use the
:func:`~google.cloud.bigquery_storage_v1beta1.reader.ReadRowsStream.to_dataframe()`
method to parse all blocks into a :class:`pandas.DataFrame`.
"""
def __init__(self, wrapped, client, read_position, read_rows_kwargs):
"""Construct a ReadRowsStream.
Args:
wrapped (Iterable[ \
~google.cloud.bigquery_storage_v1beta1.types.ReadRowsResponse \
]):
The ReadRows stream to read.
client ( \
~google.cloud.bigquery_storage_v1beta1.gapic. \
big_query_storage_client.BigQueryStorageClient \
):
A GAPIC client used to reconnect to a ReadRows stream. This
must be the GAPIC client to avoid a circular dependency on
this class.
read_position (Union[ \
dict, \
~google.cloud.bigquery_storage_v1beta1.types.StreamPosition \
]):
Required. Identifier of the position in the stream to start
reading from. The offset requested must be less than the last
row read from ReadRows. Requesting a larger offset is
undefined. If a dict is provided, it must be of the same form
as the protobuf message
:class:`~google.cloud.bigquery_storage_v1beta1.types.StreamPosition`
read_rows_kwargs (dict):
Keyword arguments to use when reconnecting to a ReadRows
stream.
Returns:
Iterable[ \
~google.cloud.bigquery_storage_v1beta1.types.ReadRowsResponse \
]:
A sequence of row blocks.
"""
# Make a copy of the read position so that we can update it without
# mutating the original input.
self._position = _copy_stream_position(read_position)
self._client = client
self._wrapped = wrapped
self._read_rows_kwargs = read_rows_kwargs
def __iter__(self):
"""An iterable of blocks.
Returns:
Iterable[ \
~google.cloud.bigquery_storage_v1beta1.types.ReadRowsResponse \
]:
A sequence of row blocks.
"""
# Infinite loop to reconnect on reconnectable errors while processing
# the row stream.
while True:
try:
for block in self._wrapped:
rowcount = block.avro_rows.row_count
self._position.offset += rowcount
yield block
return # Made it through the whole stream.
except _STREAM_RESUMPTION_EXCEPTIONS:
# Transient error, so reconnect to the stream.
pass
self._reconnect()
def _reconnect(self):
"""Reconnect to the ReadRows stream using the most recent offset."""
self._wrapped = self._client.read_rows(
_copy_stream_position(self._position), **self._read_rows_kwargs
)
def rows(self, read_session):
"""Iterate over all rows in the stream.
This method requires the fastavro library in order to parse row
blocks.
.. warning::
DATETIME columns are not supported. They are currently parsed as
strings in the fastavro library.
Args:
read_session ( \
~google.cloud.bigquery_storage_v1beta1.types.ReadSession \
):
The read session associated with this read rows stream. This
contains the schema, which is required to parse the data
blocks.
Returns:
Iterable[Mapping]:
A sequence of rows, represented as dictionaries.
"""
if fastavro is None:
raise ImportError(_FASTAVRO_REQUIRED)
avro_schema = _avro_schema(read_session)
blocks = (_avro_rows(block, avro_schema) for block in self)
return itertools.chain.from_iterable(blocks)
def to_dataframe(self, read_session):
"""Create a :class:`pandas.DataFrame` of all rows in the stream.
This method requires the pandas library to create a data frame and the
fastavro library to parse row blocks.
.. warning::
DATETIME columns are not supported. They are currently parsed as
strings in the fastavro library.
Args:
read_session ( \
~google.cloud.bigquery_storage_v1beta1.types.ReadSession \
):
The read session associated with this read rows stream. This
contains the schema, which is required to parse the data
blocks.
Returns:
pandas.DataFrame:
A data frame of all rows in the stream.
"""
if fastavro is None:
raise ImportError(_FASTAVRO_REQUIRED)
if pandas is None:
raise ImportError("pandas is required to create a DataFrame")
avro_schema = _avro_schema(read_session)
frames = []
for block in self:
dataframe = pandas.DataFrame(list(_avro_rows(block, avro_schema)))
frames.append(dataframe)
return pandas.concat(frames)
def _avro_schema(read_session):
"""Extract and parse Avro schema from a read session.
Args:
read_session ( \
~google.cloud.bigquery_storage_v1beta1.types.ReadSession \
):
The read session associated with this read rows stream. This
contains the schema, which is required to parse the data
blocks.
Returns:
A parsed Avro schema, using :func:`fastavro.schema.parse_schema`.
"""
json_schema = json.loads(read_session.avro_schema.schema)
return fastavro.parse_schema(json_schema)
def _avro_rows(block, avro_schema):
"""Parse all rows in a stream block.
Args:
block ( \
    ~google.cloud.bigquery_storage_v1beta1.types.ReadRowsResponse \
):
    A block of rows from the read rows stream, containing the
    serialized Avro rows to parse.
avro_schema (fastavro schema):
    A parsed Avro schema, as returned by :func:`_avro_schema`, used
    to deserialize the rows in the block.
Returns:
Iterable[Mapping]:
A sequence of rows, represented as dictionaries.
"""
blockio = six.BytesIO(block.avro_rows.serialized_binary_rows)
while True:
# Use a while loop because schemaless_reader can only read
# a single record at a time.
try:
# TODO: Parse DATETIME into datetime.datetime (no timezone),
# instead of as a string.
yield fastavro.schemaless_reader(blockio, avro_schema)
except StopIteration:
break # Finished with block
def _copy_stream_position(position):
"""Copy a StreamPosition.
Args:
position (Union[ \
dict, \
~google.cloud.bigquery_storage_v1beta1.types.StreamPosition \
]):
StreamPosition (or dictionary in StreamPosition format) to copy.
Returns:
~google.cloud.bigquery_storage_v1beta1.types.StreamPosition:
A copy of the input StreamPosition.
"""
if isinstance(position, types.StreamPosition):
output = types.StreamPosition()
output.CopyFrom(position)
return output
return types.StreamPosition(**position)
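# Illustrative sketch (not part of the original module): once a
# ReadRowsStream has been obtained from the GAPIC client (the client calls
# that create the read session and stream are assumptions and are not shown
# here), its rows can be consumed exactly once via one of the two APIs.
def _example_read_rows_usage(reader, read_session):  # hypothetical helper
    # Lazy alternative (requires fastavro):
    #     for row in reader.rows(read_session):
    #         ...
    # Materialize everything into a DataFrame (requires pandas as well):
    return reader.to_dataframe(read_session)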
|
apache-2.0
|
StuartLittlefair/astropy
|
astropy/time/tests/test_basic.py
|
1
|
89645
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import copy
import functools
import datetime
from copy import deepcopy
from decimal import Decimal, localcontext
import numpy as np
import pytest
from numpy.testing import assert_allclose
import erfa
from erfa import ErfaWarning
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.utils import isiterable, iers
from astropy.time import (Time, TimeDelta, ScaleValueError, STANDARD_TIME_SCALES,
TimeString, TimezoneInfo, TIME_FORMATS)
from astropy.coordinates import EarthLocation
from astropy import units as u
from astropy.table import Column, Table
try:
import pytz
HAS_PYTZ = True
except ImportError:
HAS_PYTZ = False
allclose_jd = functools.partial(np.allclose, rtol=np.finfo(float).eps, atol=0)
allclose_jd2 = functools.partial(np.allclose, rtol=np.finfo(float).eps,
atol=np.finfo(float).eps) # 20 ps atol
allclose_sec = functools.partial(np.allclose, rtol=np.finfo(float).eps,
atol=np.finfo(float).eps * 24 * 3600)
allclose_year = functools.partial(np.allclose, rtol=np.finfo(float).eps,
atol=0.) # 14 microsec at current epoch
def setup_function(func):
func.FORMATS_ORIG = deepcopy(Time.FORMATS)
def teardown_function(func):
Time.FORMATS.clear()
Time.FORMATS.update(func.FORMATS_ORIG)
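# Illustrative sketch (not part of the original test module): the core Time
# round trip exercised throughout the tests below -- build a UTC time from an
# ISO string, convert scales via attribute access, and read other formats.
def _example_time_usage():  # hypothetical helper, not a test
    t = Time('2010-01-01 00:00:00', format='iso', scale='utc')
    return t.tai.iso, t.jd  # TAI representation and the Julian Date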
class TestBasic:
"""Basic tests stemming from initial example and API reference"""
def test_simple(self):
times = ['1999-01-01 00:00:00.123456789', '2010-01-01 00:00:00']
t = Time(times, format='iso', scale='utc')
assert (repr(t) == "<Time object: scale='utc' format='iso' "
"value=['1999-01-01 00:00:00.123' '2010-01-01 00:00:00.000']>")
assert allclose_jd(t.jd1, np.array([2451180., 2455198.]))
assert allclose_jd2(t.jd2, np.array([-0.5 + 1.4288980208333335e-06,
-0.50000000e+00]))
# Set scale to TAI
t = t.tai
assert (repr(t) == "<Time object: scale='tai' format='iso' "
"value=['1999-01-01 00:00:32.123' '2010-01-01 00:00:34.000']>")
assert allclose_jd(t.jd1, np.array([2451180., 2455198.]))
assert allclose_jd2(t.jd2, np.array([-0.5 + 0.00037179926839122024,
-0.5 + 0.00039351851851851852]))
# Get a new ``Time`` object which is referenced to the TT scale
# (internal JD1 and JD2 are now with respect to TT scale)
assert (repr(t.tt) == "<Time object: scale='tt' format='iso' "
"value=['1999-01-01 00:01:04.307' '2010-01-01 00:01:06.184']>")
# Get the representation of the ``Time`` object in a particular format
# (in this case seconds since 1998.0). This returns either a scalar or
# array, depending on whether the input was a scalar or array
assert allclose_sec(t.cxcsec, np.array([31536064.307456788, 378691266.18400002]))
def test_different_dimensions(self):
"""Test scalars, vector, and higher-dimensions"""
# scalar
val, val1 = 2450000.0, 0.125
t1 = Time(val, val1, format='jd')
assert t1.isscalar is True and t1.shape == ()
# vector
val = np.arange(2450000., 2450010.)
t2 = Time(val, format='jd')
assert t2.isscalar is False and t2.shape == val.shape
# explicitly check broadcasting for mixed vector, scalar.
val2 = 0.
t3 = Time(val, val2, format='jd')
assert t3.isscalar is False and t3.shape == val.shape
val2 = (np.arange(5.) / 10.).reshape(5, 1)
# now see if broadcasting to two-dimensional works
t4 = Time(val, val2, format='jd')
assert t4.isscalar is False
assert t4.shape == np.broadcast(val, val2).shape
@pytest.mark.parametrize('format_', Time.FORMATS)
def test_empty_value(self, format_):
t = Time([], format=format_)
assert t.size == 0
assert t.shape == (0,)
assert t.format == format_
t_value = t.value
assert t_value.size == 0
assert t_value.shape == (0,)
t2 = Time(t_value, format=format_)
assert t2.size == 0
assert t2.shape == (0,)
assert t2.format == format_
t3 = t2.tai
assert t3.size == 0
assert t3.shape == (0,)
assert t3.format == format_
assert t3.scale == 'tai'
@pytest.mark.parametrize('value', [2455197.5, [2455197.5]])
def test_copy_time(self, value):
"""Test copying the values of a Time object by passing it into the
Time initializer.
"""
t = Time(value, format='jd', scale='utc')
t2 = Time(t, copy=False)
assert np.all(t.jd - t2.jd == 0)
assert np.all((t - t2).jd == 0)
assert t._time.jd1 is t2._time.jd1
assert t._time.jd2 is t2._time.jd2
t2 = Time(t, copy=True)
assert np.all(t.jd - t2.jd == 0)
assert np.all((t - t2).jd == 0)
assert t._time.jd1 is not t2._time.jd1
assert t._time.jd2 is not t2._time.jd2
# Include initializers
t2 = Time(t, format='iso', scale='tai', precision=1)
assert t2.value == '2010-01-01 00:00:34.0'
t2 = Time(t, format='iso', scale='tai', out_subfmt='date')
assert t2.value == '2010-01-01'
def test_getitem(self):
"""Test that Time objects holding arrays are properly subscriptable,
set isscalar as appropriate, and also subscript delta_ut1_utc, etc."""
mjd = np.arange(50000, 50010)
t = Time(mjd, format='mjd', scale='utc', location=('45d', '50d'))
t1 = t[3]
assert t1.isscalar is True
assert t1._time.jd1 == t._time.jd1[3]
assert t1.location is t.location
t1a = Time(mjd[3], format='mjd', scale='utc')
assert t1a.isscalar is True
assert np.all(t1._time.jd1 == t1a._time.jd1)
t1b = Time(t[3])
assert t1b.isscalar is True
assert np.all(t1._time.jd1 == t1b._time.jd1)
t2 = t[4:6]
assert t2.isscalar is False
assert np.all(t2._time.jd1 == t._time.jd1[4:6])
assert t2.location is t.location
t2a = Time(t[4:6])
assert t2a.isscalar is False
assert np.all(t2a._time.jd1 == t._time.jd1[4:6])
t2b = Time([t[4], t[5]])
assert t2b.isscalar is False
assert np.all(t2b._time.jd1 == t._time.jd1[4:6])
t2c = Time((t[4], t[5]))
assert t2c.isscalar is False
assert np.all(t2c._time.jd1 == t._time.jd1[4:6])
t.delta_tdb_tt = np.arange(len(t)) # Explicitly set (not testing .tdb)
t3 = t[4:6]
assert np.all(t3._delta_tdb_tt == t._delta_tdb_tt[4:6])
t4 = Time(mjd, format='mjd', scale='utc',
location=(np.arange(len(mjd)), np.arange(len(mjd))))
t5a = t4[3]
assert t5a.location == t4.location[3]
assert t5a.location.shape == ()
t5b = t4[3:4]
assert t5b.location.shape == (1,)
# Check that indexing a size-1 array returns a scalar location as well;
# see gh-10113.
t5c = t5b[0]
assert t5c.location.shape == ()
t6 = t4[4:6]
assert np.all(t6.location == t4.location[4:6])
# check it is a view
# (via ndarray, since the Quantity setter is problematic for structured arrays)
allzeros = np.array((0., 0., 0.), dtype=t4.location.dtype)
assert t6.location.view(np.ndarray)[-1] != allzeros
assert t4.location.view(np.ndarray)[5] != allzeros
t6.location.view(np.ndarray)[-1] = allzeros
assert t4.location.view(np.ndarray)[5] == allzeros
# Test subscription also works for two-dimensional arrays.
frac = np.arange(0., 0.999, 0.2)
t7 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc',
location=('45d', '50d'))
assert t7[0, 0]._time.jd1 == t7._time.jd1[0, 0]
assert t7[0, 0].isscalar is True
assert np.all(t7[5]._time.jd1 == t7._time.jd1[5])
assert np.all(t7[5]._time.jd2 == t7._time.jd2[5])
assert np.all(t7[:, 2]._time.jd1 == t7._time.jd1[:, 2])
assert np.all(t7[:, 2]._time.jd2 == t7._time.jd2[:, 2])
assert np.all(t7[:, 0]._time.jd1 == t._time.jd1)
assert np.all(t7[:, 0]._time.jd2 == t._time.jd2)
# Get tdb to check that delta_tdb_tt attribute is sliced properly.
t7_tdb = t7.tdb
assert t7_tdb[0, 0].delta_tdb_tt == t7_tdb.delta_tdb_tt[0, 0]
assert np.all(t7_tdb[5].delta_tdb_tt == t7_tdb.delta_tdb_tt[5])
assert np.all(t7_tdb[:, 2].delta_tdb_tt == t7_tdb.delta_tdb_tt[:, 2])
# Explicitly set delta_tdb_tt attribute. Now it should not be sliced.
t7.delta_tdb_tt = 0.1
t7_tdb2 = t7.tdb
assert t7_tdb2[0, 0].delta_tdb_tt == 0.1
assert t7_tdb2[5].delta_tdb_tt == 0.1
assert t7_tdb2[:, 2].delta_tdb_tt == 0.1
# Check broadcasting of location.
t8 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc',
location=(np.arange(len(frac)), np.arange(len(frac))))
assert t8[0, 0].location == t8.location[0, 0]
assert np.all(t8[5].location == t8.location[5])
assert np.all(t8[:, 2].location == t8.location[:, 2])
# Finally check empty array.
t9 = t[:0]
assert t9.isscalar is False
assert t9.shape == (0,)
assert t9.size == 0
def test_properties(self):
"""Use properties to convert scales and formats. Note that the UT1 to
UTC transformation requires a supplementary value (``delta_ut1_utc``)
that can be obtained by interpolating from a table supplied by IERS.
This is tested separately."""
t = Time('2010-01-01 00:00:00', format='iso', scale='utc')
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert allclose_jd(t.jd, 2455197.5)
assert t.iso == '2010-01-01 00:00:00.000'
assert t.tt.iso == '2010-01-01 00:01:06.184'
assert t.tai.fits == '2010-01-01T00:00:34.000'
assert allclose_jd(t.utc.jd, 2455197.5)
assert allclose_jd(t.ut1.jd, 2455197.500003867)
assert t.tcg.isot == '2010-01-01T00:01:06.910'
assert allclose_sec(t.unix, 1262304000.0)
assert allclose_sec(t.cxcsec, 378691266.184)
assert allclose_sec(t.gps, 946339215.0)
assert t.datetime == datetime.datetime(2010, 1, 1)
def test_precision(self):
"""Set the output precision which is used for some formats. This is
also a test of the code that provides a dict for global and instance
options."""
t = Time('2010-01-01 00:00:00', format='iso', scale='utc')
# Uses initial class-defined precision=3
assert t.iso == '2010-01-01 00:00:00.000'
# Set instance precision to 9
t.precision = 9
assert t.iso == '2010-01-01 00:00:00.000000000'
assert t.tai.utc.iso == '2010-01-01 00:00:00.000000000'
def test_transforms(self):
"""Transform from UTC to all supported time scales (TAI, TCB, TCG,
TDB, TT, UT1, UTC). This requires auxiliary information (latitude and
longitude)."""
lat = 19.48125
lon = -155.933222
t = Time('2006-01-15 21:24:37.5', format='iso', scale='utc',
precision=7, location=(lon, lat))
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert t.utc.iso == '2006-01-15 21:24:37.5000000'
assert t.ut1.iso == '2006-01-15 21:24:37.8341000'
assert t.tai.iso == '2006-01-15 21:25:10.5000000'
assert t.tt.iso == '2006-01-15 21:25:42.6840000'
assert t.tcg.iso == '2006-01-15 21:25:43.3226905'
assert t.tdb.iso == '2006-01-15 21:25:42.6843728'
assert t.tcb.iso == '2006-01-15 21:25:56.8939523'
def test_transforms_no_location(self):
"""Location should default to geocenter (relevant for TDB, TCB)."""
t = Time('2006-01-15 21:24:37.5', format='iso', scale='utc',
precision=7)
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert t.utc.iso == '2006-01-15 21:24:37.5000000'
assert t.ut1.iso == '2006-01-15 21:24:37.8341000'
assert t.tai.iso == '2006-01-15 21:25:10.5000000'
assert t.tt.iso == '2006-01-15 21:25:42.6840000'
assert t.tcg.iso == '2006-01-15 21:25:43.3226905'
assert t.tdb.iso == '2006-01-15 21:25:42.6843725'
assert t.tcb.iso == '2006-01-15 21:25:56.8939519'
# Check we get the same result
t2 = Time('2006-01-15 21:24:37.5', format='iso', scale='utc',
location=(0*u.m, 0*u.m, 0*u.m))
assert t == t2
assert t.tdb == t2.tdb
def test_location(self):
"""Check that location creates an EarthLocation object, and that
such objects can be used as arguments.
"""
lat = 19.48125
lon = -155.933222
t = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc',
precision=6, location=(lon, lat))
assert isinstance(t.location, EarthLocation)
location = EarthLocation(lon, lat)
t2 = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc',
precision=6, location=location)
assert isinstance(t2.location, EarthLocation)
assert t2.location == t.location
t3 = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc',
precision=6, location=(location.x, location.y, location.z))
assert isinstance(t3.location, EarthLocation)
assert t3.location == t.location
def test_location_array(self):
"""Check that location arrays are checked for size and used
for the corresponding times. Also checks that erfa
can handle array-valued locations, and can broadcast these if needed.
"""
lat = 19.48125
lon = -155.933222
t = Time(['2006-01-15 21:24:37.5'] * 2, format='iso', scale='utc',
precision=6, location=(lon, lat))
assert np.all(t.utc.iso == '2006-01-15 21:24:37.500000')
assert np.all(t.tdb.iso[0] == '2006-01-15 21:25:42.684373')
t2 = Time(['2006-01-15 21:24:37.5'] * 2, format='iso', scale='utc',
precision=6, location=(np.array([lon, 0]),
np.array([lat, 0])))
assert np.all(t2.utc.iso == '2006-01-15 21:24:37.500000')
assert t2.tdb.iso[0] == '2006-01-15 21:25:42.684373'
assert t2.tdb.iso[1] != '2006-01-15 21:25:42.684373'
with pytest.raises(ValueError): # 1 time, but two locations
Time('2006-01-15 21:24:37.5', format='iso', scale='utc',
precision=6, location=(np.array([lon, 0]),
np.array([lat, 0])))
with pytest.raises(ValueError): # 3 times, but two locations
Time(['2006-01-15 21:24:37.5'] * 3, format='iso', scale='utc',
precision=6, location=(np.array([lon, 0]),
np.array([lat, 0])))
# multidimensional
mjd = np.arange(50000., 50008.).reshape(4, 2)
t3 = Time(mjd, format='mjd', scale='utc', location=(lon, lat))
assert t3.shape == (4, 2)
assert t3.location.shape == ()
assert t3.tdb.shape == t3.shape
t4 = Time(mjd, format='mjd', scale='utc',
location=(np.array([lon, 0]), np.array([lat, 0])))
assert t4.shape == (4, 2)
assert t4.location.shape == t4.shape
assert t4.tdb.shape == t4.shape
t5 = Time(mjd, format='mjd', scale='utc',
location=(np.array([[lon], [0], [0], [0]]),
np.array([[lat], [0], [0], [0]])))
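        # The (4, 1) location array broadcasts against the (4, 2) times,
        # so t5.location ends up with the full (4, 2) shape.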
assert t5.shape == (4, 2)
assert t5.location.shape == t5.shape
assert t5.tdb.shape == t5.shape
def test_all_scale_transforms(self):
"""Test that standard scale transforms work. Does not test correctness,
except reversibility [#2074]. Also tests that standard scales can't be
converted to local scales"""
lat = 19.48125
lon = -155.933222
with iers.conf.set_temp('auto_download', False):
for scale1 in STANDARD_TIME_SCALES:
t1 = Time('2006-01-15 21:24:37.5', format='iso', scale=scale1,
location=(lon, lat))
for scale2 in STANDARD_TIME_SCALES:
t2 = getattr(t1, scale2)
t21 = getattr(t2, scale1)
assert allclose_jd(t21.jd, t1.jd)
# test for conversion to local scale
scale3 = 'local'
with pytest.raises(ScaleValueError):
t2 = getattr(t1, scale3)
def test_creating_all_formats(self):
"""Create a time object using each defined format"""
Time(2000.5, format='decimalyear')
Time(100.0, format='cxcsec')
Time(100.0, format='unix')
Time(100.0, format='gps')
Time(1950.0, format='byear', scale='tai')
Time(2000.0, format='jyear', scale='tai')
Time('B1950.0', format='byear_str', scale='tai')
Time('J2000.0', format='jyear_str', scale='tai')
Time('2000-01-01 12:23:34.0', format='iso', scale='tai')
Time('2000-01-01 12:23:34.0Z', format='iso', scale='utc')
Time('2000-01-01T12:23:34.0', format='isot', scale='tai')
Time('2000-01-01T12:23:34.0Z', format='isot', scale='utc')
Time('2000-01-01T12:23:34.0', format='fits')
Time('2000-01-01T12:23:34.0', format='fits', scale='tdb')
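        # The two-part (val, val2) initializations below split the value across
        # two floats (e.g. integer and fractional day), which preserves
        # precision beyond what a single float can hold.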
Time(2400000.5, 51544.0333981, format='jd', scale='tai')
Time(0.0, 51544.0333981, format='mjd', scale='tai')
Time('2000:001:12:23:34.0', format='yday', scale='tai')
Time('2000:001:12:23:34.0Z', format='yday', scale='utc')
dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
Time(dt, format='datetime', scale='tai')
Time([dt, dt], format='datetime', scale='tai')
dt64 = np.datetime64('2012-06-18T02:00:05.453000000')
Time(dt64, format='datetime64', scale='tai')
Time([dt64, dt64], format='datetime64', scale='tai')
def test_local_format_transforms(self):
"""
        Test transformation of local time to different formats.
        Transformation to epoch-based formats that require a reference time
        scale should raise ScaleValueError.
"""
t = Time('2006-01-15 21:24:37.5', scale='local')
assert_allclose(t.jd, 2453751.3921006946, atol=0.001 / 3600. / 24., rtol=0.)
assert_allclose(t.mjd, 53750.892100694444, atol=0.001 / 3600. / 24., rtol=0.)
assert_allclose(t.decimalyear, 2006.0408002758752, atol=0.001 / 3600. / 24. / 365., rtol=0.)
assert t.datetime == datetime.datetime(2006, 1, 15, 21, 24, 37, 500000)
assert t.isot == '2006-01-15T21:24:37.500'
assert t.yday == '2006:015:21:24:37.500'
assert t.fits == '2006-01-15T21:24:37.500'
assert_allclose(t.byear, 2006.04217888831, atol=0.001 / 3600. / 24. / 365., rtol=0.)
assert_allclose(t.jyear, 2006.0407723496082, atol=0.001 / 3600. / 24. / 365., rtol=0.)
assert t.byear_str == 'B2006.042'
assert t.jyear_str == 'J2006.041'
        # Epoch-based time formats need a reference scale, so they are
        # undefined for the local scale.
with pytest.raises(ScaleValueError):
t.gps
with pytest.raises(ScaleValueError):
t.unix
with pytest.raises(ScaleValueError):
t.cxcsec
with pytest.raises(ScaleValueError):
t.plot_date
def test_datetime(self):
"""
Test datetime format, including guessing the format from the input type
by not providing the format keyword to Time.
"""
dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
dt2 = datetime.datetime(2001, 1, 1)
t = Time(dt, scale='utc', precision=9)
assert t.iso == '2000-01-02 03:04:05.123456000'
assert t.datetime == dt
assert t.value == dt
t2 = Time(t.iso, scale='utc')
assert t2.datetime == dt
t = Time([dt, dt2], scale='utc')
assert np.all(t.value == [dt, dt2])
t = Time('2000-01-01 01:01:01.123456789', scale='tai')
assert t.datetime == datetime.datetime(2000, 1, 1, 1, 1, 1, 123457)
# broadcasting
dt3 = (dt + (dt2-dt) * np.arange(12)).reshape(4, 3)
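        # Twelve evenly spaced datetimes between dt and dt2, reshaped to (4, 3)
        # to exercise multi-dimensional datetime input.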
t3 = Time(dt3, scale='utc')
assert t3.shape == (4, 3)
assert t3[2, 1].value == dt3[2, 1]
assert t3[2, 1] == Time(dt3[2, 1])
assert np.all(t3.value == dt3)
assert np.all(t3[1].value == dt3[1])
assert np.all(t3[:, 2] == Time(dt3[:, 2]))
assert Time(t3[2, 0]) == t3[2, 0]
def test_datetime64(self):
dt64 = np.datetime64('2000-01-02T03:04:05.123456789')
dt64_2 = np.datetime64('2000-01-02')
t = Time(dt64, scale='utc', precision=9, format='datetime64')
assert t.iso == '2000-01-02 03:04:05.123456789'
assert t.datetime64 == dt64
assert t.value == dt64
t2 = Time(t.iso, scale='utc')
assert t2.datetime64 == dt64
t = Time(dt64_2, scale='utc', precision=3, format='datetime64')
assert t.iso == '2000-01-02 00:00:00.000'
assert t.datetime64 == dt64_2
assert t.value == dt64_2
t2 = Time(t.iso, scale='utc')
assert t2.datetime64 == dt64_2
t = Time([dt64, dt64_2], scale='utc', format='datetime64')
assert np.all(t.value == [dt64, dt64_2])
t = Time('2000-01-01 01:01:01.123456789', scale='tai')
assert t.datetime64 == np.datetime64('2000-01-01T01:01:01.123456789')
# broadcasting
dt3 = (dt64 + (dt64_2-dt64) * np.arange(12)).reshape(4, 3)
t3 = Time(dt3, scale='utc', format='datetime64')
assert t3.shape == (4, 3)
assert t3[2, 1].value == dt3[2, 1]
assert t3[2, 1] == Time(dt3[2, 1], format='datetime64')
assert np.all(t3.value == dt3)
assert np.all(t3[1].value == dt3[1])
assert np.all(t3[:, 2] == Time(dt3[:, 2], format='datetime64'))
assert Time(t3[2, 0], format='datetime64') == t3[2, 0]
def test_epoch_transform(self):
"""Besselian and julian epoch transforms"""
jd = 2457073.05631
t = Time(jd, format='jd', scale='tai', precision=6)
assert allclose_year(t.byear, 2015.1365941020817)
assert allclose_year(t.jyear, 2015.1349933196439)
assert t.byear_str == 'B2015.136594'
assert t.jyear_str == 'J2015.134993'
t2 = Time(t.byear, format='byear', scale='tai')
assert allclose_jd(t2.jd, jd)
t2 = Time(t.jyear, format='jyear', scale='tai')
assert allclose_jd(t2.jd, jd)
t = Time('J2015.134993', scale='tai', precision=6)
assert np.allclose(t.jd, jd, rtol=1e-10, atol=0) # J2015.134993 has 10 digit precision
assert t.byear_str == 'B2015.136594'
def test_input_validation(self):
"""Wrong input type raises error"""
times = [10, 20]
with pytest.raises(ValueError):
Time(times, format='iso', scale='utc')
with pytest.raises(ValueError):
Time('2000:001', format='jd', scale='utc')
with pytest.raises(ValueError): # unguessable
Time([])
with pytest.raises(ValueError):
Time([50000.0], ['bad'], format='mjd', scale='tai')
with pytest.raises(ValueError):
Time(50000.0, 'bad', format='mjd', scale='tai')
with pytest.raises(ValueError):
Time('2005-08-04T00:01:02.000Z', scale='tai')
# regression test against #3396
with pytest.raises(ValueError):
Time(np.nan, format='jd', scale='utc')
with pytest.raises(ValueError):
with pytest.warns(AstropyDeprecationWarning):
Time('2000-01-02T03:04:05(TAI)', scale='utc')
with pytest.raises(ValueError):
Time('2000-01-02T03:04:05(TAI')
with pytest.raises(ValueError):
Time('2000-01-02T03:04:05(UT(NIST)')
def test_utc_leap_sec(self):
"""Time behaves properly near or in UTC leap second. This
uses the 2012-06-30 leap second for testing."""
for year, month, day in ((2012, 6, 30), (2016, 12, 31)):
# Start with a day without a leap second and note rollover
yyyy_mm = f'{year:04d}-{month:02d}'
yyyy_mm_dd = f'{year:04d}-{month:02d}-{day:02d}'
with pytest.warns(ErfaWarning):
t1 = Time(yyyy_mm + '-01 23:59:60.0', scale='utc')
assert t1.iso == yyyy_mm + '-02 00:00:00.000'
# Leap second is different
t1 = Time(yyyy_mm_dd + ' 23:59:59.900', scale='utc')
assert t1.iso == yyyy_mm_dd + ' 23:59:59.900'
t1 = Time(yyyy_mm_dd + ' 23:59:60.000', scale='utc')
assert t1.iso == yyyy_mm_dd + ' 23:59:60.000'
t1 = Time(yyyy_mm_dd + ' 23:59:60.999', scale='utc')
assert t1.iso == yyyy_mm_dd + ' 23:59:60.999'
if month == 6:
yyyy_mm_dd_plus1 = f'{year:04d}-07-01'
else:
yyyy_mm_dd_plus1 = f'{year + 1:04d}-01-01'
with pytest.warns(ErfaWarning):
t1 = Time(yyyy_mm_dd + ' 23:59:61.0', scale='utc')
assert t1.iso == yyyy_mm_dd_plus1 + ' 00:00:00.000'
# Delta time gives 2 seconds here as expected
t0 = Time(yyyy_mm_dd + ' 23:59:59', scale='utc')
t1 = Time(yyyy_mm_dd_plus1 + ' 00:00:00', scale='utc')
assert allclose_sec((t1 - t0).sec, 2.0)
def test_init_from_time_objects(self):
"""Initialize from one or more Time objects"""
t1 = Time('2007:001', scale='tai')
t2 = Time(['2007-01-02', '2007-01-03'], scale='utc')
# Init from a list of Time objects without an explicit scale
t3 = Time([t1, t2])
# Test that init appropriately combines a scalar (t1) and list (t2)
# and that scale and format are same as first element.
assert len(t3) == 3
assert t3.scale == t1.scale
assert t3.format == t1.format # t1 format is yday
assert np.all(t3.value == np.concatenate([[t1.yday], t2.tai.yday]))
# Init from a single Time object without a scale
t3 = Time(t1)
assert t3.isscalar
assert t3.scale == t1.scale
assert t3.format == t1.format
assert np.all(t3.value == t1.value)
# Init from a single Time object with scale specified
t3 = Time(t1, scale='utc')
assert t3.scale == 'utc'
assert np.all(t3.value == t1.utc.value)
# Init from a list of Time object with scale specified
t3 = Time([t1, t2], scale='tt')
assert t3.scale == 'tt'
assert t3.format == t1.format # yday
assert np.all(t3.value == np.concatenate([[t1.tt.yday], t2.tt.yday]))
# OK, how likely is this... but might as well test.
mjd = np.arange(50000., 50006.)
frac = np.arange(0., 0.999, 0.2)
t4 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc')
t5 = Time([t4[:2], t4[4:5]])
assert t5.shape == (3, 5)
# throw error when deriving local scale time
# from non local time scale
with pytest.raises(ValueError):
Time(t1, scale='local')
class TestVal2:
"""Tests related to val2"""
@pytest.mark.parametrize("d", [
dict(val="2001:001", val2="ignored", scale="utc"),
dict(val={'year': 2015, 'month': 2, 'day': 3,
'hour': 12, 'minute': 13, 'second': 14.567},
val2="ignored", scale="utc"),
dict(val=np.datetime64('2005-02-25'), val2="ignored", scale="utc"),
dict(val=datetime.datetime(2000, 1, 2, 12, 0, 0),
val2="ignored", scale="utc"),
])
def test_unused_val2_raises(self, d):
"""Test that providing val2 is for string input lets user know we won't use it"""
with pytest.raises(ValueError):
Time(**d)
def test_val2(self):
"""Various tests of the val2 input"""
t = Time([0.0, 50000.0], [50000.0, 0.0], format='mjd', scale='tai')
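        # Only the sum val + val2 matters, so swapping the split between the
        # two values gives identical times.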
assert t.mjd[0] == t.mjd[1]
assert t.jd[0] == t.jd[1]
def test_val_broadcasts_against_val2(self):
mjd = np.arange(50000., 50007.)
frac = np.arange(0., 0.999, 0.2)
t = Time(mjd[:, np.newaxis], frac, format='mjd', scale='utc')
assert t.shape == (7, 5)
with pytest.raises(ValueError):
Time([0.0, 50000.0], [0.0, 1.0, 2.0], format='mjd', scale='tai')
def test_broadcast_not_writable(self):
val = (2458000 + np.arange(3))[:, None]
val2 = np.linspace(0, 1, 4, endpoint=False)
t = Time(val=val, val2=val2, format="jd", scale="tai")
t_b = Time(val=val + 0 * val2, val2=0 * val + val2, format="jd", scale="tai")
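        # t holds internally broadcast (read-only) arrays while t_b was built
        # from explicitly broadcast copies; item assignment should behave the
        # same for both.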
t_i = Time(val=57990, val2=0.3, format="jd", scale="tai")
t_b[1, 2] = t_i
t[1, 2] = t_i
assert t_b[1, 2] == t[1, 2], "writing worked"
assert t_b[0, 2] == t[0, 2], "broadcasting didn't cause problems"
assert t_b[1, 1] == t[1, 1], "broadcasting didn't cause problems"
assert np.all(t_b == t), "behaved as expected"
def test_broadcast_one_not_writable(self):
val = (2458000 + np.arange(3))
val2 = np.arange(1)
t = Time(val=val, val2=val2, format="jd", scale="tai")
t_b = Time(val=val + 0 * val2, val2=0 * val + val2, format="jd", scale="tai")
t_i = Time(val=57990, val2=0.3, format="jd", scale="tai")
t_b[1] = t_i
t[1] = t_i
assert t_b[1] == t[1], "writing worked"
assert t_b[0] == t[0], "broadcasting didn't cause problems"
assert np.all(t_b == t), "behaved as expected"
class TestSubFormat:
"""Test input and output subformat functionality"""
def test_input_subformat(self):
"""Input subformat selection"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-01-01', '2000-01-01 01:01',
'2000-01-01 01:01:01', '2000-01-01 01:01:01.123']
t = Time(times, format='iso', scale='tai')
assert np.all(t.iso == np.array(['2000-01-01 00:00:00.000',
'2000-01-01 01:01:00.000',
'2000-01-01 01:01:01.000',
'2000-01-01 01:01:01.123']))
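        # With in_subfmt='date_*' only subformats whose names start with
        # 'date_' (e.g. 'date_hm', 'date_hms') are accepted, so a bare date
        # such as '2000-01-01' would no longer parse.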
# Heterogeneous input formats with in_subfmt='date_*'
times = ['2000-01-01 01:01',
'2000-01-01 01:01:01', '2000-01-01 01:01:01.123']
t = Time(times, format='iso', scale='tai',
in_subfmt='date_*')
assert np.all(t.iso == np.array(['2000-01-01 01:01:00.000',
'2000-01-01 01:01:01.000',
'2000-01-01 01:01:01.123']))
def test_input_subformat_fail(self):
"""Failed format matching"""
with pytest.raises(ValueError):
Time('2000-01-01 01:01', format='iso', scale='tai',
in_subfmt='date')
def test_bad_input_subformat(self):
"""Non-existent input subformat"""
with pytest.raises(ValueError):
Time('2000-01-01 01:01', format='iso', scale='tai',
in_subfmt='doesnt exist')
def test_output_subformat(self):
"""Input subformat selection"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-01-01', '2000-01-01 01:01',
'2000-01-01 01:01:01', '2000-01-01 01:01:01.123']
t = Time(times, format='iso', scale='tai',
out_subfmt='date_hm')
assert np.all(t.iso == np.array(['2000-01-01 00:00',
'2000-01-01 01:01',
'2000-01-01 01:01',
'2000-01-01 01:01']))
def test_fits_format(self):
"""FITS format includes bigger years."""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-01-01', '2000-01-01T01:01:01', '2000-01-01T01:01:01.123']
t = Time(times, format='fits', scale='tai')
assert np.all(t.fits == np.array(['2000-01-01T00:00:00.000',
'2000-01-01T01:01:01.000',
'2000-01-01T01:01:01.123']))
# Explicit long format for output, default scale is UTC.
t2 = Time(times, format='fits', out_subfmt='long*')
assert np.all(t2.fits == np.array(['+02000-01-01T00:00:00.000',
'+02000-01-01T01:01:01.000',
'+02000-01-01T01:01:01.123']))
# Implicit long format for output, because of negative year.
times[2] = '-00594-01-01'
t3 = Time(times, format='fits', scale='tai')
assert np.all(t3.fits == np.array(['+02000-01-01T00:00:00.000',
'+02000-01-01T01:01:01.000',
'-00594-01-01T00:00:00.000']))
# Implicit long format for output, because of large positive year.
times[2] = '+10594-01-01'
t4 = Time(times, format='fits', scale='tai')
assert np.all(t4.fits == np.array(['+02000-01-01T00:00:00.000',
'+02000-01-01T01:01:01.000',
'+10594-01-01T00:00:00.000']))
def test_yday_format(self):
"""Year:Day_of_year format"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-12-01', '2001-12-01 01:01:01.123']
t = Time(times, format='iso', scale='tai')
t.out_subfmt = 'date_hm'
assert np.all(t.yday == np.array(['2000:336:00:00',
'2001:335:01:01']))
t.out_subfmt = '*'
assert np.all(t.yday == np.array(['2000:336:00:00:00.000',
'2001:335:01:01:01.123']))
def test_scale_input(self):
"""Test for issues related to scale input"""
# Check case where required scale is defined by the TimeFormat.
# All three should work.
t = Time(100.0, format='cxcsec', scale='utc')
assert t.scale == 'utc'
t = Time(100.0, format='unix', scale='tai')
assert t.scale == 'tai'
t = Time(100.0, format='gps', scale='utc')
assert t.scale == 'utc'
# Check that bad scale is caught when format is specified
with pytest.raises(ScaleValueError):
Time(1950.0, format='byear', scale='bad scale')
# Check that bad scale is caught when format is auto-determined
with pytest.raises(ScaleValueError):
Time('2000:001:00:00:00', scale='bad scale')
def test_fits_scale(self):
"""Test that the previous FITS-string formatting can still be handled
but with a DeprecationWarning."""
for inputs in (("2000-01-02(TAI)", "tai"),
("1999-01-01T00:00:00.123(ET(NIST))", "tt"),
("2014-12-12T01:00:44.1(UTC)", "utc")):
with pytest.warns(AstropyDeprecationWarning):
t = Time(inputs[0])
assert t.scale == inputs[1]
# Create Time using normal ISOT syntax and compare with FITS
t2 = Time(inputs[0][:inputs[0].index("(")], format="isot",
scale=inputs[1])
assert t == t2
# Explicit check that conversions still work despite warning
with pytest.warns(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:00.123456789(UTC)')
t = t.tai
assert t.isot == '1999-01-01T00:00:32.123'
with pytest.warns(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:32.123456789(TAI)')
t = t.utc
assert t.isot == '1999-01-01T00:00:00.123'
# Check scale consistency
with pytest.warns(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:32.123456789(TAI)', scale="tai")
assert t.scale == "tai"
with pytest.warns(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:32.123456789(ET)', scale="tt")
assert t.scale == "tt"
with pytest.raises(ValueError), pytest.warns(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:32.123456789(TAI)', scale="utc")
def test_scale_default(self):
"""Test behavior when no scale is provided"""
# These first three are TimeFromEpoch and have an intrinsic time scale
t = Time(100.0, format='cxcsec')
assert t.scale == 'tt'
t = Time(100.0, format='unix')
assert t.scale == 'utc'
t = Time(100.0, format='gps')
assert t.scale == 'tai'
for date in ('2000:001', '2000-01-01T00:00:00'):
t = Time(date)
assert t.scale == 'utc'
t = Time(2000.1, format='byear')
assert t.scale == 'tt'
t = Time('J2000')
assert t.scale == 'tt'
def test_epoch_times(self):
"""Test time formats derived from EpochFromTime"""
t = Time(0.0, format='cxcsec', scale='tai')
assert t.tt.iso == '1998-01-01 00:00:00.000'
# Create new time object from this one and change scale, format
t2 = Time(t, scale='tt', format='iso')
assert t2.value == '1998-01-01 00:00:00.000'
        # Value taken from Chandra.Time.DateTime('2010:001:00:00:00').secs
t_cxcsec = 378691266.184
t = Time(t_cxcsec, format='cxcsec', scale='utc')
assert allclose_sec(t.value, t_cxcsec)
assert allclose_sec(t.cxcsec, t_cxcsec)
assert allclose_sec(t.tt.value, t_cxcsec)
assert allclose_sec(t.tt.cxcsec, t_cxcsec)
assert t.yday == '2010:001:00:00:00.000'
t = Time('2010:001:00:00:00.000', scale='utc')
assert allclose_sec(t.cxcsec, t_cxcsec)
assert allclose_sec(t.tt.cxcsec, t_cxcsec)
# Round trip through epoch time
for scale in ('utc', 'tt'):
t = Time('2000:001', scale=scale)
t2 = Time(t.unix, scale=scale, format='unix')
assert getattr(t2, scale).iso == '2000-01-01 00:00:00.000'
# Test unix time. Values taken from http://en.wikipedia.org/wiki/Unix_time
t = Time('2013-05-20 21:18:46', scale='utc')
assert allclose_sec(t.unix, 1369084726.0)
assert allclose_sec(t.tt.unix, 1369084726.0)
# Values from issue #1118
t = Time('2004-09-16T23:59:59', scale='utc')
assert allclose_sec(t.unix, 1095379199.0)
def test_plot_date(self):
"""Test the plot_date format.
Depending on the situation with matplotlib, this can give different
results because the plot date epoch time changed in matplotlib 3.3. This
test tries to use the matplotlib date2num function to make the test
independent of version, but if matplotlib isn't available then the code
(and test) use the pre-3.3 epoch.
"""
try:
from matplotlib.dates import date2num
except ImportError:
# No matplotlib, in which case this uses the epoch 0000-12-31
# as per matplotlib < 3.3.
# Value from:
# matplotlib.dates.set_epoch('0000-12-31')
# val = matplotlib.dates.date2num('2000-01-01')
val = 730120.0
else:
val = date2num(datetime.datetime(2000, 1, 1))
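            # With the matplotlib >= 3.3 default epoch of 1970-01-01T00:00:00,
            # date2num('2000-01-01') evaluates to 10957.0 rather than the
            # pre-3.3 value of 730120.0 used in the except branch above.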
t = Time('2000-01-01 00:00:00', scale='utc')
assert np.allclose(t.plot_date, val, atol=1e-5, rtol=0)
class TestNumericalSubFormat:
def test_explicit_example(self):
t = Time('54321.000000000001', format='mjd')
assert t == Time(54321, 1e-12, format='mjd')
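        # The string input is parsed into two parts internally, so the full
        # '...000000000001' precision survives even though a single float
        # cannot represent it (see the "Lost precision!" checks below).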
assert t.mjd == 54321. # Lost precision!
assert t.value == 54321. # Lost precision!
assert t.to_value('mjd') == 54321. # Lost precision!
assert t.to_value('mjd', subfmt='str') == '54321.000000000001'
assert t.to_value('mjd', 'bytes') == b'54321.000000000001'
expected_long = np.longdouble(54321.) + np.longdouble(1e-12)
# Check we're the same to within the double holding jd2
# (which is less precise than longdouble on arm64).
assert np.allclose(t.to_value('mjd', subfmt='long'),
expected_long, rtol=0, atol=np.finfo(float).eps)
t.out_subfmt = 'str'
assert t.value == '54321.000000000001'
assert t.to_value('mjd') == 54321. # Lost precision!
assert t.mjd == '54321.000000000001'
assert t.to_value('mjd', subfmt='bytes') == b'54321.000000000001'
assert t.to_value('mjd', subfmt='float') == 54321. # Lost precision!
t.out_subfmt = 'long'
assert np.allclose(t.value, expected_long,
rtol=0., atol=np.finfo(float).eps)
assert np.allclose(t.to_value('mjd', subfmt=None), expected_long,
rtol=0., atol=np.finfo(float).eps)
assert np.allclose(t.mjd, expected_long,
rtol=0., atol=np.finfo(float).eps)
assert t.to_value('mjd', subfmt='str') == '54321.000000000001'
assert t.to_value('mjd', subfmt='float') == 54321. # Lost precision!
@pytest.mark.skipif(np.finfo(np.longdouble).eps >= np.finfo(float).eps,
reason="long double is the same as float")
def test_explicit_longdouble(self):
i = 54321
# Create a different long double (which will give a different jd2
# even when long doubles are more precise than Time, as on arm64).
f = max(2.**(-np.finfo(np.longdouble).nmant) * 65536,
np.finfo(float).eps)
mjd_long = np.longdouble(i) + np.longdouble(f)
assert mjd_long != i, "longdouble failure!"
t = Time(mjd_long, format='mjd')
expected = Time(i, f, format='mjd')
assert abs(t - expected) <= 20. * u.ps
t_float = Time(i + f, format='mjd')
assert t_float == Time(i, format='mjd')
assert t_float != t
assert t.value == 54321. # Lost precision!
assert np.allclose(t.to_value('mjd', subfmt='long'), mjd_long,
rtol=0., atol=np.finfo(float).eps)
t2 = Time(mjd_long, format='mjd', out_subfmt='long')
assert np.allclose(t2.value, mjd_long,
rtol=0., atol=np.finfo(float).eps)
@pytest.mark.skipif(np.finfo(np.longdouble).eps >= np.finfo(float).eps,
reason="long double is the same as float")
def test_explicit_longdouble_one_val(self):
"""Ensure either val1 or val2 being longdouble is possible.
Regression test for issue gh-10033.
"""
i = 54321
f = max(2.**(-np.finfo(np.longdouble).nmant) * 65536,
np.finfo(float).eps)
t1 = Time(i, f, format='mjd')
t2 = Time(np.longdouble(i), f, format='mjd')
t3 = Time(i, np.longdouble(f), format='mjd')
t4 = Time(np.longdouble(i), np.longdouble(f), format='mjd')
assert t1 == t2 == t3 == t4
@pytest.mark.skipif(np.finfo(np.longdouble).eps >= np.finfo(float).eps,
reason="long double is the same as float")
@pytest.mark.parametrize("fmt", ["mjd", "unix", "cxcsec"])
def test_longdouble_for_other_types(self, fmt):
t_fmt = getattr(Time(58000, format="mjd"), fmt) # Get regular float
t_fmt_long = np.longdouble(t_fmt)
# Create a different long double (ensuring it will give a different jd2
# even when long doubles are more precise than Time, as on arm64).
atol = np.finfo(float).eps * (1. if fmt == 'mjd' else 24. * 3600.)
t_fmt_long2 = t_fmt_long + max(
t_fmt_long * np.finfo(np.longdouble).eps * 2, atol)
assert t_fmt_long != t_fmt_long2, "longdouble weird!"
tm = Time(t_fmt_long, format=fmt)
tm2 = Time(t_fmt_long2, format=fmt)
assert tm != tm2
tm_long2 = tm2.to_value(fmt, subfmt='long')
assert np.allclose(tm_long2, t_fmt_long2, rtol=0., atol=atol)
def test_subformat_input(self):
s = '54321.01234567890123456789'
i, f = s.split('.') # Note, OK only for fraction < 0.5
t = Time(float(i), float('.' + f), format='mjd')
t_str = Time(s, format='mjd')
t_bytes = Time(s.encode('ascii'), format='mjd')
t_decimal = Time(Decimal(s), format='mjd')
assert t_str == t
assert t_bytes == t
assert t_decimal == t
@pytest.mark.parametrize('out_subfmt', ('str', 'bytes'))
def test_subformat_output(self, out_subfmt):
i = 54321
f = np.array([0., 1e-9, 1e-12])
t = Time(i, f, format='mjd', out_subfmt=out_subfmt)
t_value = t.value
expected = np.array(['54321.0',
'54321.000000001',
'54321.000000000001'], dtype=out_subfmt)
assert np.all(t_value == expected)
assert np.all(Time(expected, format='mjd') == t)
# Explicit sub-format.
t = Time(i, f, format='mjd')
t_mjd_subfmt = t.to_value('mjd', subfmt=out_subfmt)
assert np.all(t_mjd_subfmt == expected)
@pytest.mark.parametrize('fmt,string,val1,val2', [
('jd', '2451544.5333981', 2451544.5, .0333981),
('decimalyear', '2000.54321', 2000., .54321),
('cxcsec', '100.0123456', 100.0123456, None),
('unix', '100.0123456', 100.0123456, None),
('gps', '100.0123456', 100.0123456, None),
('byear', '1950.1', 1950.1, None),
('jyear', '2000.1', 2000.1, None)])
def test_explicit_string_other_formats(self, fmt, string, val1, val2):
t = Time(string, format=fmt)
assert t == Time(val1, val2, format=fmt)
assert t.to_value(fmt, subfmt='str') == string
def test_basic_subformat_setting(self):
t = Time('2001', format='jyear', scale='tai')
t.format = "mjd"
t.out_subfmt = "str"
assert t.value.startswith("5")
def test_basic_subformat_cache_does_not_crash(self):
t = Time('2001', format='jyear', scale='tai')
t.to_value('mjd', subfmt='str')
assert ('mjd', 'str') in t.cache['format']
t.to_value('mjd', 'str')
@pytest.mark.parametrize("fmt", ["jd", "mjd", "cxcsec", "unix", "gps", "jyear"])
def test_decimal_context_does_not_affect_string(self, fmt):
t = Time('2001', format='jyear', scale='tai')
t.format = fmt
with localcontext() as ctx:
ctx.prec = 2
t_s_2 = t.to_value(fmt, "str")
t2 = Time('2001', format='jyear', scale='tai')
t2.format = fmt
with localcontext() as ctx:
ctx.prec = 40
            t2_s_40 = t2.to_value(fmt, "str")
assert t_s_2 == t2_s_40, "String representation should not depend on Decimal context"
def test_decimal_context_caching(self):
t = Time(val=58000, val2=1e-14, format='mjd', scale='tai')
with localcontext() as ctx:
ctx.prec = 2
t_s_2 = t.to_value('mjd', subfmt='decimal')
t2 = Time(val=58000, val2=1e-14, format='mjd', scale='tai')
with localcontext() as ctx:
ctx.prec = 40
t_s_40 = t.to_value('mjd', subfmt='decimal')
t2_s_40 = t2.to_value('mjd', subfmt='decimal')
assert t_s_2 == t_s_40, "Should be the same but cache might make this automatic"
assert t_s_2 == t2_s_40, "Different precision should produce the same results"
@pytest.mark.parametrize("f, s, t", [("sec", "long", np.longdouble),
("sec", "decimal", Decimal),
("sec", "str", str)])
def test_timedelta_basic(self, f, s, t):
dt = (Time("58000", format="mjd", scale="tai")
- Time("58001", format="mjd", scale="tai"))
value = dt.to_value(f, s)
assert isinstance(value, t)
dt.format = f
dt.out_subfmt = s
assert isinstance(dt.value, t)
assert isinstance(dt.to_value(f, None), t)
def test_need_format_argument(self):
t = Time('J2000')
with pytest.raises(TypeError, match="missing.*required.*'format'"):
t.to_value()
with pytest.raises(ValueError, match='format must be one of'):
t.to_value('julian')
def test_wrong_in_subfmt(self):
with pytest.raises(ValueError, match='not among selected'):
Time("58000", format='mjd', in_subfmt='float')
with pytest.raises(ValueError, match='not among selected'):
Time(np.longdouble(58000), format='mjd', in_subfmt='float')
with pytest.raises(ValueError, match='not among selected'):
Time(58000., format='mjd', in_subfmt='str')
with pytest.raises(ValueError, match='not among selected'):
Time(58000., format='mjd', in_subfmt='long')
def test_wrong_subfmt(self):
t = Time(58000., format='mjd')
with pytest.raises(ValueError, match='must match one'):
t.to_value('mjd', subfmt='parrot')
with pytest.raises(ValueError, match='must match one'):
t.out_subfmt = 'parrot'
with pytest.raises(ValueError, match='must match one'):
t.in_subfmt = 'parrot'
def test_not_allowed_subfmt(self):
"""Test case where format has no defined subfmts"""
t = Time('J2000')
match = 'subformat not allowed for format jyear_str'
with pytest.raises(ValueError, match=match):
t.to_value('jyear_str', subfmt='parrot')
with pytest.raises(ValueError, match=match):
t.out_subfmt = 'parrot'
with pytest.raises(ValueError, match=match):
Time('J2000', out_subfmt='parrot')
with pytest.raises(ValueError, match=match):
t.in_subfmt = 'parrot'
with pytest.raises(ValueError, match=match):
Time('J2000', format='jyear_str', in_subfmt='parrot')
def test_switch_to_format_with_no_out_subfmt(self):
t = Time('2001-01-01', out_subfmt='date_hm')
assert t.out_subfmt == 'date_hm'
# Now do an in-place switch to format 'jyear_str' that has no subfmts
# where out_subfmt is changed to '*'.
t.format = 'jyear_str'
assert t.out_subfmt == '*'
assert t.value == 'J2001.001'
class TestSofaErrors:
"""Test that erfa status return values are handled correctly"""
def test_bad_time(self):
iy = np.array([2000], dtype=np.intc)
im = np.array([2000], dtype=np.intc) # bad month
id = np.array([2000], dtype=np.intc) # bad day
with pytest.raises(ValueError): # bad month, fatal error
djm0, djm = erfa.cal2jd(iy, im, id)
iy[0] = -5000
im[0] = 2
with pytest.raises(ValueError): # bad year, fatal error
djm0, djm = erfa.cal2jd(iy, im, id)
iy[0] = 2000
with pytest.warns(ErfaWarning, match=r'bad day \(JD computed\)') as w:
djm0, djm = erfa.cal2jd(iy, im, id)
assert len(w) == 1
assert allclose_jd(djm0, [2400000.5])
assert allclose_jd(djm, [53574.])
class TestCopyReplicate:
"""Test issues related to copying and replicating data"""
def test_immutable_input(self):
"""Internals are never mutable."""
jds = np.array([2450000.5], dtype=np.double)
t = Time(jds, format='jd', scale='tai')
assert allclose_jd(t.jd, jds)
jds[0] = 2458654
assert not allclose_jd(t.jd, jds)
mjds = np.array([50000.0], dtype=np.double)
t = Time(mjds, format='mjd', scale='tai')
assert allclose_jd(t.jd, [2450000.5])
mjds[0] = 0.0
assert allclose_jd(t.jd, [2450000.5])
def test_replicate(self):
"""Test replicate method"""
t = Time(['2000:001'], format='yday', scale='tai',
location=('45d', '45d'))
t_yday = t.yday
t_loc_x = t.location.x.copy()
t2 = t.replicate()
assert t.yday == t2.yday
assert t.format == t2.format
assert t.scale == t2.scale
assert t.location == t2.location
# This is not allowed publicly, but here we hack the internal time
# and location values to show that t and t2 are sharing references.
t2._time.jd1 += 100.0
# Need to delete the cached yday attributes (only an issue because
# of the internal _time hack).
del t.cache
del t2.cache
assert t.yday == t2.yday
assert t.yday != t_yday # prove that it changed
t2_loc_x_view = t2.location.x
t2_loc_x_view[()] = 0 # use 0 to avoid having to give units
assert t2.location.x == t2_loc_x_view
assert t.location.x == t2.location.x
assert t.location.x != t_loc_x # prove that it changed
def test_copy(self):
"""Test copy method"""
t = Time('2000:001', format='yday', scale='tai',
location=('45d', '45d'))
t_yday = t.yday
t_loc_x = t.location.x.copy()
t2 = t.copy()
assert t.yday == t2.yday
# This is not allowed publicly, but here we hack the internal time
# and location values to show that t and t2 are not sharing references.
t2._time.jd1 += 100.0
# Need to delete the cached yday attributes (only an issue because
# of the internal _time hack).
del t.cache
del t2.cache
assert t.yday != t2.yday
assert t.yday == t_yday # prove that it did not change
t2_loc_x_view = t2.location.x
t2_loc_x_view[()] = 0 # use 0 to avoid having to give units
assert t2.location.x == t2_loc_x_view
assert t.location.x != t2.location.x
assert t.location.x == t_loc_x # prove that it changed
class TestStardate:
"""Sync chronometers with Starfleet Command"""
def test_iso_to_stardate(self):
assert str(Time('2320-01-01', scale='tai').stardate)[:7] == '1368.99'
assert str(Time('2330-01-01', scale='tai').stardate)[:8] == '10552.76'
assert str(Time('2340-01-01', scale='tai').stardate)[:8] == '19734.02'
@pytest.mark.parametrize('dates',
[(10000, '2329-05-26 03:02'),
(20000, '2340-04-15 19:05'),
(30000, '2351-03-07 11:08')])
def test_stardate_to_iso(self, dates):
stardate, iso = dates
t_star = Time(stardate, format='stardate')
t_iso = Time(t_star, format='iso', out_subfmt='date_hm')
assert t_iso.value == iso
def test_python_builtin_copy():
t = Time('2000:001', format='yday', scale='tai')
t2 = copy.copy(t)
t3 = copy.deepcopy(t)
assert t.jd == t2.jd
assert t.jd == t3.jd
def test_now():
"""
Tests creating a Time object with the `now` class method.
"""
now = datetime.datetime.utcnow()
t = Time.now()
assert t.format == 'datetime'
assert t.scale == 'utc'
dt = t.datetime - now # a datetime.timedelta object
# this gives a .1 second margin between the `utcnow` call and the `Time`
# initializer, which is really way more generous than necessary - typical
# times are more like microseconds. But it seems safer in case some
# platforms have slow clock calls or something.
assert dt.total_seconds() < 0.1
def test_decimalyear():
t = Time('2001:001', format='yday')
assert t.decimalyear == 2001.0
t = Time(2000.0, [0.5, 0.75], format='decimalyear')
assert np.all(t.value == [2000.5, 2000.75])
jd0 = Time('2000:001').jd
jd1 = Time('2001:001').jd
d_jd = jd1 - jd0
assert np.all(t.jd == [jd0 + 0.5 * d_jd,
jd0 + 0.75 * d_jd])
def test_fits_year0():
t = Time(1721425.5, format='jd', scale='tai')
assert t.fits == '0001-01-01T00:00:00.000'
t = Time(1721425.5 - 366., format='jd', scale='tai')
assert t.fits == '+00000-01-01T00:00:00.000'
t = Time(1721425.5 - 366. - 365., format='jd', scale='tai')
assert t.fits == '-00001-01-01T00:00:00.000'
def test_fits_year10000():
t = Time(5373484.5, format='jd', scale='tai')
assert t.fits == '+10000-01-01T00:00:00.000'
t = Time(5373484.5 - 365., format='jd', scale='tai')
assert t.fits == '9999-01-01T00:00:00.000'
t = Time(5373484.5, -1. / 24. / 3600., format='jd', scale='tai')
assert t.fits == '9999-12-31T23:59:59.000'
def test_dir():
t = Time('2000:001', format='yday', scale='tai')
assert 'utc' in dir(t)
def test_time_from_epoch_jds():
"""Test that jd1/jd2 in a TimeFromEpoch format is always well-formed:
jd1 is an integral value and abs(jd2) <= 0.5.
"""
    # From 1998:001 00:00 to 1998:002 12:00 by a non-round step. This will
# catch jd2 == 0 and a case of abs(jd2) == 0.5.
cxcsecs = np.linspace(0, 86400 * 1.5, 49)
for cxcsec in cxcsecs:
t = Time(cxcsec, format='cxcsec')
assert np.round(t.jd1) == t.jd1
assert np.abs(t.jd2) <= 0.5
t = Time(cxcsecs, format='cxcsec')
assert np.all(np.round(t.jd1) == t.jd1)
assert np.all(np.abs(t.jd2) <= 0.5)
assert np.any(np.abs(t.jd2) == 0.5) # At least one exactly 0.5
def test_bool():
"""Any Time object should evaluate to True unless it is empty [#3520]."""
t = Time(np.arange(50000, 50010), format='mjd', scale='utc')
assert bool(t) is True
assert bool(t[0]) is True
assert bool(t[:0]) is False
def test_len_size():
"""Check length of Time objects and that scalar ones do not have one."""
t = Time(np.arange(50000, 50010), format='mjd', scale='utc')
assert len(t) == 10 and t.size == 10
t1 = Time(np.arange(50000, 50010).reshape(2, 5), format='mjd', scale='utc')
assert len(t1) == 2 and t1.size == 10
# Can have length 1 or length 0 arrays.
t2 = t[:1]
assert len(t2) == 1 and t2.size == 1
t3 = t[:0]
assert len(t3) == 0 and t3.size == 0
# But cannot get length from scalar.
t4 = t[0]
with pytest.raises(TypeError) as err:
len(t4)
# Ensure we're not just getting the old error of
# "object of type 'float' has no len()".
assert 'Time' in str(err.value)
def test_TimeFormat_scale():
"""guard against recurrence of #1122, where TimeFormat class looses uses
attributes (delta_ut1_utc here), preventing conversion to unix, cxc"""
t = Time('1900-01-01', scale='ut1')
t.delta_ut1_utc = 0.0
with pytest.warns(ErfaWarning):
t.unix
assert t.unix == t.utc.unix
@pytest.mark.remote_data
def test_scale_conversion(monkeypatch):
# Check that if we have internet, and downloading is allowed, we
# can get conversion to UT1 for the present, since we will download
# IERS_A in IERS_Auto.
monkeypatch.setattr('astropy.utils.iers.conf.auto_download', True)
Time(Time.now().cxcsec, format='cxcsec', scale='ut1')
def test_byteorder():
"""Ensure that bigendian and little-endian both work (closes #2942)"""
mjd = np.array([53000.00, 54000.00])
big_endian = mjd.astype('>f8')
little_endian = mjd.astype('<f8')
time_mjd = Time(mjd, format='mjd')
time_big = Time(big_endian, format='mjd')
time_little = Time(little_endian, format='mjd')
assert np.all(time_big == time_mjd)
assert np.all(time_little == time_mjd)
def test_datetime_tzinfo():
"""
Test #3160 that time zone info in datetime objects is respected.
"""
class TZm6(datetime.tzinfo):
def utcoffset(self, dt):
return datetime.timedelta(hours=-6)
d = datetime.datetime(2002, 1, 2, 10, 3, 4, tzinfo=TZm6())
t = Time(d)
assert t.value == datetime.datetime(2002, 1, 2, 16, 3, 4)
def test_subfmts_regex():
"""
    Test having custom subfmts with a regular expression
"""
class TimeLongYear(TimeString):
name = 'longyear'
subfmts = (('date',
r'(?P<year>[+-]\d{5})-%m-%d', # hybrid
'{year:+06d}-{mon:02d}-{day:02d}'),)
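    # The input pattern mixes a named regex group for the signed 5-digit year
    # with ordinary strptime directives (%m, %d); the output template uses
    # normal format-spec fields.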
t = Time('+02000-02-03', format='longyear')
assert t.value == '+02000-02-03'
assert t.jd == Time('2000-02-03').jd
def test_set_format_basic():
"""
Test basics of setting format attribute.
"""
for format, value in (('jd', 2451577.5),
('mjd', 51577.0),
('cxcsec', 65923264.184), # confirmed with Chandra.Time
('datetime', datetime.datetime(2000, 2, 3, 0, 0)),
('iso', '2000-02-03 00:00:00.000')):
t = Time('+02000-02-03', format='fits')
t0 = t.replicate()
t.format = format
assert t.value == value
# Internal jd1 and jd2 are preserved
assert t._time.jd1 is t0._time.jd1
assert t._time.jd2 is t0._time.jd2
def test_unix_tai_format():
t = Time('2020-01-01', scale='utc')
assert allclose_sec(t.unix_tai - t.unix, 37.0)
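    # 37 s is the TAI - UTC offset in 2020 (10 s initial offset plus 27 leap
    # seconds); in 1970 the offset was about 8.000082 s, as checked below.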
t = Time('1970-01-01', scale='utc')
assert allclose_sec(t.unix_tai - t.unix, 8 + 8.2e-05)
def test_set_format_shares_subfmt():
"""
Set format and round trip through a format that shares out_subfmt
"""
t = Time('+02000-02-03', format='fits', out_subfmt='date_hms', precision=5)
tc = t.copy()
t.format = 'isot'
assert t.precision == 5
assert t.out_subfmt == 'date_hms'
assert t.value == '2000-02-03T00:00:00.00000'
t.format = 'fits'
assert t.value == tc.value
assert t.precision == 5
def test_set_format_does_not_share_subfmt():
"""
Set format and round trip through a format that does not share out_subfmt
"""
t = Time('+02000-02-03', format='fits', out_subfmt='longdate')
t.format = 'isot'
assert t.out_subfmt == '*' # longdate_hms not there, goes to default
assert t.value == '2000-02-03T00:00:00.000'
t.format = 'fits'
assert t.out_subfmt == '*'
assert t.value == '2000-02-03T00:00:00.000' # date_hms
def test_replicate_value_error():
"""
Passing a bad format to replicate should raise ValueError, not KeyError.
PR #3857.
"""
t1 = Time('2007:001', scale='tai')
with pytest.raises(ValueError) as err:
t1.replicate(format='definitely_not_a_valid_format')
assert 'format must be one of' in str(err.value)
def test_remove_astropy_time():
"""
Make sure that 'astropy_time' format is really gone after #3857. Kind of
silly test but just to be sure.
"""
t1 = Time('2007:001', scale='tai')
assert 'astropy_time' not in t1.FORMATS
with pytest.raises(ValueError) as err:
Time(t1, format='astropy_time')
assert 'format must be one of' in str(err.value)
def test_isiterable():
"""
Ensure that scalar `Time` instances are not reported as iterable by the
`isiterable` utility.
Regression test for https://github.com/astropy/astropy/issues/4048
"""
t1 = Time.now()
assert not isiterable(t1)
t2 = Time(['1999-01-01 00:00:00.123456789', '2010-01-01 00:00:00'],
format='iso', scale='utc')
assert isiterable(t2)
def test_to_datetime():
tz = TimezoneInfo(utc_offset=-10 * u.hour, tzname='US/Hawaii')
    # The above line produces a `datetime.tzinfo` object similar to:
# tzinfo = pytz.timezone('US/Hawaii')
time = Time('2010-09-03 00:00:00')
tz_aware_datetime = time.to_datetime(tz)
assert tz_aware_datetime.time() == datetime.time(14, 0)
forced_to_astropy_time = Time(tz_aware_datetime)
assert tz.tzname(time.datetime) == tz_aware_datetime.tzname()
assert time == forced_to_astropy_time
# Test non-scalar time inputs:
time = Time(['2010-09-03 00:00:00', '2005-09-03 06:00:00',
'1990-09-03 06:00:00'])
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
for dt, tz_dt in zip(time.datetime, tz_aware_datetime):
assert tz.tzname(dt) == tz_dt.tzname()
assert np.all(time == forced_to_astropy_time)
with pytest.raises(ValueError, match=r'does not support leap seconds'):
Time('2015-06-30 23:59:60.000').to_datetime()
@pytest.mark.skipif('not HAS_PYTZ')
def test_to_datetime_pytz():
tz = pytz.timezone('US/Hawaii')
time = Time('2010-09-03 00:00:00')
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
assert tz_aware_datetime.time() == datetime.time(14, 0)
assert tz.tzname(time.datetime) == tz_aware_datetime.tzname()
assert time == forced_to_astropy_time
# Test non-scalar time inputs:
time = Time(['2010-09-03 00:00:00', '2005-09-03 06:00:00',
'1990-09-03 06:00:00'])
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
for dt, tz_dt in zip(time.datetime, tz_aware_datetime):
assert tz.tzname(dt) == tz_dt.tzname()
assert np.all(time == forced_to_astropy_time)
def test_cache():
t = Time('2010-09-03 00:00:00')
t2 = Time('2010-09-03 00:00:00')
# Time starts out without a cache
assert 'cache' not in t._time.__dict__
# Access the iso format and confirm that the cached version is as expected
t.iso
assert t.cache['format']['iso'] == t2.iso
# Access the TAI scale and confirm that the cached version is as expected
t.tai
assert t.cache['scale']['tai'] == t2.tai
# New Time object after scale transform does not have a cache yet
assert 'cache' not in t.tt._time.__dict__
# Clear the cache
del t.cache
assert 'cache' not in t._time.__dict__
# Check accessing the cache creates an empty dictionary
assert not t.cache
assert 'cache' in t._time.__dict__
def test_epoch_date_jd_is_day_fraction():
"""
    Ensure that jd1 and jd2 of an epoch Time respect the (day, fraction) convention
(see #6638)
"""
t0 = Time("J2000", scale="tdb")
assert t0.jd1 == 2451545.0
assert t0.jd2 == 0.0
t1 = Time(datetime.datetime(2000, 1, 1, 12, 0, 0), scale="tdb")
assert t1.jd1 == 2451545.0
assert t1.jd2 == 0.0
def test_sum_is_equivalent():
"""
Ensure that two equal dates defined in different ways behave equally (#6638)
"""
t0 = Time("J2000", scale="tdb")
t1 = Time("2000-01-01 12:00:00", scale="tdb")
assert t0 == t1
assert (t0 + 1 * u.second) == (t1 + 1 * u.second)
def test_string_valued_columns():
# Columns have a nice shim that translates bytes to string as needed.
# Ensure Time can handle these. Use multi-d array just to be sure.
times = [[[f'{y:04d}-{m:02d}-{d:02d}' for d in range(1, 3)]
for m in range(5, 7)] for y in range(2012, 2014)]
cutf32 = Column(times)
cbytes = cutf32.astype('S')
tutf32 = Time(cutf32)
tbytes = Time(cbytes)
assert np.all(tutf32 == tbytes)
tutf32 = Time(Column(['B1950']))
tbytes = Time(Column([b'B1950']))
assert tutf32 == tbytes
    # Regression tests for arrays with entries of unequal length. gh-6903.
times = Column([b'2012-01-01', b'2012-01-01T00:00:00'])
assert np.all(Time(times) == Time(['2012-01-01', '2012-01-01T00:00:00']))
def test_bytes_input():
tstring = '2011-01-02T03:04:05'
tbytes = b'2011-01-02T03:04:05'
assert tbytes.decode('ascii') == tstring
t0 = Time(tstring)
t1 = Time(tbytes)
assert t1 == t0
tarray = np.array(tbytes)
assert tarray.dtype.kind == 'S'
t2 = Time(tarray)
assert t2 == t0
def test_writeable_flag():
t = Time([1, 2, 3], format='cxcsec')
t[1] = 5.0
assert allclose_sec(t[1].value, 5.0)
t.writeable = False
with pytest.raises(ValueError) as err:
t[1] = 5.0
assert 'Time object is read-only. Make a copy()' in str(err.value)
with pytest.raises(ValueError) as err:
t[:] = 5.0
assert 'Time object is read-only. Make a copy()' in str(err.value)
t.writeable = True
t[1] = 10.0
assert allclose_sec(t[1].value, 10.0)
# Scalar is writeable because it gets boxed into a zero-d array
t = Time('2000:001', scale='utc')
t[()] = '2000:002'
assert t.value.startswith('2000:002')
# Transformed attribute is not writeable
t = Time(['2000:001', '2000:002'], scale='utc')
t2 = t.tt # t2 is read-only now because t.tt is cached
with pytest.raises(ValueError) as err:
t2[0] = '2005:001'
assert 'Time object is read-only. Make a copy()' in str(err.value)
def test_setitem_location():
loc = EarthLocation(x=[1, 2] * u.m, y=[3, 4] * u.m, z=[5, 6] * u.m)
t = Time([[1, 2], [3, 4]], format='cxcsec', location=loc)
# Succeeds because the right hand side makes no implication about
# location and just inherits t.location
t[0, 0] = 0
assert allclose_sec(t.value, [[0, 2], [3, 4]])
# Fails because the right hand side has location=None
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-1, format='cxcsec')
assert ('cannot set to Time with different location: '
'expected location={} and '
'got location=None'.format(loc[0])) in str(err.value)
# Succeeds because the right hand side correctly sets location
t[0, 0] = Time(-2, format='cxcsec', location=loc[0])
assert allclose_sec(t.value, [[-2, 2], [3, 4]])
# Fails because the right hand side has different location
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-2, format='cxcsec', location=loc[1])
assert ('cannot set to Time with different location: '
'expected location={} and '
'got location={}'.format(loc[0], loc[1])) in str(err.value)
# Fails because the Time has None location and RHS has defined location
t = Time([[1, 2], [3, 4]], format='cxcsec')
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-2, format='cxcsec', location=loc[1])
assert ('cannot set to Time with different location: '
'expected location=None and '
'got location={}'.format(loc[1])) in str(err.value)
# Broadcasting works
t = Time([[1, 2], [3, 4]], format='cxcsec', location=loc)
t[0, :] = Time([-3, -4], format='cxcsec', location=loc)
assert allclose_sec(t.value, [[-3, -4], [3, 4]])
def test_setitem_from_python_objects():
t = Time([[1, 2], [3, 4]], format='cxcsec')
assert t.cache == {}
t.iso
assert 'iso' in t.cache['format']
assert np.all(t.iso == [['1998-01-01 00:00:01.000', '1998-01-01 00:00:02.000'],
['1998-01-01 00:00:03.000', '1998-01-01 00:00:04.000']])
# Setting item clears cache
t[0, 1] = 100
assert t.cache == {}
assert allclose_sec(t.value, [[1, 100],
[3, 4]])
assert np.all(t.iso == [['1998-01-01 00:00:01.000', '1998-01-01 00:01:40.000'],
['1998-01-01 00:00:03.000', '1998-01-01 00:00:04.000']])
# Set with a float value
t.iso
t[1, :] = 200
assert t.cache == {}
assert allclose_sec(t.value, [[1, 100],
[200, 200]])
# Array of strings in yday format
t[:, 1] = ['1998:002', '1998:003']
assert allclose_sec(t.value, [[1, 86400 * 1],
[200, 86400 * 2]])
# Incompatible numeric value
t = Time(['2000:001', '2000:002'])
t[0] = '2001:001'
with pytest.raises(ValueError) as err:
t[0] = 100
assert 'cannot convert value to a compatible Time object' in str(err.value)
def test_setitem_from_time_objects():
"""Set from existing Time object.
"""
# Set from time object with different scale
t = Time(['2000:001', '2000:002'], scale='utc')
t2 = Time(['2000:010'], scale='tai')
t[1] = t2[0]
assert t.value[1] == t2.utc.value[0]
# Time object with different scale and format
t = Time(['2000:001', '2000:002'], scale='utc')
t2.format = 'jyear'
t[1] = t2[0]
assert t.yday[1] == t2.utc.yday[0]
def test_setitem_bad_item():
t = Time([1, 2], format='cxcsec')
with pytest.raises(IndexError):
t['asdf'] = 3
def test_setitem_deltas():
"""Setting invalidates any transform deltas"""
t = Time([1, 2], format='cxcsec')
t.delta_tdb_tt = [1, 2]
t.delta_ut1_utc = [3, 4]
t[1] = 3
assert not hasattr(t, '_delta_tdb_tt')
assert not hasattr(t, '_delta_ut1_utc')
def test_subclass():
"""Check that we can initialize subclasses with a Time instance."""
# Ref: Issue gh-#7449 and PR gh-#7453.
class _Time(Time):
pass
t1 = Time('1999-01-01T01:01:01')
t2 = _Time(t1)
assert t2.__class__ == _Time
assert t1 == t2
def test_strftime_scalar():
"""Test of Time.strftime
"""
time_string = '2010-09-03 06:00:00'
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S') == time_string
def test_strftime_array():
tstrings = ['2010-09-03 00:00:00', '2005-09-03 06:00:00',
'1995-12-31 23:59:60']
t = Time(tstrings)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S').tolist() == tstrings
def test_strftime_array_2():
tstrings = [['1998-01-01 00:00:01', '1998-01-01 00:00:02'],
['1998-01-01 00:00:03', '1995-12-31 23:59:60']]
tstrings = np.array(tstrings)
t = Time(tstrings)
for format in t.FORMATS:
t.format = format
assert np.all(t.strftime('%Y-%m-%d %H:%M:%S') == tstrings)
assert t.strftime('%Y-%m-%d %H:%M:%S').shape == tstrings.shape
def test_strftime_leapsecond():
time_string = '1995-12-31 23:59:60'
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S') == time_string
def test_strptime_scalar():
"""Test of Time.strptime
"""
time_string = '2007-May-04 21:08:12'
time_object = Time('2007-05-04 21:08:12')
t = Time.strptime(time_string, '%Y-%b-%d %H:%M:%S')
assert t == time_object
def test_strptime_array():
"""Test of Time.strptime
"""
tstrings = [['1998-Jan-01 00:00:01', '1998-Jan-01 00:00:02'],
['1998-Jan-01 00:00:03', '1998-Jan-01 00:00:04']]
tstrings = np.array(tstrings)
time_object = Time([['1998-01-01 00:00:01', '1998-01-01 00:00:02'],
['1998-01-01 00:00:03', '1998-01-01 00:00:04']])
t = Time.strptime(tstrings, '%Y-%b-%d %H:%M:%S')
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strptime_badinput():
tstrings = [1, 2, 3]
with pytest.raises(TypeError):
Time.strptime(tstrings, '%S')
def test_strptime_input_bytes_scalar():
time_string = b'2007-May-04 21:08:12'
time_object = Time('2007-05-04 21:08:12')
t = Time.strptime(time_string, '%Y-%b-%d %H:%M:%S')
assert t == time_object
def test_strptime_input_bytes_array():
tstrings = [[b'1998-Jan-01 00:00:01', b'1998-Jan-01 00:00:02'],
[b'1998-Jan-01 00:00:03', b'1998-Jan-01 00:00:04']]
tstrings = np.array(tstrings)
time_object = Time([['1998-01-01 00:00:01', '1998-01-01 00:00:02'],
['1998-01-01 00:00:03', '1998-01-01 00:00:04']])
t = Time.strptime(tstrings, '%Y-%b-%d %H:%M:%S')
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strptime_leapsecond():
time_obj1 = Time('1995-12-31T23:59:60', format='isot')
time_obj2 = Time.strptime('1995-Dec-31 23:59:60', '%Y-%b-%d %H:%M:%S')
assert time_obj1 == time_obj2
def test_strptime_3_digit_year():
time_obj1 = Time('0995-12-31T00:00:00', format='isot', scale='tai')
time_obj2 = Time.strptime('0995-Dec-31 00:00:00', '%Y-%b-%d %H:%M:%S',
scale='tai')
assert time_obj1 == time_obj2
def test_strptime_fracsec_scalar():
time_string = '2007-May-04 21:08:12.123'
time_object = Time('2007-05-04 21:08:12.123')
t = Time.strptime(time_string, '%Y-%b-%d %H:%M:%S.%f')
assert t == time_object
def test_strptime_fracsec_array():
"""Test of Time.strptime
"""
tstrings = [['1998-Jan-01 00:00:01.123', '1998-Jan-01 00:00:02.000001'],
['1998-Jan-01 00:00:03.000900', '1998-Jan-01 00:00:04.123456']]
tstrings = np.array(tstrings)
time_object = Time([['1998-01-01 00:00:01.123', '1998-01-01 00:00:02.000001'],
['1998-01-01 00:00:03.000900', '1998-01-01 00:00:04.123456']])
t = Time.strptime(tstrings, '%Y-%b-%d %H:%M:%S.%f')
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strftime_scalar_fracsec():
"""Test of Time.strftime
"""
time_string = '2010-09-03 06:00:00.123'
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S.%f') == time_string
def test_strftime_scalar_fracsec_precision():
time_string = '2010-09-03 06:00:00.123123123'
t = Time(time_string)
assert t.strftime('%Y-%m-%d %H:%M:%S.%f') == '2010-09-03 06:00:00.123'
t.precision = 9
assert t.strftime('%Y-%m-%d %H:%M:%S.%f') == '2010-09-03 06:00:00.123123123'
def test_strftime_array_fracsec():
tstrings = ['2010-09-03 00:00:00.123000', '2005-09-03 06:00:00.000001',
'1995-12-31 23:59:60.000900']
t = Time(tstrings)
t.precision = 6
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S.%f').tolist() == tstrings
def test_insert_time():
tm = Time([1, 2], format='unix')
# Insert a scalar using an auto-parsed string
tm2 = tm.insert(1, '1970-01-01 00:01:00')
assert np.all(tm2 == Time([1, 60, 2], format='unix'))
# Insert scalar using a Time value
tm2 = tm.insert(1, Time('1970-01-01 00:01:00'))
assert np.all(tm2 == Time([1, 60, 2], format='unix'))
# Insert length=1 array with a Time value
tm2 = tm.insert(1, [Time('1970-01-01 00:01:00')])
assert np.all(tm2 == Time([1, 60, 2], format='unix'))
# Insert length=2 list with float values matching unix format.
# Also actually provide axis=0 unlike all other tests.
tm2 = tm.insert(1, [10, 20], axis=0)
assert np.all(tm2 == Time([1, 10, 20, 2], format='unix'))
# Insert length=2 np.array with float values matching unix format
tm2 = tm.insert(1, np.array([10, 20]))
assert np.all(tm2 == Time([1, 10, 20, 2], format='unix'))
# Insert length=2 np.array with float values at the end
tm2 = tm.insert(2, np.array([10, 20]))
assert np.all(tm2 == Time([1, 2, 10, 20], format='unix'))
# Insert length=2 np.array with float values at the beginning
# with a negative index
tm2 = tm.insert(-2, np.array([10, 20]))
assert np.all(tm2 == Time([10, 20, 1, 2], format='unix'))
def test_insert_exceptions():
tm = Time(1, format='unix')
with pytest.raises(TypeError) as err:
tm.insert(0, 50)
assert 'cannot insert into scalar' in str(err.value)
tm = Time([1, 2], format='unix')
with pytest.raises(ValueError) as err:
tm.insert(0, 50, axis=1)
assert 'axis must be 0' in str(err.value)
with pytest.raises(TypeError) as err:
tm.insert(slice(None), 50)
assert 'obj arg must be an integer' in str(err.value)
with pytest.raises(IndexError) as err:
tm.insert(-100, 50)
assert 'index -100 is out of bounds for axis 0 with size 2' in str(err.value)
def test_datetime64_no_format():
dt64 = np.datetime64('2000-01-02T03:04:05.123456789')
t = Time(dt64, scale='utc', precision=9)
assert t.iso == '2000-01-02 03:04:05.123456789'
assert t.datetime64 == dt64
assert t.value == dt64
def test_hash_time():
loc1 = EarthLocation(1 * u.m, 2 * u.m, 3 * u.m)
for loc in None, loc1:
t = Time([1, 1, 2, 3], format='cxcsec', location=loc)
t[3] = np.ma.masked
h1 = hash(t[0])
h2 = hash(t[1])
h3 = hash(t[2])
assert h1 == h2
assert h1 != h3
with pytest.raises(TypeError) as exc:
hash(t)
assert exc.value.args[0] == "unhashable type: 'Time' (must be scalar)"
with pytest.raises(TypeError) as exc:
hash(t[3])
assert exc.value.args[0] == "unhashable type: 'Time' (value is masked)"
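    # The hash also reflects location and scale, so otherwise-equal times with
    # a different location or scale hash differently.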
t = Time(1, format='cxcsec', location=loc)
t2 = Time(1, format='cxcsec')
assert hash(t) != hash(t2)
t = Time('2000:180', scale='utc')
t2 = Time(t, scale='tai')
assert t == t2
assert hash(t) != hash(t2)
def test_hash_time_delta():
t = TimeDelta([1, 1, 2, 3], format='sec')
t[3] = np.ma.masked
h1 = hash(t[0])
h2 = hash(t[1])
h3 = hash(t[2])
assert h1 == h2
assert h1 != h3
with pytest.raises(TypeError) as exc:
hash(t)
assert exc.value.args[0] == "unhashable type: 'TimeDelta' (must be scalar)"
with pytest.raises(TypeError) as exc:
hash(t[3])
assert exc.value.args[0] == "unhashable type: 'TimeDelta' (value is masked)"
def test_get_time_fmt_exception_messages():
with pytest.raises(ValueError) as err:
Time(10)
assert "No time format was given, and the input is" in str(err.value)
with pytest.raises(ValueError) as err:
Time('2000:001', format='not-a-format')
assert "Format 'not-a-format' is not one of the allowed" in str(err.value)
with pytest.raises(ValueError) as err:
Time('200')
assert 'Input values did not match any of the formats where' in str(err.value)
with pytest.raises(ValueError) as err:
Time('200', format='iso')
assert ('Input values did not match the format class iso:' + os.linesep
+ 'ValueError: Time 200 does not match iso format') == str(err.value)
with pytest.raises(ValueError) as err:
Time(200, format='iso')
assert ('Input values did not match the format class iso:' + os.linesep
+ 'TypeError: Input values for iso class must be strings') == str(err.value)
def test_ymdhms_defaults():
t1 = Time({'year': 2001}, format='ymdhms')
assert t1 == Time('2001-01-01')
times_dict_ns = {
'year': [2001, 2002],
'month': [2, 3],
'day': [4, 5],
'hour': [6, 7],
'minute': [8, 9],
'second': [10, 11]
}
table_ns = Table(times_dict_ns)
struct_array_ns = table_ns.as_array()
rec_array_ns = struct_array_ns.view(np.recarray)
ymdhms_names = ('year', 'month', 'day', 'hour', 'minute', 'second')
@pytest.mark.parametrize('tm_input', [table_ns, struct_array_ns, rec_array_ns])
@pytest.mark.parametrize('kwargs', [{}, {'format': 'ymdhms'}])
@pytest.mark.parametrize('as_row', [False, True])
def test_ymdhms_init_from_table_like(tm_input, kwargs, as_row):
time_ns = Time(['2001-02-04 06:08:10', '2002-03-05 07:09:11'])
if as_row:
tm_input = tm_input[0]
time_ns = time_ns[0]
tm = Time(tm_input, **kwargs)
assert np.all(tm == time_ns)
assert tm.value.dtype.names == ymdhms_names
def test_ymdhms_init_from_dict_array():
times_dict_shape = {
'year': [[2001, 2002],
[2003, 2004]],
'month': [2, 3],
'day': 4
}
time_shape = Time(
[['2001-02-04', '2002-03-04'],
['2003-02-04', '2004-03-04']]
)
time = Time(times_dict_shape, format='ymdhms')
assert np.all(time == time_shape)
assert time.ymdhms.shape == time_shape.shape
@pytest.mark.parametrize('kwargs', [{}, {'format': 'ymdhms'}])
def test_ymdhms_init_from_dict_scalar(kwargs):
"""
Test YMDHMS functionality for a dict input. This includes ensuring that
key and attribute access work. For extra fun use a time within a leap
second.
"""
time_dict = {
'year': 2016,
'month': 12,
'day': 31,
'hour': 23,
'minute': 59,
'second': 60.123456789}
tm = Time(time_dict, **kwargs)
assert tm == Time('2016-12-31T23:59:60.123456789')
for attr in time_dict:
for value in (tm.value[attr], getattr(tm.value, attr)):
if attr == 'second':
assert allclose_sec(time_dict[attr], value)
else:
assert time_dict[attr] == value
# Now test initializing from a YMDHMS format time using the object
tm_rt = Time(tm)
assert tm_rt == tm
assert tm_rt.format == 'ymdhms'
# Test initializing from a YMDHMS value (np.void, i.e. recarray row)
# without specified format.
tm_rt = Time(tm.ymdhms)
assert tm_rt == tm
assert tm_rt.format == 'ymdhms'
def test_ymdhms_exceptions():
with pytest.raises(ValueError, match='input must be dict or table-like'):
Time(10, format='ymdhms')
match = "'wrong' not allowed as YMDHMS key name(s)"
# NB: for reasons unknown, using match=match in pytest.raises() fails, so we
# fall back to old school ``match in str(err.value)``.
with pytest.raises(ValueError) as err:
Time({'year': 2019, 'wrong': 1}, format='ymdhms')
assert match in str(err.value)
match = "for 2 input key names you must supply 'year', 'month'"
with pytest.raises(ValueError, match=match):
Time({'year': 2019, 'minute': 1}, format='ymdhms')
def test_ymdhms_masked():
tm = Time({'year': [2000, 2001]}, format='ymdhms')
tm[0] = np.ma.masked
assert isinstance(tm.value[0], np.ma.core.mvoid)
for name in ymdhms_names:
assert tm.value[0][name] is np.ma.masked
# Converted from doctest in astropy/test/formats.py for debugging
def test_ymdhms_output():
t = Time({'year': 2015, 'month': 2, 'day': 3,
'hour': 12, 'minute': 13, 'second': 14.567},
scale='utc')
# NOTE: actually comes back as np.void for some reason
# NOTE: not necessarily a python int; might be an int32
assert t.ymdhms.year == 2015
# There are two stages of validation now - one on input into a format, so that
# the format conversion code has tidy matched arrays to work with, and the
# other when object construction does not go through a format object. Or at
# least, the format object is constructed with "from_jd=True". In this case the
# normal input validation does not happen but the new input validation does,
# and can ensure that strange broadcasting anomalies can't happen.
# This form of construction uses from_jd=True.
def test_broadcasting_writeable():
t = Time('J2015') + np.linspace(-1, 1, 10) * u.day
t[2] = Time(58000, format="mjd")
def test_format_subformat_compatibility():
"""Test that changing format with out_subfmt defined is not a problem.
See #9812, #9810."""
t = Time('2019-12-20', out_subfmt='date_??')
assert t.mjd == 58837.0
assert t.yday == '2019:354:00:00' # Preserves out_subfmt
t2 = t.replicate(format='mjd')
assert t2.out_subfmt == '*' # Changes to default
t2 = t.copy(format='mjd')
assert t2.out_subfmt == '*'
t2 = Time(t, format='mjd')
assert t2.out_subfmt == '*'
t2 = t.copy(format='yday')
assert t2.out_subfmt == 'date_??'
assert t2.value == '2019:354:00:00'
t.format = 'yday'
assert t.value == '2019:354:00:00'
assert t.out_subfmt == 'date_??'
t = Time('2019-12-20', out_subfmt='date')
assert t.mjd == 58837.0
assert t.yday == '2019:354'
@pytest.mark.parametrize('fmt_name,fmt_class', TIME_FORMATS.items())
def test_to_value_with_subfmt_for_every_format(fmt_name, fmt_class):
"""From a starting Time value, test that every valid combination of
to_value(format, subfmt) works. See #9812, #9361.
"""
t = Time('2000-01-01')
subfmts = list(subfmt[0] for subfmt in fmt_class.subfmts) + [None, '*']
for subfmt in subfmts:
t.to_value(fmt_name, subfmt)
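# Hedged illustration (added; not part of the original test module): one concrete
# instance of the to_value(format, subfmt) pattern exercised by the test above.
# The helper is not named test_* so pytest will not collect it, and it is never
# called here; 'date' and 'long' are assumed valid subformats of iso and mjd.
def _example_to_value_subfmt():
    t = Time('2000-01-01')
    # 'date' drops the time part of the iso string; 'long' returns np.longdouble.
    return t.to_value('iso', 'date'), t.to_value('mjd', 'long')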
@pytest.mark.parametrize('location', [None, (45, 45)])
def test_location_init(location):
"""Test fix in #9969 for issue #9962 where the location attribute is
    lost when initializing Time from an existing Time instance or a list of
Time instances.
"""
tm = Time('J2010', location=location)
# Init from a scalar Time
tm2 = Time(tm)
assert np.all(tm.location == tm2.location)
assert type(tm.location) is type(tm2.location) # noqa
# From a list of Times
tm2 = Time([tm, tm])
if location is None:
assert tm2.location is None
else:
for loc in tm2.location:
assert loc == tm.location
assert type(tm.location) is type(tm2.location) # noqa
# Effectively the same as a list of Times, but just to be sure that
    # Table mixin initialization is working as expected.
tm2 = Table([[tm, tm]])['col0']
if location is None:
assert tm2.location is None
else:
for loc in tm2.location:
assert loc == tm.location
assert type(tm.location) is type(tm2.location) # noqa
def test_location_init_fail():
"""Test fix in #9969 for issue #9962 where the location attribute is
    lost when initializing Time from an existing Time instance or a list of
Time instances. Make sure exception is correct.
"""
tm = Time('J2010', location=(45, 45))
tm2 = Time('J2010')
with pytest.raises(ValueError,
match='cannot concatenate times unless all locations'):
Time([tm, tm2])
|
bsd-3-clause
|
hainn8x/gnuradio
|
gr-filter/examples/chirp_channelize.py
|
58
|
7169
|
#!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys, time
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
from scipy import fftpack
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 200000 # number of samples to use
self._fs = 9000 # initial sampling rate
self._M = 9 # Number of channels to channelize
# Create a set of taps for the PFB channelizer
self._taps = filter.firdes.low_pass_2(1, self._fs, 500, 20,
attenuation_dB=10,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
repeated = True
if(repeated):
self.vco_input = analog.sig_source_f(self._fs, analog.GR_SIN_WAVE, 0.25, 110)
else:
amp = 100
data = scipy.arange(0, amp, amp/float(self._N))
self.vco_input = blocks.vector_source_f(data, False)
# Build a VCO controlled by either the sinusoid or single chirp tone
# Then convert this to a complex signal
self.vco = blocks.vco_f(self._fs, 225, 1)
self.f2c = blocks.float_to_complex()
self.head = blocks.head(gr.sizeof_gr_complex, self._N)
# Construct the channelizer filter
self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps)
# Construct a vector sink for the input signal to the channelizer
self.snk_i = blocks.vector_sink_c()
# Connect the blocks
self.connect(self.vco_input, self.vco, self.f2c)
self.connect(self.f2c, self.head, self.pfb)
self.connect(self.f2c, self.snk_i)
# Create a vector sink for each of M output channels of the filter and connect it
self.snks = list()
for i in xrange(self._M):
self.snks.append(blocks.vector_sink_c())
self.connect((self.pfb, i), self.snks[i])
def main():
tstart = time.time()
tb = pfb_top_block()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig_in = pylab.figure(1, figsize=(16,9), facecolor="w")
fig1 = pylab.figure(2, figsize=(16,9), facecolor="w")
fig2 = pylab.figure(3, figsize=(16,9), facecolor="w")
fig3 = pylab.figure(4, figsize=(16,9), facecolor="w")
Ns = 650
Ne = 20000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._fs
# Plot the input signal on its own figure
d = tb.snk_i.data()[Ns:Ne]
spin_f = fig_in.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
pin_f = spin_f.plot(f_in, X_in, "b")
spin_f.set_xlim([min(f_in), max(f_in)+1])
spin_f.set_ylim([-200.0, 50.0])
spin_f.set_title("Input Signal", weight="bold")
spin_f.set_xlabel("Frequency (Hz)")
spin_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
spin_t = fig_in.add_subplot(2, 1, 2)
pin_t = spin_t.plot(t_in, x_in.real, "b")
pin_t = spin_t.plot(t_in, x_in.imag, "r")
spin_t.set_xlabel("Time (s)")
spin_t.set_ylabel("Amplitude")
Ncols = int(scipy.floor(scipy.sqrt(tb._M)))
Nrows = int(scipy.floor(tb._M / Ncols))
if(tb._M % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = tb._fs / tb._M
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
for i in xrange(len(tb.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = tb.snks[i].data()[Ns:Ne]
sp1_f = fig1.add_subplot(Nrows, Ncols, 1+i)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(X))
f_o = freq
p2_f = sp1_f.plot(f_o, X_o, "b")
sp1_f.set_xlim([min(f_o), max(f_o)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title(("Channel %d" % i), weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_o = fig2.add_subplot(Nrows, Ncols, 1+i)
p2_o = sp2_o.plot(t_o, x_o.real, "b")
p2_o = sp2_o.plot(t_o, x_o.imag, "r")
sp2_o.set_xlim([min(t_o), max(t_o)+1])
sp2_o.set_ylim([-2, 2])
sp2_o.set_title(("Channel %d" % i), weight="bold")
sp2_o.set_xlabel("Time (s)")
sp2_o.set_ylabel("Amplitude")
sp3 = fig3.add_subplot(1,1,1)
p3 = sp3.plot(t_o, x_o.real)
sp3.set_xlim([min(t_o), max(t_o)+1])
sp3.set_ylim([-2, 2])
sp3.set_title("All Channels")
sp3.set_xlabel("Time (s)")
sp3.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
|
gpl-3.0
|
dsm054/pandas
|
pandas/tests/indexes/datetimes/test_astype.py
|
1
|
12095
|
from datetime import datetime
import dateutil
from dateutil.tz import tzlocal
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import (
DatetimeIndex, Index, Int64Index, NaT, Period, Series, Timestamp,
date_range)
import pandas.util.testing as tm
class TestDatetimeIndex(object):
def test_astype(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype(object)
expected = Index([Timestamp('2016-05-16')] + [NaT] * 3, dtype=object)
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([1463356800000000000] +
[-9223372036854775808] * 3, dtype=np.int64)
tm.assert_index_equal(result, expected)
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
tm.assert_index_equal(result, Index(rng.asi8))
tm.assert_numpy_array_equal(result.values, rng.asi8)
def test_astype_with_tz(self):
# with tz
rng = date_range('1/1/2000', periods=10, tz='US/Eastern')
result = rng.astype('datetime64[ns]')
expected = (date_range('1/1/2000', periods=10,
tz='US/Eastern')
.tz_convert('UTC').tz_localize(None))
tm.assert_index_equal(result, expected)
# BUG#10442 : testing astype(str) is correct for Series/DatetimeIndex
result = pd.Series(pd.date_range('2012-01-01', periods=3)).astype(str)
expected = pd.Series(
['2012-01-01', '2012-01-02', '2012-01-03'], dtype=object)
tm.assert_series_equal(result, expected)
result = Series(pd.date_range('2012-01-01', periods=3,
tz='US/Eastern')).astype(str)
expected = Series(['2012-01-01 00:00:00-05:00',
'2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'],
dtype=object)
tm.assert_series_equal(result, expected)
# GH 18951: tz-aware to tz-aware
idx = date_range('20170101', periods=4, tz='US/Pacific')
result = idx.astype('datetime64[ns, US/Eastern]')
expected = date_range('20170101 03:00:00', periods=4, tz='US/Eastern')
tm.assert_index_equal(result, expected)
# GH 18951: tz-naive to tz-aware
idx = date_range('20170101', periods=4)
result = idx.astype('datetime64[ns, US/Eastern]')
expected = date_range('20170101', periods=4, tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_astype_str_compat(self):
# GH 13149, GH 13209
# verify that we are returning NaT as a string (and not unicode)
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype(str)
expected = Index(['2016-05-16', 'NaT', 'NaT', 'NaT'], dtype=object)
tm.assert_index_equal(result, expected)
def test_astype_str(self):
# test astype string - #10442
result = date_range('2012-01-01', periods=4,
name='test_name').astype(str)
expected = Index(['2012-01-01', '2012-01-02', '2012-01-03',
'2012-01-04'], name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with tz and name
result = date_range('2012-01-01', periods=3, name='test_name',
tz='US/Eastern').astype(str)
expected = Index(['2012-01-01 00:00:00-05:00',
'2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with freqH and name
result = date_range('1/1/2011', periods=3, freq='H',
name='test_name').astype(str)
expected = Index(['2011-01-01 00:00:00', '2011-01-01 01:00:00',
'2011-01-01 02:00:00'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with freqH and timezone
result = date_range('3/6/2012 00:00', periods=2, freq='H',
tz='Europe/London', name='test_name').astype(str)
expected = Index(['2012-03-06 00:00:00+00:00',
'2012-03-06 01:00:00+00:00'],
dtype=object, name='test_name')
tm.assert_index_equal(result, expected)
def test_astype_datetime64(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype('datetime64[ns]')
tm.assert_index_equal(result, idx)
assert result is not idx
result = idx.astype('datetime64[ns]', copy=False)
tm.assert_index_equal(result, idx)
assert result is idx
idx_tz = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN], tz='EST')
result = idx_tz.astype('datetime64[ns]')
expected = DatetimeIndex(['2016-05-16 05:00:00', 'NaT', 'NaT', 'NaT'],
dtype='datetime64[ns]')
tm.assert_index_equal(result, expected)
def test_astype_object(self):
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
tm.assert_index_equal(casted, Index(exp_values, dtype=np.object_))
assert casted.tolist() == exp_values
@pytest.mark.parametrize('tz', [None, 'Asia/Tokyo'])
def test_astype_object_tz(self, tz):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz=tz)
expected_list = [Timestamp('2013-01-31', tz=tz),
Timestamp('2013-02-28', tz=tz),
Timestamp('2013-03-31', tz=tz),
Timestamp('2013-04-30', tz=tz)]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.astype(object)
tm.assert_index_equal(result, expected)
assert idx.tolist() == expected_list
def test_astype_object_with_nat(self):
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.astype(object)
tm.assert_index_equal(result, expected)
assert idx.tolist() == expected_list
@pytest.mark.parametrize('dtype', [
float, 'timedelta64', 'timedelta64[ns]', 'datetime64',
'datetime64[D]'])
def test_astype_raises(self, dtype):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
msg = 'Cannot cast DatetimeIndex to dtype'
with pytest.raises(TypeError, match=msg):
idx.astype(dtype)
def test_index_convert_to_datetime_array(self):
def _check_rng(rng):
converted = rng.to_pydatetime()
assert isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
assert isinstance(x, datetime)
assert x == stamp.to_pydatetime()
assert x.tzinfo == stamp.tzinfo
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_index_convert_to_datetime_array_explicit_pytz(self):
def _check_rng(rng):
converted = rng.to_pydatetime()
assert isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
assert isinstance(x, datetime)
assert x == stamp.to_pydatetime()
assert x.tzinfo == stamp.tzinfo
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519',
tz=pytz.timezone('US/Eastern'))
rng_utc = date_range('20090415', '20090519', tz=pytz.utc)
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_index_convert_to_datetime_array_dateutil(self):
def _check_rng(rng):
converted = rng.to_pydatetime()
assert isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
assert isinstance(x, datetime)
assert x == stamp.to_pydatetime()
assert x.tzinfo == stamp.tzinfo
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519',
tz='dateutil/US/Eastern')
rng_utc = date_range('20090415', '20090519', tz=dateutil.tz.tzutc())
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
@pytest.mark.parametrize('tz, dtype', [
['US/Pacific', 'datetime64[ns, US/Pacific]'],
[None, 'datetime64[ns]']])
def test_integer_index_astype_datetime(self, tz, dtype):
# GH 20997, 20964
val = [pd.Timestamp('2018-01-01', tz=tz).value]
result = pd.Index(val).astype(dtype)
expected = pd.DatetimeIndex(['2018-01-01'], tz=tz)
tm.assert_index_equal(result, expected)
class TestToPeriod(object):
def setup_method(self, method):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
self.index = DatetimeIndex(data)
def test_to_period_millisecond(self):
index = self.index
with tm.assert_produces_warning(UserWarning):
# warning that timezone info will be lost
period = index.to_period(freq='L')
assert 2 == len(period)
assert period[0] == Period('2007-01-01 10:11:12.123Z', 'L')
assert period[1] == Period('2007-01-01 10:11:13.789Z', 'L')
def test_to_period_microsecond(self):
index = self.index
with tm.assert_produces_warning(UserWarning):
# warning that timezone info will be lost
period = index.to_period(freq='U')
assert 2 == len(period)
assert period[0] == Period('2007-01-01 10:11:12.123456Z', 'U')
assert period[1] == Period('2007-01-01 10:11:13.789123Z', 'U')
@pytest.mark.parametrize('tz', [
'US/Eastern', pytz.utc, tzlocal(), 'dateutil/US/Eastern',
dateutil.tz.tzutc()])
def test_to_period_tz(self, tz):
ts = date_range('1/1/2000', '2/1/2000', tz=tz)
with tm.assert_produces_warning(UserWarning):
# GH#21333 warning that timezone info will be lost
result = ts.to_period()[0]
expected = ts[0].to_period()
assert result == expected
expected = date_range('1/1/2000', '2/1/2000').to_period()
with tm.assert_produces_warning(UserWarning):
# GH#21333 warning that timezone info will be lost
result = ts.to_period()
tm.assert_index_equal(result, expected)
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
pytest.raises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
assert idx.freqstr == 'D'
expected = pd.PeriodIndex(['2000-01-01', '2000-01-02',
'2000-01-03'], freq='D')
tm.assert_index_equal(idx.to_period(), expected)
# GH 7606
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'])
assert idx.freqstr is None
tm.assert_index_equal(idx.to_period(), expected)
|
bsd-3-clause
|
jenfly/monsoon-onset
|
scripts/save-energy-budget.py
|
1
|
5248
|
import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
sys.path.append('/home/jwalker/dynamics/python/monsoon-onset')
import numpy as np
import xarray as xray
import pandas as pd
import matplotlib.pyplot as plt
import collections
import atmos as atm
import merra
import indices
import utils
# ----------------------------------------------------------------------
version = 'merra2'
yearstr = '1980-2015'
datadir = atm.homedir() + 'datastore/%s/analysis/' % version
savedir = atm.homedir() + 'datastore/%s/figure_data/' % version
ndays = 5 # Rolling pentad
lon1, lon2 = 60, 100
eqlat1, eqlat2 = -5, 5
varnms = ['UFLXCPT', 'UFLXQV', 'UFLXPHI', 'VFLXCPT', 'VFLXQV', 'VFLXPHI',
'LWTUP', 'SWGNT', 'LWGNT', 'SWTNT', 'HFLUX', 'EFLUX']
filestr = datadir + version + '_%s_dailyrel_CHP_MFC_' + yearstr + '.nc'
datafiles = {nm : filestr % nm for nm in varnms}
savestr = savedir + version + '_%s_' + yearstr + '.nc'
savefiles = {}
for nm in ['energy_budget', 'energy_budget_sector', 'energy_budget_eq']:
savefiles[nm] = savestr % nm
# ----------------------------------------------------------------------
data = xray.Dataset()
for nm in varnms:
filenm = datafiles[nm]
print('Loading ' + filenm)
with xray.open_dataset(filenm) as ds:
var = atm.subset(ds[nm], {'lat' : (-60, 60)})
daydim = atm.get_coord(var, 'dayrel', 'dim')
data[nm] = atm.rolling_mean(var, ndays, axis=daydim)
data['NETRAD'] = data['SWTNT'] - data['LWTUP'] - data['SWGNT']- data['LWGNT']
data['FNET'] = data['NETRAD'] + data['EFLUX'] + data['HFLUX']
Lv = atm.constants.Lv.values
for nm in ['UFLXQV', 'VFLXQV']:
key = nm.replace('QV', 'LQV')
data[key] = data[nm] * Lv
data[key].attrs['units'] = 'J m-1 s-1'
data['UH'] = data['UFLXCPT'] + data['UFLXPHI'] + data['UFLXLQV']
data['VH'] = data['VFLXCPT'] + data['VFLXPHI'] + data['VFLXLQV']
data.attrs['ndays'] = ndays
print('Saving to ' + savefiles['energy_budget'])
data.to_netcdf(savefiles['energy_budget'])
# Sector mean data
data_sector = atm.dim_mean(data, 'lon', lon1, lon2)
data_sector.attrs['lon1'] = lon1
data_sector.attrs['lon2'] = lon2
# Equator mean data
data_eq = atm.dim_mean(data, 'lat', eqlat1, eqlat2)
data_eq.attrs['eqlat1'] = eqlat1
data_eq.attrs['eqlat2'] = eqlat2
a = atm.constants.radius_earth.values
dx = a * np.radians(lon2-lon1)
var = data_eq['UH']
data_eq['UH_DX'] = (var.sel(lon=lon2) - var.sel(lon=lon1))/dx
data_eq_bar = atm.dim_mean(data_eq, 'lon', lon1, lon2)
var = atm.subset(data_sector['VH'], {'lat' : (-30, 30)})
daydim = atm.get_coord(var, 'dayrel', 'dim')
var = atm.rolling_mean(var, ndays, axis=daydim)
days = atm.get_coord(var, 'dayrel')
lat = atm.get_coord(var, 'lat')
zerolat = np.nan * np.ones(len(days))
latmin = -15
latmax = 15
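# Added descriptive comment (not in the original script): the loop below tracks,
# for each day relative to monsoon onset, the latitude between latmin and latmax
# where the sector-mean meridional energy flux VH crosses zero (found with
# utils.find_zeros_1d, interp=0.1, return_type='min'), and stores it in zerolat.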
for i, day in enumerate(days[ndays:-ndays]):
print(day)
zerolat[i] = utils.find_zeros_1d(lat, var.sel(dayrel=day), latmin, latmax,
interp=0.1, return_type='min')
#cint = atm.cinterval(var, n_pref=50, symmetric=True)
#clevs = atm.clevels(var, cint, symmetric=True)
clevs = np.arange(-4e9, 4.1e9, 0.2e9)
plt.figure()
plt.contourf(days, lat, var.T, clevs, cmap='RdBu_r', extend='both')
plt.colorbar()
plt.grid()
plt.plot(days, zerolat, 'k', linewidth=2)
#plt.contour(days, lat, var.T, [0], colors='0.5', linewidths=2)
# ----------------------------------------------------------------------
# Radiation terms - monthly data
#
# def concat_years_rad(years, datadir, nms_rad, subset_dict={'lon' : (40, 120)}):
#
# def monthly_rad(datafiles, year, nms_rad, concat_dim='time'):
# ds = atm.load_concat(datafiles, var_ids=nms_rad, concat_dim=concat_dim)
# ds = ds.rename({concat_dim : 'month'})
# ds['month'] = range(1, 13)
# for nm in ds.data_vars:
# ds[nm] = atm.expand_dims(ds[nm], 'year', year, axis=0)
# return ds
#
# prod = {yr : 100 for yr in range(1980, 1992)}
# for yr in range(1992, 2001):
# prod[yr] = 200
# for yr in range(2001, 2011):
# prod[yr] = 300
# for yr in range(2011, 2016):
# prod[yr] = 400
#
# filestr = datadir + 'MERRA2_%d.tavgM_2d_rad_Nx.%d%02d.nc4'
# files = {}
# months = range(1, 13)
# for yr in years:
# files[yr] = [filestr % (prod[yr], yr, mon) for mon in months]
#
# for i, year in enumerate(files):
# dsyr = monthly_rad(files[year], year, nms_rad)
# dsyr = atm.subset(dsyr, subset_dict)
# if i == 0:
# ds = dsyr
# else:
# ds = xray.concat([ds, dsyr], dim='year')
# return ds
#
#
# nms_rad = ['SWTNT', 'LWTUP', 'SWGNT', 'LWGNT']
# ds_rad = concat_years_rad(years, datadir2, nms_rad)
# ds_rad['NETRAD'] = (ds_rad['SWTNT'] - ds_rad['LWTUP'] - ds_rad['SWGNT']
# - ds_rad['LWGNT'])
# ds_rad['NETRAD'].attrs['long_name'] = 'Net radiation into atmospheric column'
# ds_rad['NETRAD'].attrs['units'] = ds_rad['SWTNT'].attrs['units']
#
# savefile = datadir2 + 'merra2_rad_1980-2015.nc4'
# ds_rad.to_netcdf(savefile)
# ----------------------------------------------------------------------
# Julian day climatologies of EFLUX, HFLUX, uh, vh
|
mit
|
ghorn/debian-casadi
|
experimental/joel/test_acado_integrator.py
|
1
|
2870
|
#
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010-2014 Joel Andersson, Joris Gillis, Moritz Diehl,
# K.U. Leuven. All rights reserved.
# Copyright (C) 2011-2014 Greg Horn
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# -*- coding: utf-8 -*-
from numpy import *
import matplotlib.pyplot as plt
# CasADi
from casadi import *
# End time
tf = 1.0
# Variables
t = ssym("t")
x = ssym("x")
z = ssym("z")
p = ssym("p")
q = ssym("q")
# Differential equation input argument
ffcn_in = SXVector(DAE_NUM_IN)
ffcn_in[DAE_T] = t
ffcn_in[DAE_Y] = vertcat((x,z))
ffcn_in[DAE_P] = vertcat((p,q))
# Differential equation output argument
ffcn_out = [vertcat((-p*x*x*z, \
q*q - z*z + 0.1*x))]
# Differential equation
ffcn = SXFunction(ffcn_in,ffcn_out)
# Create integrator
integrator = AcadoIntegrator(ffcn)
integrator.setOption("time_dependence",False)
integrator.setOption("num_algebraic",1)
integrator.setOption("num_grid_points",100)
integrator.setOption("tf",tf)
integrator.init()
# Initial conditions
xz0 = array([1.0, 1.000000])
pq0 = array([1.0, 1.0])
integrator.setInput(xz0, "x0")
integrator.setInput(pq0, "p")
# Seeds
integrator.setFwdSeed([0.,0.], "x0")
integrator.setFwdSeed([1.,0.], "p")
integrator.setAdjSeed([1.,0.], "xf")
# Integrate with forward and adjoint sensitivities
integrator.evaluate(1,0)
#integrator.evaluate(1,1) # NOTE ACADO does not support adjoint mode AD using interfaced functions
# Result
print "final state = ", integrator.output("xf").data(), " (XF)"
print "forward sensitivities = ", integrator.fwdSens("xf").data(), " (XF)"
#print "adjoint sensitivities = ", integrator.adjSens("x0").data(), " (X0), ", integrator.adjSens("p").data(), " (P)"
# Create a simulator
tgrid = numpy.linspace(0,tf,100)
simulator = Simulator(integrator,tgrid)
simulator.init()
simulator.setInput(xz0, "x0")
simulator.setInput(pq0, "p")
simulator.evaluate()
plt.clf()
plt.plot(tgrid,simulator.getOutput())
plt.legend(('differential state', 'algebraic state'))
plt.grid(True)
plt.show()
print "Script finished"
|
lgpl-3.0
|
lukeiwanski/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/estimator_input_test.py
|
46
|
13101
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
from tensorflow.python.training import training_util
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner_impl
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def boston_input_fn_with_queue(num_epochs=None):
features, labels = boston_input_fn(num_epochs=num_epochs)
# Create a minimal queue runner.
fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)
queue_runner = queue_runner_impl.QueueRunner(fake_queue,
[constant_op.constant(0)])
queue_runner_impl.add_queue_runner(queue_runner)
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features],
0), array_ops.concat([labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
class EstimatorInputTest(test.TestCase):
def testContinueTrainingDictionaryInput(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=50)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
del est
# Create another estimator object with the same output dir.
est2 = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
# Check we can evaluate and predict.
scores2 = est2.evaluate(
x=boston_input,
y=float64_target,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
self.assertAllClose(scores2['MSE'], scores['MSE'])
predictions = np.array(list(est2.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions,
float64_target['labels'])
self.assertAllClose(other_score, scores['MSE'])
def testBostonAll(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=100)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
predictions = np.array(list(est.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(scores['MSE'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testBostonAllDictionaryInput(self):
boston = base.load_boston()
est = estimator.Estimator(model_fn=linear_model_fn)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=100)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
predictions = np.array(list(est.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(other_score, scores['MSE'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisAll(self):
iris = base.load_iris()
est = estimator.SKCompat(
estimator.Estimator(model_fn=logistic_model_no_mode_fn))
est.fit(iris.data, iris.target, steps=100)
scores = est.score(
x=iris.data,
y=iris.target,
metrics={
('accuracy', 'class'): metric_ops.streaming_accuracy
})
predictions = est.predict(x=iris.data)
predictions_class = est.predict(x=iris.data, outputs=['class'])['class']
self.assertEqual(predictions['prob'].shape[0], iris.target.shape[0])
self.assertAllClose(predictions['class'], predictions_class)
self.assertAllClose(predictions['class'],
np.argmax(predictions['prob'], axis=1))
other_score = _sklearn.accuracy_score(iris.target, predictions['class'])
self.assertAllClose(scores['accuracy'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testIrisAllDictionaryInput(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
iris_data = {'input': iris.data}
iris_target = {'labels': iris.target}
est.fit(iris_data, iris_target, steps=100)
scores = est.evaluate(
x=iris_data,
y=iris_target,
metrics={
('accuracy', 'class'): metric_ops.streaming_accuracy
})
predictions = list(est.predict(x=iris_data))
predictions_class = list(est.predict(x=iris_data, outputs=['class']))
self.assertEqual(len(predictions), iris.target.shape[0])
classes_batch = np.array([p['class'] for p in predictions])
self.assertAllClose(classes_batch,
np.array([p['class'] for p in predictions_class]))
self.assertAllClose(classes_batch,
np.argmax(
np.array([p['prob'] for p in predictions]), axis=1))
other_score = _sklearn.accuracy_score(iris.target, classes_batch)
self.assertAllClose(other_score, scores['accuracy'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisInputFn(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisInputFnLabelsDict(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn_labels_dict, steps=100)
_ = est.evaluate(
input_fn=iris_input_fn_labels_dict,
steps=1,
metrics={
'accuracy':
metric_spec.MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='class',
label_key='labels')
})
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testTrainInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_eval_fn, steps=1)
def testPredictInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testPredictInputFnWithQueue(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn_with_queue, num_epochs=2)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0] * 2)
def testPredictConstInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
def input_fn():
features = array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
if __name__ == '__main__':
test.main()
|
apache-2.0
|
jwlawson/tensorflow
|
tensorflow/examples/get_started/regression/imports85.py
|
24
|
6638
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A dataset loader for imports85.data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
pass
URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data"
# Order is important for the csv-readers, so we use an OrderedDict here.
defaults = collections.OrderedDict([
("symboling", [0]),
("normalized-losses", [0.0]),
("make", [""]),
("fuel-type", [""]),
("aspiration", [""]),
("num-of-doors", [""]),
("body-style", [""]),
("drive-wheels", [""]),
("engine-location", [""]),
("wheel-base", [0.0]),
("length", [0.0]),
("width", [0.0]),
("height", [0.0]),
("curb-weight", [0.0]),
("engine-type", [""]),
("num-of-cylinders", [""]),
("engine-size", [0.0]),
("fuel-system", [""]),
("bore", [0.0]),
("stroke", [0.0]),
("compression-ratio", [0.0]),
("horsepower", [0.0]),
("peak-rpm", [0.0]),
("city-mpg", [0.0]),
("highway-mpg", [0.0]),
("price", [0.0])
]) # pyformat: disable
types = collections.OrderedDict((key, type(value[0]))
for key, value in defaults.items())
def _get_imports85():
path = tf.contrib.keras.utils.get_file(URL.split("/")[-1], URL)
return path
def dataset(y_name="price", train_fraction=0.7):
"""Load the imports85 data as a (train,test) pair of `Dataset`.
Each dataset generates (features_dict, label) pairs.
Args:
y_name: The name of the column to use as the label.
train_fraction: A float, the fraction of data to use for training. The
remainder will be used for evaluation.
Returns:
A (train,test) pair of `Datasets`
"""
# Download and cache the data
path = _get_imports85()
# Define how the lines of the file should be parsed
def decode_line(line):
"""Convert a csv line into a (features_dict,label) pair."""
    # Decode the line to a tuple of items based on the types of
    # defaults.values().
items = tf.decode_csv(line, list(defaults.values()))
# Convert the keys and items to a dict.
pairs = zip(defaults.keys(), items)
features_dict = dict(pairs)
# Remove the label from the features_dict
label = features_dict.pop(y_name)
return features_dict, label
def has_no_question_marks(line):
"""Returns True if the line of text has no question marks."""
# split the line into an array of characters
chars = tf.string_split(line[tf.newaxis], "").values
# for each character check if it is a question mark
is_question = tf.equal(chars, "?")
any_question = tf.reduce_any(is_question)
no_question = ~any_question
return no_question
def in_training_set(line):
"""Returns a boolean tensor, true if the line is in the training set."""
# If you randomly split the dataset you won't get the same split in both
# sessions if you stop and restart training later. Also a simple
# random split won't work with a dataset that's too big to `.cache()` as
# we are doing here.
num_buckets = 1000000
bucket_id = tf.string_to_hash_bucket_fast(line, num_buckets)
# Use the hash bucket id as a random number that's deterministic per example
return bucket_id < int(train_fraction * num_buckets)
def in_test_set(line):
"""Returns a boolean tensor, true if the line is in the training set."""
# Items not in the training set are in the test set.
# This line must use `~` instead of `not` because `not` only works on python
# booleans but we are dealing with symbolic tensors.
return ~in_training_set(line)
base_dataset = (tf.contrib.data
# Get the lines from the file.
.TextLineDataset(path)
# drop lines with question marks.
.filter(has_no_question_marks))
train = (base_dataset
# Take only the training-set lines.
.filter(in_training_set)
# Decode each line into a (features_dict, label) pair.
.map(decode_line)
# Cache data so you only decode the file once.
.cache())
# Do the same for the test-set.
test = (base_dataset.filter(in_test_set).cache().map(decode_line))
return train, test
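# Hedged usage sketch (added; not in the original file). The helper is defined
# but never called, so importing this module behaves exactly as before; it
# assumes the tf.contrib.data Dataset API of this TF version provides shuffle()
# and batch() in addition to the filter/map/cache calls used above.
def _example_dataset_usage():
  train, test = dataset(y_name="price")
  # Each element is a (features_dict, label) pair; shuffle and batch for training.
  return train.shuffle(1000).batch(32), test.batch(32)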
def raw_dataframe():
"""Load the imports85 data as a pd.DataFrame."""
# Download and cache the data
path = _get_imports85()
# Load it into a pandas dataframe
df = pd.read_csv(path, names=types.keys(), dtype=types, na_values="?")
return df
def load_data(y_name="price", train_fraction=0.7, seed=None):
"""Get the imports85 data set.
A description of the data is available at:
https://archive.ics.uci.edu/ml/datasets/automobile
The data itself can be found at:
https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data
Args:
y_name: the column to return as the label.
train_fraction: the fraction of the dataset to use for training.
seed: The random seed to use when shuffling the data. `None` generates a
unique shuffle every run.
Returns:
a pair of pairs where the first pair is the training data, and the second
is the test data:
    `(x_train, y_train), (x_test, y_test) = load_data(...)`
`x` contains a pandas DataFrame of features, while `y` contains the label
array.
"""
# Load the raw data columns.
data = raw_dataframe()
# Delete rows with unknowns
data = data.dropna()
# Shuffle the data
np.random.seed(seed)
# Split the data into train/test subsets.
x_train = data.sample(frac=train_fraction, random_state=seed)
x_test = data.drop(x_train.index)
# Extract the label from the features dataframe.
y_train = x_train.pop(y_name)
y_test = x_test.pop(y_name)
return (x_train, y_train), (x_test, y_test)
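# Hedged usage sketch (added; not in the original file), mirroring the pattern in
# the load_data docstring above. The helper is defined but never called; running
# it requires pandas and a successful download of the UCI data file.
def _example_load_data_usage():
  (x_train, y_train), (x_test, y_test) = load_data(y_name="price", seed=0)
  return x_train.shape, x_test.shape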
|
apache-2.0
|
KaiYan0729/nyu_python
|
advanced_python/assignments/proj3/data_project.py
|
1
|
5994
|
import numpy as np
import pandas as pd
#import matplotlib
import matplotlib.pyplot as plt
import datetime
import pandas_datareader.data as web
import math
import scipy.optimize as sco
#download data from Yahoo Finance and visualize stock growth
def get_data(ticker):
    # set the starting time and ending time
start = datetime.datetime(2010, 1, 1)
end = datetime.datetime(2014, 12, 31)
# user input equity tickers
data = pd.DataFrame()
data[ticker] = web.DataReader(ticker, 'yahoo', start, end)['Adj Close']
    data.columns = ticker
#plot stock price over time
data.plot(figsize=(8, 5))
plt.title('Stock Price Over Time')
plt.xlabel('Time')
plt.ylabel('Stock Price')
plt.show()
#print('\n----------Price Series---------')
#print(data)
return data
#Data analysis to calculate the portfolio's annualized return, volatility and Sharpe ratio
def data_analysis(weights, data):
rets = np.log(data / data.shift(-1))
rf = 0.05 # risk free interest rate, constant
weights = np.array(weights)
annual_ret = np.sum(rets.mean() * weights) * 252
annual_vol = np.sqrt(np.dot(weights.T, np.dot(rets.cov() * 252, weights)))
return np.array([annual_ret, annual_vol, (annual_ret - rf) / annual_vol])
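# Hedged illustration (added; not part of the original script): the same
# annualization applied to a tiny synthetic price table. The helper is defined
# but never called, so the script's behaviour is unchanged.
def _example_annualization():
    prices = pd.DataFrame({'A': [100.0, 101.0, 102.0, 103.0],
                           'B': [50.0, 49.0, 51.0, 52.0]})
    weights = np.array([0.5, 0.5])
    # Returns [annualized return, annualized volatility, Sharpe ratio], where
    # annual_ret = 252 * (mean daily log return . weights) and
    # annual_vol = sqrt(252 * weights' Cov weights), as computed above.
    return data_analysis(weights, prices)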
#Define objective function for portfolio optimization
def objective_function(intial_weights, data, choice):
if choice == 'Maximize Return':
func = -data_analysis(intial_weights, data)[0]
elif choice == 'Minimize Volatility':
func = data_analysis(intial_weights, data)[1]
    elif choice == 'Maximize Sharpe Ratio':
func = -data_analysis(intial_weights, data)[2]
return func
#calculate optimal weight for each stock
def optimal_weight(numTicker, data, choice):
cons = ({'type': 'eq', 'fun': lambda x: np.sum(x) - 1})
bnds = tuple((0, 1) for x in range(numTicker))
intial_weights = numTicker * [1 / numTicker]
statistics = sco.minimize(objective_function, intial_weights, args=(data, choice,), method='SLSQP', bounds=bnds,
constraints=cons)
weights = statistics['x'].round(2)
return weights
#statistics for the portfolio
def test(final_weight, data, choice):
output = data_analysis(final_weight, data)
print('If your goal is to {}'.format(choice),
', then your portfolio statistics is:\nannualized return = {}'.format(output[0].round(3)),
'\nvolatilites = {}'.format(output[1].round(3)), '\nsharpe ratio = {}'.format(output[2].round(3)))
#portfolio growth visualization
def portfolio_growth_visualization(data, weights, choice):
rets = np.log(data / data.shift(-1))
value = rets * weights
portRet = np.sum(value, axis=1)
portRet = np.flip(portRet, 0)
portRet = portRet.cumsum()
portVal = 10000 * np.exp(portRet) #portfolio value started with $10000 initial investment
#graph, portfolio growth
portVal.plot(figsize=(8, 5))
plt.title('Portfolio Growth Visualization, Type:{}'.format(choice))
plt.xlabel('Time')
plt.ylabel('Portfolio Value')
plt.show()
return portVal
#portfolio risk visualization (measured by 5% Value-at-Risk (VaR))
def value_at_risk_visualization(data,weights,choice):
rets = np.log(data / data.shift(-1))
value = rets * weights
portRet = np.sum(value, axis=1)
portRet = np.flip(portRet, 0)
portRet = portRet.cumsum()
portVal = 10000 * np.exp(portRet)
portVal = pd.Series.to_frame(portVal)
    #graph, histogram of portfolio value
portVal.plot.hist()
plt.title('Portfolio 5% Value-at-Risk Visualization, Type:{}'.format(choice))
    plt.xlabel('Portfolio Value')
plt.ylabel('Frequency')
plt.grid(True)
plt.show()
#calculate portfolio risk, the 5th percentile(VaR)
p5 = np.percentile(portVal, 5)
return p5
def main():
ticker = ['BK', 'AAPL', 'LUV', 'MCO', 'DVA']
# user_ticker = input('Please enter the stock ticker you want to include in your portfolio--->')
# ticker = user_ticker.split(',')
numTicker = len(ticker)
data = get_data(ticker)
# max return
choice = 'Maximize Return'
final_weight = optimal_weight(numTicker, data, choice)
dict_weights = dict(zip(ticker, final_weight))
print('----------Optimal weights if your goal is to {}'.format(choice), '---------')
print(dict_weights)
print('\nIn sample test results--->')
test_result = test(final_weight, data, choice)
portfolio_growth_visualization(data, final_weight, choice)
var5 = value_at_risk_visualization(data,final_weight,choice) #calculate 5% Value at Risk(VaR)
print('\nThe 5% VaR is --->')
print(var5)
# min volatility
choice = 'Minimize Volatility'
final_weight = optimal_weight(numTicker, data, choice)
dict_weights = dict(zip(ticker, final_weight))
print('----------Optimal weights if your goal is to {}'.format(choice), '---------')
print(dict_weights)
print('\nIn sample test results--->')
test_result = test(final_weight, data, choice)
portfolio_growth_visualization(data, final_weight, choice)
var5 = value_at_risk_visualization(data,final_weight,choice) #calculate 5% Value at Risk(VaR)
print('\nThe 5% VaR is --->')
print(var5)
# max sharpe ratio
    choice = 'Maximize Sharpe Ratio'
final_weight = optimal_weight(numTicker, data, choice)
dict_weights = dict(zip(ticker, final_weight))
print('----------Optimal weights if your goal is to {}'.format(choice), '---------')
print(dict_weights)
print('\nIn sample test results--->')
test_result = test(final_weight, data, choice)
portfolio_growth_visualization(data, final_weight, choice)
var5 = value_at_risk_visualization(data,final_weight,choice) #calculate 5% Value at Risk(VaR)
print('\nThe 5% VaR is --->')
print(var5)
if __name__ == '__main__':
main()
|
mit
|
dvro/UnbalancedDataset
|
imblearn/under_sampling/tests/test_one_sided_selection.py
|
2
|
5317
|
"""Test the module one-sided selection."""
from __future__ import print_function
import os
import numpy as np
from numpy.testing import assert_raises
from numpy.testing import assert_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_warns
from sklearn.datasets import make_classification
from sklearn.utils.estimator_checks import check_estimator
from collections import Counter
from imblearn.under_sampling import OneSidedSelection
# Generate a global dataset to use
RND_SEED = 0
X = np.array([[-0.3879569, 0.6894251],
[-0.09322739, 1.28177189],
[-0.77740357, 0.74097941],
[0.91542919, -0.65453327],
[-0.03852113, 0.40910479],
[-0.43877303, 1.07366684],
[-0.85795321, 0.82980738],
[-0.18430329, 0.52328473],
[-0.30126957, -0.66268378],
[-0.65571327, 0.42412021],
[-0.28305528, 0.30284991],
[0.20246714, -0.34727125],
[1.06446472, -1.09279772],
[0.30543283, -0.02589502],
[-0.00717161, 0.00318087]])
Y = np.array([0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0])
def test_oss_sk_estimator():
"""Test the sklearn estimator compatibility"""
check_estimator(OneSidedSelection)
def test_oss_init():
"""Test the initialisation of the object"""
# Define a ratio
oss = OneSidedSelection(random_state=RND_SEED)
assert_equal(oss.size_ngh, 1)
assert_equal(oss.n_seeds_S, 1)
assert_equal(oss.n_jobs, -1)
assert_equal(oss.random_state, RND_SEED)
def test_oss_fit_single_class():
"""Test either if an error when there is a single class"""
# Create the object
oss = OneSidedSelection(random_state=RND_SEED)
# Resample the data
# Create a wrong y
y_single_class = np.zeros((X.shape[0], ))
assert_warns(RuntimeWarning, oss.fit, X, y_single_class)
def test_oss_fit():
"""Test the fitting method"""
# Create the object
oss = OneSidedSelection(random_state=RND_SEED)
# Fit the data
oss.fit(X, Y)
# Check if the data information have been computed
assert_equal(oss.min_c_, 0)
assert_equal(oss.maj_c_, 1)
assert_equal(oss.stats_c_[0], 6)
assert_equal(oss.stats_c_[1], 9)
def test_oss_sample_wt_fit():
"""Test either if an error is raised when sample is called before
fitting"""
# Create the object
oss = OneSidedSelection(random_state=RND_SEED)
assert_raises(RuntimeError, oss.sample, X, Y)
def test_oss_fit_sample():
"""Test the fit sample routine"""
# Resample the data
oss = OneSidedSelection(random_state=RND_SEED)
X_resampled, y_resampled = oss.fit_sample(X, Y)
X_gt = np.array([[-0.3879569, 0.6894251],
[0.91542919, -0.65453327],
[-0.65571327, 0.42412021],
[1.06446472, -1.09279772],
[0.30543283, -0.02589502],
[-0.00717161, 0.00318087],
[-0.09322739, 1.28177189],
[-0.77740357, 0.74097941],
[-0.43877303, 1.07366684],
[-0.85795321, 0.82980738],
[-0.30126957, -0.66268378],
[0.20246714, -0.34727125]])
y_gt = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1])
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_oss_fit_sample_with_indices():
"""Test the fit sample routine with indices support"""
# Resample the data
oss = OneSidedSelection(return_indices=True, random_state=RND_SEED)
X_resampled, y_resampled, idx_under = oss.fit_sample(X, Y)
X_gt = np.array([[-0.3879569, 0.6894251],
[0.91542919, -0.65453327],
[-0.65571327, 0.42412021],
[1.06446472, -1.09279772],
[0.30543283, -0.02589502],
[-0.00717161, 0.00318087],
[-0.09322739, 1.28177189],
[-0.77740357, 0.74097941],
[-0.43877303, 1.07366684],
[-0.85795321, 0.82980738],
[-0.30126957, -0.66268378],
[0.20246714, -0.34727125]])
y_gt = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1])
idx_gt = np.array([0, 3, 9, 12, 13, 14, 1, 2, 5, 6, 7, 10])
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
assert_array_equal(idx_under, idx_gt)
def test_oss_sample_wrong_X():
"""Test either if an error is raised when X is different at fitting
and sampling"""
# Create the object
oss = OneSidedSelection(random_state=RND_SEED)
oss.fit(X, Y)
assert_raises(RuntimeError, oss.sample, np.random.random((100, 40)),
np.array([0] * 50 + [1] * 50))
def test_multiclass_error():
""" Test either if an error is raised when the target are not binary
type. """
# continuous case
y = np.linspace(0, 1, 15)
oss = OneSidedSelection(random_state=RND_SEED)
assert_warns(UserWarning, oss.fit, X, y)
# multiclass case
y = np.array([0] * 10 + [1] * 3 + [2] * 2)
oss = OneSidedSelection(random_state=RND_SEED)
assert_warns(UserWarning, oss.fit, X, y)
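# Minimal usage sketch of the estimator exercised by these tests (X and Y are
# the module-level toy fixtures above; shown here only for orientation):
#
#     oss = OneSidedSelection(random_state=RND_SEED)
#     X_res, y_res = oss.fit_sample(X, Y)
#     print(Counter(y_res))   # class counts after under-sampling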
|
mit
|
p5a0u9l/clamm
|
clamm/streams/to_tracks.py
|
1
|
12884
|
"""
streams module contains classes, programs, tools for creating
and processing audio streams.
"""
import os
import wave
from glob import glob
import sys
import matplotlib.pyplot as plt
from tqdm import trange
import numpy as np
import taglib
from nltk import distance
import itunespy
from clamm import config
from clamm import util
# constants, globals
plt.switch_backend("agg")
DF = config["streams"]["downsample_factor"]
DF = 4410 * 10
FS = 44100
FS_DEC = FS / DF
SAMP2MIN = 1 / FS / 60
MS2SEC = 1 / 1000
MS2MIN = MS2SEC / 60
class StreamError(Exception):
""" StreamError """
def __init__(self, expression, message):
self.expression = expression
self.message = message
class Stream():
""" Stream """
def __init__(self, streampath):
""" """
self.pcmpath = streampath
self.wavpath = streampath.replace("pcm", "wav")
self.query = []
self.artist = []
self.album = []
self.name = []
self.threshold = 8
def pcm2wav(self):
""" pcm2wav """
if not os.path.exists(self.wavpath):
util.pcm2wav(self.pcmpath, self.wavpath)
def decode_path(self):
"""artist/album names from stream name
"""
tmp = self.pcmpath.replace(".pcm", "")
[artist, album] = tmp.split(";")
self.artist, self.album = os.path.split(artist)[-1], album.strip()
util.printr("Found and Parsed {} --> {} as target...".format(
self.artist, self.album))
self.name = "{}; {}".format(self.artist, self.album)
return self
def itunes_query(self):
"""seek an iTunes ``collection_id`` by iterating over albums
of from a search artist and finding the minimum
``nltk.distance.edit_distance``
"""
min_dist = 10000
for aquery in itunespy.search_album(self.artist):
dist = distance.edit_distance(aquery.collection_name, self.album)
if dist < min_dist:
min_dist = dist
min_query = aquery
if min_dist < self.threshold:
self.query = itunespy.lookup(id=min_query.collection_id)[0]
if not self.query:
sys.exit("ERROR: album search failed...")
return self
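    # Sketch of the matching criterion used in itunes_query (album titles
    # below are hypothetical): nltk's edit_distance counts single-character
    # edits, so a near-identical title scores low and wins the minimum:
    #
    #     distance.edit_distance("Kind of Blue", "Kind Of Blue")       # -> 1
    #     distance.edit_distance("Kind of Blue", "Sketches of Spain")  # -> much larger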
def prepare_target(self):
artist_dir = os.path.join(
config["path"]["library"],
self.query.artist_name)
self.target = os.path.join(artist_dir, self.query.collection_name)
if not os.path.exists(artist_dir):
os.mkdir(artist_dir)
if not os.path.exists(self.target):
os.mkdir(self.target)
return self
def flacify(self):
"""convert all wav files in target directory to flac files
"""
        for wavname in glob(os.path.join(self.target, "*wav")):
            util.wav2flac(wavname)
return self
def tagify(self):
"""Use itunes_query to populate audio track tags.
"""
for i, track in enumerate(self.query.get_tracks()):
tracknum = "%0.2d" % (i + 1)
globber = glob(os.path.join(self.target, tracknum + "*flac"))
flac = taglib.File(globber[0])
flac.tags["ALBUM"] = [self.query.collection_name]
flac.tags["ALBUMARTIST"] = [self.query.artist_name]
flac.tags["ARTIST"] = [track.name]
flac.tags["TRACKNUMBER"] = [str(track.number)]
flac.tags["DATE"] = [self.query.release_date]
flac.tags["LABEL"] = [self.query.copyright]
flac.tags["GENRE"] = [self.query.primary_genre_name]
flac.tags["TITLE"] = [track.name]
flac.tags["COMPILATION"] = ["0"]
flac.save()
flac.close()
class Album():
"""Process an audio stream into an Album
Attributes
----------
wavstream: wave.Wave_read
Read access to wave file
framerate: int
rate, in Hz, of channel samples
track_list: list
list of itunespy.track.Track objects containing track tags
wavstream: wave.File
"""
def __init__(self, stream):
self.wavstream = wave.open(stream.wavpath)
# itunes
self.track_list = stream.query.get_tracks()
self.n_track = len(self.track_list)
self.current = 0
self.track = []
# inherit from query
self.target = stream.target
self.name = stream.query.collection_name
self.release_date = stream.query.release_date
self.copyright = stream.query.copyright
self.genre = stream.query.primary_genre_name
self.artist = stream.query.artist_name
# debug
self.err = {"dur": [], "pos": []}
self.cumtime = 0
def cur_track_start(self):
"""find audio signal activity that exceeds threshold and persists
call this the start frame of a track
"""
track = self.track[self.current]
threshold = 500
persistence = 1
found_count = 0
        preactivity_offset = int(1 * FS_DEC)
firstindex = 0
if self.current > 0:
            firstindex = self.track[self.current - 1].end_frame // DF
index = firstindex
while found_count <= persistence:
if self.envelope[index] > threshold:
found_count += 1
else:
found_count = 0
index += 1
# subtract off persistence and a preactivity_offset
activity = index - persistence - preactivity_offset
if activity < firstindex:
activity = firstindex
track.start_frame = activity * DF
def cur_track_stop(self):
"""find the min energy point around a reference
"""
track = self.track[self.current]
n_samp_track = int(track.duration * MS2SEC * FS_DEC)
        reference = track.start_frame // DF + n_samp_track
        # +- 5 seconds around projected end frame
        excursion = int(5 * FS_DEC)
        curpos = reference - excursion
go_till = np.min(
[reference + excursion, self.wavstream.getnframes() / DF])
local_min = 1e9
local_idx = -1
while curpos < go_till:
if self.envelope[curpos] < local_min:
local_min = self.envelope[curpos]
local_idx = curpos
curpos += 1
track.end_frame = local_idx * DF
track.n_frame = track.end_frame - track.start_frame
def locate_track(self):
""" find track starts/stops within stream
"""
# find start of track (find activity)
self.cur_track_start()
# find end of track (find local min)
self.cur_track_stop()
return self
def status(self):
""" status """
track = self.track[self.current]
trackname = track.name.strip().replace("/", ";")
# status prints
print("{}".format(trackname))
self.err["dur"].append(track.n_frame / FS - track.duration / 1000)
self.err["pos"].append(track.start_frame / FS - self.cumtime)
print("\tESTIMATED duration: %.2f sec --> position: %.2f sec" %
(track.n_frame / FS, track.start_frame / FS))
print("\tEXPECTED %.2f sec --> %.2f sec" %
(track.duration / 1000, self.cumtime))
print("\tERROR\t (%.2f, %.2f) sec \t --> (%.2f, %.2f) sec" %
(np.mean(self.err["dur"]), np.std(self.err["dur"]),
np.mean(self.err["pos"]), np.std(self.err["pos"])))
self.cumtime += track.duration / 1000
return self
def finalize(self):
""" finalize """
with wave.open(self.track.path, 'w') as wavtrack:
self.wavstream.setpos(self.track.start_frame)
y = np.fromstring(
self.wavstream.readframes(self.n_frame),
dtype=np.int16)
y = np.reshape(y, (int(y.shape[0] / 2), 2))
wavtrack.setnchannels(2)
wavtrack.setsampwidth(2)
wavtrack.setnframes(self.n_frame)
wavtrack.setframerate(self.wavstream.getframerate())
wavtrack.writeframes(y)
def process(self):
"""encapsulate the substance of Album processing
"""
# iterate and initialize tracks
for i in range(self.n_track):
self.track.append(Track(self.track_list[i]))
self.track[i].set_path(i, self.target)
# compute audio power envelope
self.envelope = wave_envelope(self.wavstream)
# truncate zeros in beginning
        first_nz = int(np.nonzero(self.envelope)[0][0] - FS_DEC * 3)
self.envelope = self.envelope[first_nz:-1]
self.imageit()
# test envelope to expected
n_sec_env = len(self.envelope) / FS_DEC
n_sec_exp = sum([t.duration * MS2SEC for t in self.track])
if abs(1 - n_sec_env / n_sec_exp) > .05:
raise StreamError("envelope does not match expected duration")
# iterate and process tracks
for i in range(self.n_track):
self.locate_track().status()
self.current += 1
self.imageit()
# close the wav stream
self.wavstream.close()
return self
def imageit(self):
""" imageit """
x_data = self.envelope < 20**2
y = self.envelope / (np.max(self.envelope) * 0.008)
n = np.shape(x_data)[0]
n_min = int(n / FS_DEC / 60)
plt.figure(figsize=(3 * n_min, 4))
plt.plot(x_data, marker=".", linestyle='')
# plt.plot(y, marker=".", linestyle='', markersize=3)
plt.plot(y, marker=".", linestyle='')
plt.ylim(0, 1.1)
marks = np.cumsum([t.duration * MS2SEC * FS_DEC for t in self.track])
        [plt.axvline(x=mark, color="b", linestyle="--") for mark in marks]
saveit('image')
class Track():
def __init__(self, itrack):
# copy from itunespy.Track
self.duration = itrack.track_time
self.name = itrack.track_name
self.artist = itrack.artist_name
self.number = itrack.track_number
# streamy attrs
self.start_frame = 0
self.end_frame = 0
self.n_frame = 0
def set_path(self, i, root):
self.index = i
self.path = os.path.join(
root, "%0.2d %s.wav" % (self.index + 1, self.name))
def get_mean_stereo(wav, N):
"""grab samples from one channel (every other sample) of frame
"""
x_data = np.fromstring(wav.readframes(N), dtype=np.int16)
return np.mean(np.reshape(x_data, (2, -1)), axis=0)
def wave_envelope(wavstream):
"""wave_envelope
"""
util.printr("computing audio energy at {} downsample rate...".format(DF))
n_window = int(np.floor(wavstream.getnframes() / DF)) - 1
x_data = np.zeros(n_window)
for i in trange(n_window):
x_data[i] = np.var(get_mean_stereo(wavstream, DF))
return x_data
def saveit(name):
""" saveit """
savepath = os.path.join(config["path"]["envelopes"], name + ".png")
util.printr("saving to {}".format(savepath))
plt.savefig(savepath, bbox_inches='tight')
def image_audio_envelope_with_tracks_markers(markers, stream):
"""track-splitting validation image
"""
    x_data = wave_envelope(wave.open(stream.wavpath))
downsamp = config["streams"]["downsample_factor"]
efr = 44100 / downsamp
starts = [mark[0] / downsamp for mark in markers]
stops = [starts[i] + mark[1] / downsamp for i, mark in enumerate(markers)]
n = np.shape(x_data)[0]
n_min = int(n / efr / 60)
# create image (one inch per minute of audio)
plt.figure(figsize=(n_min, 10))
plt.plot(x_data, marker=".", linestyle='', markersize=0.2)
    [plt.axvline(
        x=start, color="b", linestyle="--",
        linewidth=0.3) for start in starts]
    [plt.axvline(
        x=stop, color="r", linestyle="--",
        linewidth=0.3) for stop in stops]
saveit(stream.name)
def stream2tracks(streampath):
"""process raw pcm stream to tagged album tracks.
"""
util.printr("Begin stream2tracks...")
# initialize the stream
stream = Stream(streampath)
stream.decode_path().itunes_query().prepare_target().pcm2wav()
# process the stream into an album
album = Album(stream).process()
# finalize the stream into flac files with tags
stream.flacify().tagify()
# create an image of the audio envelope indicating where track splits
# have been located
    markers = [(track.start_frame, track.n_frame) for track in album.track]
    image_audio_envelope_with_tracks_markers(markers, stream)
util.printr("Finish stream2tracks.")
def main(args):
""" main
"""
# iterate over streams found in config["path"]["pcm"]
streams = glob(os.path.join(config["path"]["pcm"], "*pcm"))
for streampath in streams:
stream2tracks(streampath)
if __name__ == "__main__":
    main(sys.argv[1:])
|
mit
|
teonlamont/mne-python
|
mne/decoding/time_delaying_ridge.py
|
2
|
13108
|
# -*- coding: utf-8 -*-
"""TimeDelayingRidge class."""
# Authors: Eric Larson <[email protected]>
# Ross Maddox <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from scipy import linalg
from .base import BaseEstimator
from ..filter import next_fast_len
from ..utils import warn
from ..externals.six import string_types
def _compute_corrs(X, y, smin, smax):
"""Compute auto- and cross-correlations."""
if X.ndim == 2:
assert y.ndim == 2
X = X[:, np.newaxis, :]
y = y[:, np.newaxis, :]
assert X.shape[:2] == y.shape[:2]
len_trf = smax - smin
len_x, n_epochs, n_ch_x = X.shape
    len_y, n_epochs_y, n_ch_y = y.shape
assert len_x == len_y
n_fft = next_fast_len(X.shape[0] + max(smax, 0) - min(smin, 0) - 1)
x_xt = np.zeros([n_ch_x * len_trf] * 2)
x_y = np.zeros((len_trf, n_ch_x, n_ch_y), order='F')
for ei in range(n_epochs):
this_X = X[:, ei, :]
X_fft = np.fft.rfft(this_X, n_fft, axis=0)
y_fft = np.fft.rfft(y[:, ei, :], n_fft, axis=0)
# compute the autocorrelations
for ch0 in range(n_ch_x):
other_sl = slice(ch0, n_ch_x)
ac_temp = np.fft.irfft(X_fft[:, ch0][:, np.newaxis] *
X_fft[:, other_sl].conj(), n_fft, axis=0)
n_other = ac_temp.shape[1]
row = ac_temp[:len_trf] # zero and positive lags
col = ac_temp[-1:-len_trf:-1] # negative lags
# Our autocorrelation structure is a Toeplitz matrix, but
# it's faster to create the Toeplitz ourselves.
x_xt_temp = np.zeros((len_trf, len_trf, n_other))
for ii in range(len_trf):
x_xt_temp[ii, ii:] = row[:len_trf - ii]
x_xt_temp[ii + 1:, ii] = col[:len_trf - ii - 1]
row_adjust = np.zeros((len_trf, n_other))
col_adjust = np.zeros((len_trf, n_other))
# However, we need to adjust for coeffs that are cut off by
# the mode="same"-like behavior of the algorithm,
# i.e. the non-zero delays should not have the same AC value
# as the zero-delay ones (because they actually have fewer
# coefficients).
#
# These adjustments also follow a Toeplitz structure, but it's
# computationally more efficient to manually accumulate and
# subtract from each row and col, rather than accumulate a single
# adjustment matrix using Toeplitz repetitions then subtract
# Adjust positive lags where the tail gets cut off
for idx in range(1, smax):
ii = idx - smin
end_sl = slice(X.shape[0] - idx, -smax - min(ii, 0), -1)
c = (this_X[-idx, other_sl][np.newaxis] *
this_X[end_sl, ch0][:, np.newaxis])
r = this_X[-idx, ch0] * this_X[end_sl, other_sl]
if ii <= 0:
col_adjust += c
row_adjust += r
if ii == 0:
x_xt_temp[0, :] = row - row_adjust
x_xt_temp[1:, 0] = col - col_adjust[1:]
else:
col_adjust[:-ii] += c
row_adjust[:-ii] += r
x_xt_temp[ii, ii:] = row[:-ii] - row_adjust[:-ii]
x_xt_temp[ii + 1:, ii] = col[:-ii] - col_adjust[1:-ii]
# Adjust negative lags where the head gets cut off
x_xt_temp = x_xt_temp[::-1][:, ::-1]
row_adjust.fill(0.)
col_adjust.fill(0.)
for idx in range(0, -smin):
ii = idx + smax
start_sl = slice(idx, -smin + min(ii, 0))
c = (this_X[idx, other_sl][np.newaxis] *
this_X[start_sl, ch0][:, np.newaxis])
r = this_X[idx, ch0] * this_X[start_sl, other_sl]
if ii <= 0:
col_adjust += c
row_adjust += r
if ii == 0:
x_xt_temp[0, :] -= row_adjust
x_xt_temp[1:, 0] -= col_adjust[1:]
else:
col_adjust[:-ii] += c
row_adjust[:-ii] += r
x_xt_temp[ii, ii:] -= row_adjust[:-ii]
x_xt_temp[ii + 1:, ii] -= col_adjust[1:-ii]
x_xt_temp = x_xt_temp[::-1][:, ::-1]
for oi in range(n_other):
ch1 = oi + ch0
# Store the result
this_result = x_xt_temp[:, :, oi]
x_xt[ch0 * len_trf:(ch0 + 1) * len_trf,
ch1 * len_trf:(ch1 + 1) * len_trf] += this_result
if ch0 != ch1:
x_xt[ch1 * len_trf:(ch1 + 1) * len_trf,
ch0 * len_trf:(ch0 + 1) * len_trf] += this_result.T
# compute the crosscorrelations
cc_temp = np.fft.irfft(
y_fft * X_fft[:, ch0][:, np.newaxis].conj(), n_fft, axis=0)
if smin < 0 and smax >= 0:
x_y[:-smin, ch0] += cc_temp[smin:]
x_y[len_trf - smax:, ch0] += cc_temp[:smax]
else:
x_y[:, ch0] += cc_temp[smin:smax]
x_y = np.reshape(x_y, (n_ch_x * len_trf, n_ch_y), order='F')
return x_xt, x_y, n_ch_x
def _compute_reg_neighbors(n_ch_x, n_delays, reg_type, method='direct',
normed=False):
"""Compute regularization parameter from neighbors."""
from scipy.sparse.csgraph import laplacian
known_types = ('ridge', 'laplacian')
if isinstance(reg_type, string_types):
reg_type = (reg_type,) * 2
if len(reg_type) != 2:
raise ValueError('reg_type must have two elements, got %s'
% (len(reg_type),))
for r in reg_type:
if r not in known_types:
raise ValueError('reg_type entries must be one of %s, got %s'
% (known_types, r))
reg_time = (reg_type[0] == 'laplacian' and n_delays > 1)
reg_chs = (reg_type[1] == 'laplacian' and n_ch_x > 1)
if not reg_time and not reg_chs:
return np.eye(n_ch_x * n_delays)
# regularize time
if reg_time:
reg = np.eye(n_delays)
stride = n_delays + 1
reg.flat[1::stride] += -1
reg.flat[n_delays::stride] += -1
reg.flat[n_delays + 1:-n_delays - 1:stride] += 1
args = [reg] * n_ch_x
reg = linalg.block_diag(*args)
else:
reg = np.zeros((n_delays * n_ch_x,) * 2)
# regularize features
if reg_chs:
block = n_delays * n_delays
row_offset = block * n_ch_x
stride = n_delays * n_ch_x + 1
reg.flat[n_delays:-row_offset:stride] += -1
reg.flat[n_delays + row_offset::stride] += 1
reg.flat[row_offset:-n_delays:stride] += -1
reg.flat[:-(n_delays + row_offset):stride] += 1
assert np.array_equal(reg[::-1, ::-1], reg)
if method == 'direct':
if normed:
norm = np.sqrt(np.diag(reg))
reg /= norm
reg /= norm[:, np.newaxis]
return reg
else:
# Use csgraph. Note that our -1's above are really the neighbors!
# If we ever want to allow arbitrary adjacency matrices, this is how
# we'd want to do it.
reg = laplacian(-reg, normed=normed)
return reg
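# For orientation, a small concrete instance of the helper above: with a pure
# ridge penalty the matrix is the identity, while a 'laplacian' penalty over
# 3 delays of a single channel is the usual second-difference matrix:
#
#     _compute_reg_neighbors(1, 3, 'ridge')
#     # -> np.eye(3)
#     _compute_reg_neighbors(1, 3, 'laplacian')
#     # -> [[ 1., -1.,  0.],
#     #     [-1.,  2., -1.],
#     #     [ 0., -1.,  1.]]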
def _fit_corrs(x_xt, x_y, n_ch_x, reg_type, alpha, n_ch_in):
"""Fit the model using correlation matrices."""
# do the regularized solving
n_ch_out = x_y.shape[1]
assert x_y.shape[0] % n_ch_x == 0
n_delays = x_y.shape[0] // n_ch_x
reg = _compute_reg_neighbors(n_ch_x, n_delays, reg_type)
mat = x_xt + alpha * reg
# From sklearn
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
w = linalg.solve(mat, x_y, sym_pos=True, overwrite_a=False)
except np.linalg.LinAlgError:
warn('Singular matrix in solving dual problem. Using '
'least-squares solution instead.')
w = linalg.lstsq(mat, x_y, lapack_driver='gelsy')[0]
w = w.T.reshape([n_ch_out, n_ch_in, n_delays])
return w
class TimeDelayingRidge(BaseEstimator):
"""Ridge regression of data with time delays.
Parameters
----------
tmin : int | float
The starting lag, in seconds (or samples if ``sfreq`` == 1).
Negative values correspond to times in the past.
tmax : int | float
The ending lag, in seconds (or samples if ``sfreq`` == 1).
Positive values correspond to times in the future.
Must be >= tmin.
sfreq : float
The sampling frequency used to convert times into samples.
alpha : float
The ridge (or laplacian) regularization factor.
reg_type : str | list
Can be "ridge" (default) or "laplacian".
Can also be a 2-element list specifying how to regularize in time
and across adjacent features.
fit_intercept : bool
If True (default), the sample mean is removed before fitting.
Notes
-----
This class is meant to be used with :class:`mne.decoding.ReceptiveField`
by only implicitly doing the time delaying. For reasonable receptive
field and input signal sizes, it should be more CPU and memory
efficient by using frequency-domain methods (FFTs) to compute the
auto- and cross-correlations.
See Also
--------
mne.decoding.ReceptiveField
"""
_estimator_type = "regressor"
def __init__(self, tmin, tmax, sfreq, alpha=0., reg_type='ridge',
fit_intercept=True): # noqa: D102
if tmin > tmax:
raise ValueError('tmin must be <= tmax, got %s and %s'
% (tmin, tmax))
self.tmin = float(tmin)
self.tmax = float(tmax)
self.sfreq = float(sfreq)
self.alpha = float(alpha)
self.reg_type = reg_type
self.fit_intercept = fit_intercept
@property
def _smin(self):
return int(round(self.tmin * self.sfreq))
@property
def _smax(self):
return int(round(self.tmax * self.sfreq)) + 1
def fit(self, X, y):
"""Estimate the coefficients of the linear model.
Parameters
----------
X : array, shape (n_samples[, n_epochs], n_features)
The training input samples to estimate the linear coefficients.
y : array, shape (n_samples[, n_epochs], n_outputs)
The target values.
Returns
-------
self : instance of TimeDelayingRidge
Returns the modified instance.
"""
if X.ndim == 3:
assert y.ndim == 3
assert X.shape[:2] == y.shape[:2]
else:
assert X.ndim == 2 and y.ndim == 2
assert X.shape[0] == y.shape[0]
# These are split into two functions because it's possible that we
# might want to allow people to do them separately (e.g., to test
# different regularization parameters).
if self.fit_intercept:
# We could do this in the Fourier domain, too, but it should
# be a bit cleaner numerically to do it here.
X_offset = np.mean(X, axis=0)
y_offset = np.mean(y, axis=0)
if X.ndim == 3:
X_offset = X_offset.mean(axis=0)
y_offset = np.mean(y_offset, axis=0)
X = X - X_offset
y = y - y_offset
else:
X_offset = y_offset = 0.
self.cov_, x_y_, n_ch_x = _compute_corrs(X, y, self._smin, self._smax)
self.coef_ = _fit_corrs(self.cov_, x_y_, n_ch_x,
self.reg_type, self.alpha, n_ch_x)
# This is the sklearn formula from LinearModel (will be 0. for no fit)
if self.fit_intercept:
self.intercept_ = y_offset - np.dot(X_offset, self.coef_.sum(-1).T)
else:
self.intercept_ = 0.
return self
def predict(self, X):
"""Predict the output.
Parameters
----------
X : array, shape (n_samples[, n_epochs], n_features)
The data.
Returns
-------
X : ndarray
The predicted response.
"""
if X.ndim == 2:
X = X[:, np.newaxis, :]
singleton = True
else:
singleton = False
out = np.zeros(X.shape[:2] + (self.coef_.shape[0],))
smin = self._smin
offset = max(smin, 0)
for ei in range(X.shape[1]):
for oi in range(self.coef_.shape[0]):
for fi in range(self.coef_.shape[1]):
temp = np.convolve(X[:, ei, fi], self.coef_[oi, fi])
temp = temp[max(-smin, 0):][:len(out) - offset]
out[offset:len(temp) + offset, ei, oi] += temp
out += self.intercept_
if singleton:
out = out[:, 0, :]
return out
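# Minimal usage sketch (toy data; shapes and values are illustrative only):
#
#     rng = np.random.RandomState(0)
#     X = rng.randn(1000, 3)   # (n_samples, n_features)
#     y = rng.randn(1000, 2)   # (n_samples, n_outputs)
#     model = TimeDelayingRidge(tmin=-0.1, tmax=0.2, sfreq=100., alpha=1.)
#     model.fit(X, y)
#     model.coef_.shape        # -> (n_outputs, n_features, n_delays)
#     y_pred = model.predict(X)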
|
bsd-3-clause
|
villasv/mmd-enem2012
|
brasilplot.py
|
2
|
2590
|
from mpl_toolkits.basemap import Basemap
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
brasil = Basemap(projection='merc',
resolution = 'h', area_thresh = 0.01,
llcrnrlon=-75, llcrnrlat=-34,
urcrnrlon=-32, urcrnrlat=8)
brasil.readshapefile('./dataset/gis-dataset-brasil-master/municipio/shapefile/Munic','municipios',drawbounds=False)
brasil.readshapefile('./dataset/gis-dataset-brasil-master/uf/shapefile/uf','estados',drawbounds=False)
def plot_por_municipio(series,colors,ax=None,vmin=None,vmax=None):
    sorted_data = series.sort_values()
brasil.fillcontinents(ax=ax)
vmin = series.min() if vmin is None else vmin
vmax = series.max() if vmax is None else vmax
for shapedict,shape in zip(brasil.municipios_info,brasil.municipios):
from unidecode import unidecode
nome = shapedict['NOME'].decode("latin1")
nome = unidecode(nome).upper().replace("'",' ')
try:
valor = series[nome]
except KeyError:
valor = 0
xx,yy = zip(*shape)
cmap = plt.cm.get_cmap(colors)
color = cmap(1.-np.sqrt((1.0*valor-vmin)/(vmax-vmin)))[:3]
color = mpl.colors.rgb2hex(color)
if valor==0:
color = '#cc9966'
ax.fill(xx,yy,color,edgecolor='black',linewidth=0.0)
pass
if colors.count("_r"):
rcmap = plt.cm.get_cmap(colors[:-2])
else:
rcmap = plt.cm.get_cmap(colors+"_r")
cax = plt.cm.ScalarMappable(cmap=rcmap)
cax.set_array([vmin,vmax])
cbar = plt.colorbar(cax,ax=ax)
return
def plot_por_estado(series,colors,ax=None,vmin=None,vmax=None):
    sorted_data = series.sort_values()
brasil.fillcontinents(ax=ax)
vmin = series.min() if vmin is None else vmin
vmax = series.max() if vmax is None else vmax
for shapedict,shape in zip(brasil.estados_info,brasil.estados):
nome = shapedict['UF_05']
try:
valor = series[nome]
except KeyError:
valor = 0
xx,yy = zip(*shape)
cmap = plt.cm.get_cmap(colors)
color = cmap(1.-np.sqrt((1.0*valor-vmin)/(vmax-vmin)))[:3]
color = mpl.colors.rgb2hex(color)
if valor==0:
color = '#cc9966'
ax.fill(xx,yy,color,edgecolor='black',linewidth=1.0)
pass
if colors.count("_r"):
rcmap = plt.cm.get_cmap(colors[:-2])
else:
rcmap = plt.cm.get_cmap(colors+"_r")
cax = plt.cm.ScalarMappable(cmap=rcmap)
cax.set_array([vmin,vmax])
cbar = plt.colorbar(cax,ax=ax)
return
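# Usage sketch (the pandas Series below is hypothetical; its index must match
# the 'UF_05' codes of the shapefile for plot_por_estado, or the upper-cased
# municipality names for plot_por_municipio):
#
#     import pandas as pd
#     por_estado = pd.Series({'SP': 10.0, 'RJ': 7.5, 'MG': 5.0})
#     fig, ax = plt.subplots(figsize=(8, 8))
#     plot_por_estado(por_estado, 'YlOrRd', ax=ax)
#     fig.savefig('mapa_estados.png')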
|
gpl-3.0
|
terkkila/scikit-learn
|
sklearn/tests/test_kernel_approximation.py
|
244
|
7588
|
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
    # abbreviations for easier formulas
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
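# Worked instance of the exact additive chi-squared kernel computed above,
#     k(x, y) = sum_i 2 * x_i * y_i / (x_i + y_i),
# e.g. for x = (0.3, 0.7) and y = (0.5, 0.5):
#     k = 2*0.3*0.5/0.8 + 2*0.7*0.5/1.2 = 0.375 + 0.5833... ~= 0.958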
def test_skewed_chi2_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
c = 0.03
    # abbreviations for easier formulas
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
# this array is n_samples_x x n_samples_y big x n_features
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
    assert_true(np.all(np.isfinite(X_transformed)))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
|
bsd-3-clause
|
matthewfranglen/spark
|
python/pyspark/sql/pandas/utils.py
|
12
|
2634
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def require_minimum_pandas_version():
""" Raise ImportError if minimum version of Pandas is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pandas_version = "0.23.2"
from distutils.version import LooseVersion
try:
import pandas
have_pandas = True
except ImportError:
have_pandas = False
if not have_pandas:
raise ImportError("Pandas >= %s must be installed; however, "
"it was not found." % minimum_pandas_version)
if LooseVersion(pandas.__version__) < LooseVersion(minimum_pandas_version):
raise ImportError("Pandas >= %s must be installed; however, "
"your version was %s." % (minimum_pandas_version, pandas.__version__))
def require_minimum_pyarrow_version():
""" Raise ImportError if minimum version of pyarrow is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pyarrow_version = "0.15.1"
from distutils.version import LooseVersion
import os
try:
import pyarrow
have_arrow = True
except ImportError:
have_arrow = False
if not have_arrow:
raise ImportError("PyArrow >= %s must be installed; however, "
"it was not found." % minimum_pyarrow_version)
if LooseVersion(pyarrow.__version__) < LooseVersion(minimum_pyarrow_version):
raise ImportError("PyArrow >= %s must be installed; however, "
"your version was %s." % (minimum_pyarrow_version, pyarrow.__version__))
if os.environ.get("ARROW_PRE_0_15_IPC_FORMAT", "0") == "1":
raise RuntimeError("Arrow legacy IPC format is not supported in PySpark, "
"please unset ARROW_PRE_0_15_IPC_FORMAT")
|
mit
|
wjlei1990/spaceweight
|
src/spaceweight/spherevoronoi.py
|
1
|
18655
|
"""
Spherical Voronoi Code
.. versionadded:: 0.17.0
"""
#
# Copyright (C) Tyler Reddy, Ross Hemsley, Edd Edmondson,
# Nikolai Nowaczyk, Joe Pitt-Francis, 2015.
#
# Distributed under the same BSD license as Scipy.
#
from __future__ import print_function, division, absolute_import
import numpy as np
import scipy
import itertools
from scipy._lib._version import NumpyVersion
from scipy.spatial import distance
import math
# Whether Numpy has stacked matrix linear algebra
HAS_NUMPY_VEC_DET = (NumpyVersion(np.__version__) >= '1.8.0')
__all__ = ['SphericalVoronoi']
def convert_cartesian_to_sphere(coords, angle_measure='radians'):
'''
Take shape (3, ) cartesian coord_array and
return an array of the same shape in spherical
polar form (r, theta, phi). Based on StackOverflow
response: http://stackoverflow.com/a/4116899
use radians for the angles by default, degrees
if angle_measure == 'degrees'
'''
spherical_coord = np.zeros(coords.shape)
spherical_coord[0] = np.sqrt(np.sum(np.power(coords, 2)))
spherical_coord[1] = \
np.arctan2(coords[1], coords[0])
spherical_coord[2] = \
np.arccos(coords[2] / spherical_coord[0])
if angle_measure == 'degrees':
spherical_coord[1] = np.degrees(spherical_coord[1])
spherical_coord[2] = np.degrees(spherical_coord[2])
return spherical_coord
def calculate_haversine_distance(point_1, point_2, sphere_radius):
'''
Calculate the haversine-based distance between two points on the
surface of a sphere. Should be more accurate than the arc cosine
strategy. See, for example:
http://en.wikipedia.org/wiki/Haversine_formula
'''
spherical_array_1 = convert_cartesian_to_sphere(point_1)
spherical_array_2 = convert_cartesian_to_sphere(point_2)
lambda_1 = spherical_array_1[1]
lambda_2 = spherical_array_2[1]
phi_1 = spherical_array_1[2]
phi_2 = spherical_array_2[2]
# we rewrite the standard Haversine slightly as
# long/lat is not the same as spherical
# coordinates - phi differs by pi/4
spherical_distance = \
2.0 * sphere_radius * \
math.asin(math.sqrt(((1 - math.cos(phi_2-phi_1))/2.) +
math.sin(phi_1) * math.sin(phi_2) *
((1 - math.cos(lambda_2-lambda_1))/2.)))
return spherical_distance
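# Sanity-check sketch: two antipodal points on the unit sphere lie half a
# great circle apart, so the haversine distance is pi:
#
#     calculate_haversine_distance(np.array([0., 0., 1.]),
#                                  np.array([0., 0., -1.]), 1.0)
#     # -> 3.14159... (== pi)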
def determinant_fallback(m):
"""
Calculates the determinant of m using Laplace expansion in the first row.
This function is used only as a fallback to ensure backwards compatibility
with Python 2.6.
:param m: an array of floats assumed to be of shape (4, 4)
returns: determinant of m
"""
def det3(a):
"""
Calculates the determinant of a using Sarrus' rule.
a : an array of floats assumed to be of shape (3, 3)
returns : determinant of a
"""
return \
a[0][0] * a[1][1] * a[2][2] \
+ a[0][1] * a[1][2] * a[2][0] \
+ a[0][2] * a[1][0] * a[2][1] \
- a[0][2] * a[1][1] * a[2][0] \
- a[0][1] * a[1][0] * a[2][2] \
- a[0][0] * a[1][2] * a[2][1]
minors = [det3(np.delete(np.delete(m, 0, axis=0), k, axis=1))
for k in range(0, 4)]
return sum([(-1) ** k * m[0][k] * minors[k] for k in range(0, 4)])
def calc_circumcenters(tetrahedrons):
"""
Calculates the cirumcenters of the circumspheres of tetrahedrons.
An implementation based on
http://mathworld.wolfram.com/Circumsphere.html
Parameters
----------
tetrahedrons : an array of shape (N, 4, 3)
consisting of N tetrahedrons defined by 4 points in 3D
Returns
----------
circumcenters : an array of shape (N, 3)
consisting of the N circumcenters of the tetrahedrons in 3D
"""
num = tetrahedrons.shape[0]
a = np.concatenate((tetrahedrons, np.ones((num, 4, 1))), axis=2)
sums = np.sum(tetrahedrons ** 2, axis=2)
d = np.concatenate((sums[:, :, np.newaxis], a), axis=2)
dx = np.delete(d, 1, axis=2)
dy = np.delete(d, 2, axis=2)
dz = np.delete(d, 3, axis=2)
if HAS_NUMPY_VEC_DET:
dx = np.linalg.det(dx)
dy = -np.linalg.det(dy)
dz = np.linalg.det(dz)
a = np.linalg.det(a)
else:
dx = np.array([determinant_fallback(m) for m in dx])
dy = -np.array([determinant_fallback(m) for m in dy])
dz = np.array([determinant_fallback(m) for m in dz])
a = np.array([determinant_fallback(m) for m in a])
nominator = np.vstack((dx, dy, dz))
denominator = 2*a
return (nominator / denominator).T
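# Quick check sketch: for a regular tetrahedron centred on the origin, the
# circumcenter of its circumsphere is the origin itself:
#
#     tets = np.array([[[1., 1., 1.], [1., -1., -1.],
#                       [-1., 1., -1.], [-1., -1., 1.]]])
#     calc_circumcenters(tets)
#     # -> approximately [[0., 0., 0.]]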
def project_to_sphere(points, center, radius):
"""
Projects the elements of points onto the sphere defined
by center and radius.
Parameters
----------
points : array of floats of shape (npoints, ndim)
consisting of the points in a space of dimension ndim
center : array of floats of shape (ndim,)
the center of the sphere to project on
radius : float
the radius of the sphere to project on
returns: array of floats of shape (npoints, ndim)
the points projected onto the sphere
"""
lengths = scipy.spatial.distance.cdist(points, np.array([center]))
return (points - center) / lengths * radius + center
class SphericalVoronoi:
""" Voronoi diagrams on the surface of a sphere.
.. versionadded:: 0.17.0
Parameters
----------
points : ndarray of floats, shape (npoints, 3)
Coordinates of points to construct a spherical
Voronoi diagram from
radius : float, optional
Radius of the sphere (Default: 1)
center : ndarray of floats, shape (3,)
Center of sphere (Default: origin)
Attributes
----------
points : double array of shape (npoints, 3)
the points in 3D to generate the Voronoi diagram from
radius : double
radius of the sphere
Default: None (forces estimation, which is less precise)
center : double array of shape (3,)
center of the sphere
Default: None (assumes sphere is centered at origin)
vertices : double array of shape (nvertices, 3)
Voronoi vertices corresponding to points
regions : list of list of integers of shape (npoints, _ )
the n-th entry is a list consisting of the indices
of the vertices belonging to the n-th point in points
Notes
----------
The spherical Voronoi diagram algorithm proceeds as follows. The Convex
Hull of the input points (generators) is calculated, and is equivalent to
their Delaunay triangulation on the surface of the sphere [Caroli]_.
A 3D Delaunay tetrahedralization is obtained by including the origin of
the coordinate system as the fourth vertex of each simplex of the Convex
Hull. The circumcenters of all tetrahedra in the system are calculated and
projected to the surface of the sphere, producing the Voronoi vertices.
The Delaunay tetrahedralization neighbour information is then used to
order the Voronoi region vertices around each generator. The latter
approach is substantially less sensitive to floating point issues than
angle-based methods of Voronoi region vertex sorting.
The surface area of spherical polygons is calculated by decomposing them
into triangles and using L'Huilier's Theorem to calculate the spherical
excess of each triangle [Weisstein]_. The sum of the spherical excesses is
multiplied by the square of the sphere radius to obtain the surface area
of the spherical polygon. For nearly-degenerate spherical polygons an area
of approximately 0 is returned by default, rather than attempting the
unstable calculation.
Empirical assessment of spherical Voronoi algorithm performance suggests
quadratic time complexity (loglinear is optimal, but algorithms are more
challenging to implement). The reconstitution of the surface area of the
sphere, measured as the sum of the surface areas of all Voronoi regions,
is closest to 100 % for larger (>> 10) numbers of generators.
References
----------
.. [Caroli] Caroli et al. Robust and Efficient Delaunay triangulations of
points on or close to a sphere. Research Report RR-7004, 2009.
.. [Weisstein] "L'Huilier's Theorem." From MathWorld -- A Wolfram Web
Resource. http://mathworld.wolfram.com/LHuiliersTheorem.html
See Also
--------
Voronoi : Conventional Voronoi diagrams in N dimensions.
Examples
--------
>>> from matplotlib import colors
>>> from mpl_toolkits.mplot3d.art3d import Poly3DCollection
>>> import matplotlib.pyplot as plt
>>> from scipy.spatial import SphericalVoronoi
    >>> from mpl_toolkits.mplot3d import proj3d
    >>> import numpy as np
>>> # set input data
>>> points = np.array([[0, 0, 1], [0, 0, -1], [1, 0, 0],
... [0, 1, 0], [0, -1, 0], [-1, 0, 0], ])
>>> center = np.array([0, 0, 0])
>>> radius = 1
>>> # calculate spherical Voronoi diagram
>>> sv = SphericalVoronoi(points, radius, center)
>>> # sort vertices (optional, helpful for plotting)
>>> sv.sort_vertices_of_regions()
>>> # generate plot
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111, projection='3d')
>>> # plot the unit sphere for reference (optional)
>>> u = np.linspace(0, 2 * np.pi, 100)
>>> v = np.linspace(0, np.pi, 100)
>>> x = np.outer(np.cos(u), np.sin(v))
>>> y = np.outer(np.sin(u), np.sin(v))
>>> z = np.outer(np.ones(np.size(u)), np.cos(v))
>>> ax.plot_surface(x, y, z, color='y', alpha=0.1)
>>> # plot generator points
>>> ax.scatter(points[:, 0], points[:, 1], points[:, 2], c='b')
>>> # plot Voronoi vertices
>>> ax.scatter(sv.vertices[:, 0], sv.vertices[:, 1], sv.vertices[:, 2],
... c='g')
>>> # indicate Voronoi regions (as Euclidean polygons)
>>> for region in sv.regions:
... random_color = colors.rgb2hex(np.random.rand(3))
... polygon = Poly3DCollection([sv.vertices[region]], alpha=1.0)
... polygon.set_color(random_color)
... ax.add_collection3d(polygon)
>>> plt.show()
"""
def __init__(self, points, radius=None, center=None):
"""
Initializes the object and starts the computation of the Voronoi
diagram.
points : The generator points of the Voronoi diagram assumed to be
all on the sphere with radius supplied by the radius parameter and
center supplied by the center parameter.
radius : The radius of the sphere. Will default to 1 if not supplied.
center : The center of the sphere. Will default to the origin if not
supplied.
"""
self.points = points
if np.any(center):
self.center = center
else:
self.center = np.zeros(3)
if radius:
self.radius = radius
else:
self.radius = 1
self.vertices = None
self.regions = None
self._tri = None
self._calc_vertices_regions()
def _calc_vertices_regions(self):
"""
Calculates the Voronoi vertices and regions of the generators stored
in self.points. The vertices will be stored in self.vertices and the
regions in self.regions.
This algorithm was discussed at PyData London 2015 by
Tyler Reddy, Ross Hemsley and Nikolai Nowaczyk
"""
# perform 3D Delaunay triangulation on data set
# (here ConvexHull can also be used, and is faster)
self._tri = scipy.spatial.ConvexHull(self.points)
# add the center to each of the simplices in tri to get the same
# tetrahedrons we'd have gotten from Delaunay tetrahedralization
tetrahedrons = self._tri.points[self._tri.simplices]
tetrahedrons = np.insert(
tetrahedrons,
3,
np.array([self.center]),
axis=1
)
# produce circumcenters of tetrahedrons from 3D Delaunay
circumcenters = calc_circumcenters(tetrahedrons)
# project tetrahedron circumcenters to the surface of the sphere
self.vertices = project_to_sphere(
circumcenters,
self.center,
self.radius
)
# calculate regions from triangulation
generator_indices = np.arange(self.points.shape[0])
filter_tuple = np.where((np.expand_dims(self._tri.simplices,
-1) == generator_indices).any(axis=1))
list_tuples_associations = zip(filter_tuple[1],
filter_tuple[0])
list_tuples_associations = sorted(list_tuples_associations,
key=lambda t: t[0])
# group by generator indices to produce
# unsorted regions in nested list
groups = []
for k, g in itertools.groupby(list_tuples_associations,
lambda t: t[0]):
groups.append([element[1] for element in list(g)])
self.regions = groups
def sort_vertices_of_regions(self):
"""
For each region in regions, it sorts the indices of the Voronoi
vertices such that the resulting points are in a clockwise or
counterclockwise order around the generator point.
This is done as follows: Recall that the n-th region in regions
surrounds the n-th generator in points and that the k-th
Voronoi vertex in vertices is the projected circumcenter of the
tetrahedron obtained by the k-th triangle in _tri.simplices (and the
origin). For each region n, we choose the first triangle (=Voronoi
vertex) in _tri.simplices and a vertex of that triangle not equal to
the center n. These determine a unique neighbor of that triangle,
which is then chosen as the second triangle. The second triangle
will have a unique vertex not equal to the current vertex or the
center. This determines a unique neighbor of the second triangle,
which is then chosen as the third triangle and so forth. We proceed
through all the triangles (=Voronoi vertices) belonging to the
generator in points and obtain a sorted version of the vertices
of its surrounding region.
"""
for n in range(0, len(self.regions)):
remaining = self.regions[n][:]
sorted_vertices = []
current_simplex = remaining[0]
current_vertex = [k for k in self._tri.simplices[current_simplex]
if k != n][0]
remaining.remove(current_simplex)
sorted_vertices.append(current_simplex)
while remaining:
current_simplex = [
s for s in remaining
if current_vertex in self._tri.simplices[s]][0]
current_vertex = [
s for s in self._tri.simplices[current_simplex]
if s != n and s != current_vertex][0]
remaining.remove(current_simplex)
sorted_vertices.append(current_simplex)
self.regions[n] = sorted_vertices
def compute_surface_area(self):
        '''
        Returns a tuple ``(surface_areas, coverage)`` where ``surface_areas``
        is an array with the estimated surface area of the Voronoi region
        polygon for each generator (original data point) index, and
        ``coverage`` is the fraction of the total sphere surface covered by
        the summed region areas.
        '''
surface_area_list = []
for _idx, vertex_idx in enumerate(self.regions):
# create the array of vertices
vertex_array = []
for _i in vertex_idx:
vertex_array.append(self.vertices[_i])
vertex_array = np.array(vertex_array)
# calculate surface area of one patch
_surface_area = self._calculate_patch_surface_area(vertex_array)
assert _surface_area > 0, \
"Obtained a surface area of zero for a Voronoi region."
surface_area_list.append(_surface_area)
surface_area_list = np.array(surface_area_list)
coverage = self._surface_area_coverage(surface_area_list)
return surface_area_list, coverage
def _surface_area_coverage(self, surface_area_list):
sphere_surface = 4 * np.pi * self.radius ** 2
return np.sum(surface_area_list) / sphere_surface
@staticmethod
def _calculate_patch_surface_area(polygon_vertices):
'''
Calculate the surface area of a polygon on the surface of
a sphere. Based on equation provided here:
http://mathworld.wolfram.com/LHuiliersTheorem.html
Decompose into triangles, calculate excess for each
'''
# handle nearly-degenerate vertices on the unit sphere by
# returning an area close to 0 -- may be better options,
# but this is my current solution to prevent crashes, etc.
# seems to be relatively rare in my own work, but
# sufficiently common to cause crashes when iterating
# over large amounts of messy data
if distance.pdist(polygon_vertices).min() < (10 ** -7):
return 10 ** -8
else:
n = polygon_vertices.shape[0]
# point we start from
root_point = polygon_vertices[0]
totalexcess = 0
# loop from 1 to n-2, with point 2 to n-1 as other
# vertex of triangle this could definitely be
# written more nicely
b_point = polygon_vertices[1]
root_b_dist = \
calculate_haversine_distance(root_point, b_point, 1.0)
for i in 1 + np.arange(n - 2):
a_point = b_point
b_point = polygon_vertices[i+1]
root_a_dist = root_b_dist
root_b_dist = \
calculate_haversine_distance(root_point, b_point, 1.0)
a_b_dist = \
calculate_haversine_distance(a_point, b_point, 1.0)
s = (root_a_dist + root_b_dist + a_b_dist) / 2
totalexcess += \
4 * math.atan(math.sqrt(math.tan(0.5 * s) *
math.tan(0.5 * (s-root_a_dist)) *
math.tan(0.5 * (s-root_b_dist)) *
math.tan(0.5 * (s-a_b_dist))))
return totalexcess
|
gpl-3.0
|
Scan-o-Matic/scanomatic
|
scanomatic/io/meta_data.py
|
1
|
14261
|
from __future__ import absolute_import
import csv
from itertools import izip
from types import StringTypes
import numpy as np
#
# OPTIONAL IMPORT
#
try:
import pandas as pd
_PANDAS = True
except ImportError:
_PANDAS = False
pd = None
#
# INTERNAL DEPENDENCIES
#
import scanomatic.io.logger as logger
#
# METHODS
#
class DataLoader(object):
_SUFFIXES = []
def __init__(self, path):
self._logger = logger.Logger("MetaDataLoader")
self._path = path
self._sheet = -1
self._entries = []
self._row_iterators = []
self._columns = []
self._sheet_names = []
self._headers = []
""":type : [str]"""
def _reset(self):
self._sheet = -1
self._columns = []
self._entries = []
self._row_iterators = []
self._sheet_names = []
self._headers = []
@property
def rows(self):
return self._entries[self._sheet]
def get_sheet_name(self, sheet_index):
return self._sheet_names[sheet_index]
def get_next(self):
for row in self._row_iterators[self._sheet]:
yield self._get_next_row(row)
@staticmethod
def _get_next_row(row):
        raise NotImplementedError
def _get_empty_headers(self):
return [None for _ in range(self._columns[self._sheet])]
def get_headers(self, plate_size):
if self._headers[self._sheet] is None:
if self.sheet_is_valid_with_headers(plate_size):
self._headers[self._sheet] = self._get_next_row(
self._row_iterators[self._sheet].next())
elif self.sheet_is_valid_without_headers(plate_size):
self._headers[self._sheet] = self._get_empty_headers()
return self._headers[self._sheet]
@classmethod
def can_load(cls, path):
"""
Args:
path:
:type path : str
Returns:
"""
suffix = path[::-1].split(".", 1)[0][::-1].lower()
return suffix in cls._SUFFIXES
@property
def sheets(self):
return len(self._entries)
def next_sheet(self, plate_size):
self._sheet += 1
while not self.sheet_is_valid(plate_size):
if self._sheet >= len(self._entries):
return None
self._logger.warning(
"Sheet {0} ({1} zero-indexed) has {2} and {3} entry rows. This doesn't match plate size {4}".format(
self._sheet_names[self._sheet],
self._sheet,
" header row" if self.sheet_is_valid_without_headers(plate_size) else "no headers",
self._entries[self._sheet],
plate_size))
self._sheet += 1
return self._sheet
@property
def has_more_data(self):
return self._sheet < len(self._entries)
def sheet_is_valid_with_headers(self, plate_size):
return plate_size % (self._entries[self._sheet] - 1) == 0 and \
(plate_size / (self._entries[self._sheet] - 1)) % 4 == 1
def sheet_is_valid_without_headers(self, plate_size):
return plate_size % self._entries[self._sheet] == 0 and \
plate_size / self._entries[self._sheet] % 4 == 1
def sheet_is_valid(self, plate_size):
if 0 <= self._sheet < len(self._entries):
return (
self.sheet_is_valid_with_headers(plate_size) or
self.sheet_is_valid_without_headers(plate_size))
return False
class ExcelLoader(DataLoader):
_SUFFIXES = ['xls', 'xlsx']
def __init__(self, path):
super(ExcelLoader, self).__init__(path)
self._data = None
self._load()
def _load(self):
self._data = []
self._reset()
doc = pd.ExcelFile(self._path)
for n in doc.sheet_names:
self._sheet_names.append(n)
self._load_sheet(doc.parse(n, header=None).fillna(value=u''))
def _load_sheet(self, df):
"""
Args:
df: DataFrame / sheet
:type df : pandas.DataFrame
Returns:
"""
self._data.append(df)
self._entries.append(df.shape[0])
self._columns.append(df.shape[1])
self._headers.append(None)
self._row_iterators.append(df.iterrows())
@staticmethod
def _get_next_row(row):
return row[1].tolist()
class CSVLoader(DataLoader):
_SUFFIXES = ("csv", "tsv", "tab", "txt")
def __init__(self, path):
super(CSVLoader, self).__init__(path)
self._load()
def _load(self):
self._reset()
with open(self._path) as fh:
raw_data = fh.readlines()
            dialect = csv.Sniffer().sniff(raw_data[0])
            rows = list(csv.reader(raw_data, dialect=dialect))
        self._columns.append(max(len(row) for row in rows))
        self._entries.append(len(rows))
        self._headers.append(None)
        self._row_iterators.append(iter(rows))
@staticmethod
def _get_next_row(row):
return row
class MetaData2(object):
_LOADERS = (ExcelLoader, CSVLoader)
def __init__(self, plate_shapes, *paths):
self._logger = logger.Logger("MetaData")
self._plate_shapes = plate_shapes
self._data = tuple(
None if shape is None else np.empty(shape, dtype=np.object)
for shape in plate_shapes)
self._headers = list(None for _ in plate_shapes)
""":type self._headers: list[(int, int) | None]"""
self._loading_plate = 0
self._loading_offset = []
self._paths = paths
self._load(*paths)
if not self.loaded:
self._logger.warning("Not enough meta-data to fill all plates")
def __call__(self, plate, outer, inner):
return self._data[plate][outer, inner]
def __getitem__(self, plate):
return self._data[plate].tolist()
def __eq__(self, other):
if hasattr(other, "shapes") and self.shapes != other.shapes:
return False
for plate, outer, inner in self.generate_coordinates():
if self(plate, outer, inner) != other(plate, outer, inner):
return False
return True
def __getstate__(self):
return {k: v for k, v in self.__dict__.iteritems() if k != "_logger"}
def __setstate__(self, state):
self.__dict__.update(state)
def get_column_index_from_all_plates(self, index):
plates = []
for id_plate, (outers, inners) in enumerate(self._plate_shapes):
plate = []
plates.append(plate)
for id_outer in range(outers):
data = []
plate.append(data)
for id_inner in range(inners):
data.append(self(id_plate, id_outer, id_inner)[index])
return plates
def get_header_row(self, plate):
"""
Args:
plate: Plate index
:type plate : int
Returns: Header row
:rtype : list[str]
"""
return self._headers[plate]
@property
def shapes(self):
return self._plate_shapes
@property
def loaded(self):
return self._loading_plate >= len(self._plate_shapes)
@property
def _plate_completed(self):
return len(self._loading_offset) == 0
@staticmethod
def _get_loader(path):
for loader in MetaData2._LOADERS:
if loader.can_load(path):
return loader(path)
return None
def _load(self, *paths):
for path in paths:
if self.loaded:
return
loader = MetaData2._get_loader(path)
""":type : DataLoader"""
if loader is None:
self._logger.warning(
"Unknown file format, can't load {0}".format(path))
continue
size = self._get_sought_size()
while loader.has_more_data:
sheet_id = loader.next_sheet(size)
if sheet_id is None:
break
headers = loader.get_headers(size)
if not self._has_matching_headers(headers):
self._logger.warning(
"Sheet {0} ({1}) of {2} headers don't match {3} != {4}".format(
loader.get_sheet_name(sheet_id),
sheet_id,
path,
headers,
self._headers[self._loading_plate]))
continue
self._logger.info("Using {0}:{1} for plate {2}".format(
path, loader.get_sheet_name(sheet_id),
self._loading_plate))
self._update_headers_if_needed(headers)
self._update_meta_data(loader)
self._update_loading_offsets()
if self._plate_completed:
self._loading_plate += 1
if self.loaded:
return
size = self._get_sought_size()
def _get_sought_size(self):
size = np.prod(self._plate_shapes[self._loading_plate])
return size / 4 ** len(self._loading_offset)
def _update_loading_offsets(self):
if not self._loading_offset:
return
outer, inner = self._loading_offset[-1]
inner += 1
if inner > 1:
inner %= 2
outer += 1
if outer > 1:
self._loading_offset = self._loading_offset[:-1]
self._update_loading_offsets()
return
self._loading_offset[-1] = (outer, inner)
def _has_matching_headers(self, headers):
if self._headers[self._loading_plate] is None:
return True
elif len(self._headers[self._loading_plate]) != len(headers):
return False
elif all(h is None for h in self._headers[self._loading_plate]):
return True
else:
return all(a == b for a, b in
zip(self._headers[self._loading_plate], headers))
def _update_headers_if_needed(self, headers):
if (self._headers[self._loading_plate] is None or
all(h is None for h in self._headers[self._loading_plate])):
self._headers[self._loading_plate] = headers
def _update_meta_data(self, loader):
slotter = self._get_slotting_iter(loader)
for meta_data in loader.get_next():
self._data[self._loading_plate][slotter.next()] = meta_data
def _get_slotting_iter(self, loader):
"""
Args:
loader:
:type loader: DataLoader
Returns:
Coordinate iterator
:rtype : iter
"""
        def coord_lister(outer, inner, max_outer, max_inner):
            # Keep yielding coordinates for as long as the caller asks; the
            # slotting iterator is consumed once per data row of the sheet.
            while True:
                yield outer, inner
                inner += factor
                if inner >= max_inner:
                    inner %= max_inner
                    outer += factor
                    if outer >= max_outer:
                        outer %= max_outer
factor = np.log2(
self._data[self._loading_plate].size /
(loader.rows - loader.sheet_is_valid_with_headers(
np.prod(self._plate_shapes[self._loading_plate]))))
if factor != int(factor):
return None
elif factor == 0:
# Full plate
return izip(*np.unravel_index(
np.arange(self._data[self._loading_plate].size),
self._plate_shapes[self._loading_plate]))
else:
# Partial plate
factor = int(factor)
if factor > len(self._loading_offset):
self._loading_offset += [
(0, 0) for _ in range(factor - len(self._loading_offset))]
outer, inner = map(
sum,
zip(*((o*2**l, i*2**l) for l, (o, i)
in enumerate(self._loading_offset))))
factor = 2 ** len(self._loading_offset)
max_outer, max_inner = self._plate_shapes[self._loading_plate]
return coord_lister(outer, inner, max_outer, max_inner)
def get_data_from_numpy_where(self, plate, selection):
selection = zip(*selection)
for outer, inner in selection:
yield self(plate, outer, inner)
def find(self, value, column=None):
"""Generate coordinate tuples for where key matches meta-data
:param value : Search criteria
:type value : str
:param column : Optional column name to limit search to, default (None)
searches all columns
:type column: str | None
Returns
-------
generator
Each item being a (plate, row, column)-tuple.
"""
for id_plate, _ in enumerate(self._plate_shapes):
yield self.find_on_plate(id_plate, value, column=column)
def find_on_plate(self, plate, value, column=None):
if isinstance(column, StringTypes):
column = self.get_header_index(plate, column)
if column < 0:
yield tuple()
for id_plate, id_row, id_col in self.generate_coordinates(plate=plate):
data = self(id_plate, id_row, id_col)
if column is None:
if value in data:
yield (id_row, id_col)
else:
if value == data[column]:
yield (id_row, id_col)
def get_header_index(self, plate, header):
for i, column_header in enumerate(self.get_header_row(plate)):
if column_header.lower() == header.lower():
return i
return -1
def generate_coordinates(self, plate=None):
plates = ((i, p) for i, p in enumerate(self._plate_shapes)
if plate is None or i is plate)
for id_plate, shape in plates:
if shape is not None:
for id_row in xrange(shape[0]):
for id_col in xrange(shape[1]):
yield id_plate, id_row, id_col
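# --- Hedged usage sketch (added for illustration; not part of the original
# module).  It demonstrates the query API defined above on an already
# constructed and loaded MetaData2 instance; the search value "control" is
# only an example placeholder.
def _example_metadata_lookup(meta, value="control"):
    # `find` yields one generator per plate; each inner generator yields the
    # (row, column) coordinates whose meta-data row contains `value`.
    if not meta.loaded:
        return []
    return [list(plate_hits) for plate_hits in meta.find(value)]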
|
gpl-3.0
|
nelson-liu/scikit-learn
|
sklearn/datasets/base.py
|
5
|
26099
|
"""
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import csv
import sys
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os.path import splitext
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets
Dictionary-like object that exposes its keys as attributes.
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
super(Bunch, self).__init__(kwargs)
def __setattr__(self, key, value):
self[key] = value
def __dir__(self):
return self.keys()
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __setstate__(self, state):
        # Bunch pickles generated with scikit-learn 0.16.* have a non-empty
        # __dict__. This causes a surprising behaviour when loading these
        # pickles with scikit-learn 0.17: reading bunch.key uses __dict__
        # but assigning to bunch.key uses __setattr__ and
# only changes bunch['key']. More details can be found at:
# https://github.com/scikit-learn/scikit-learn/issues/6196.
# Overriding __setstate__ to be a noop has the effect of
# ignoring the pickled __dict__
pass
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
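# Hedged illustration (added; not part of the original module).  Shows the
# resolution order described in get_data_home's docstring: an explicit path
# wins, otherwise the SCIKIT_LEARN_DATA environment variable, otherwise
# ~/scikit_learn_data.  The '/tmp' path is only an example value.
def _example_data_home():  # pragma: no cover
    default_home = get_data_home()  # honours SCIKIT_LEARN_DATA if it is set
    custom_home = get_data_home('/tmp/sklearn_data_example')  # created if missing
    clear_data_home(custom_home)  # remove the example cache folder again
    return default_home, custom_home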
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
    Individual samples are assumed to be files stored in a two-level folder
    structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
    Similar feature extractors should be built for other kinds of unstructured
data input such as images, audio, video, ...
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description : string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
        Whether or not to load the content of the different files. If
        true, a 'data' attribute containing the text information is present
        in the returned data structure. If not, a 'filenames' attribute
        gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
decode_error : {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
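# Hedged illustration (added; not part of the original module).  Sketches how
# the two-level folder layout documented above maps onto the returned Bunch.
# The container path is a hypothetical placeholder.
def _example_load_files():  # pragma: no cover
    dataset = load_files('/path/to/container_folder',  # hypothetical path
                         encoding='utf-8', shuffle=True, random_state=0)
    # Each file dataset.filenames[i] carries the label
    # dataset.target_names[dataset.target[i]], taken from its parent folder.
    return dataset.target_names, dataset.target[:5]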
def load_iris(return_X_y=False):
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
return_X_y : boolean, default=False.
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'iris.csv')) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float64)
target[i] = np.asarray(ir[-1], dtype=np.int)
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
if return_X_y:
return data, target
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
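# Hedged illustration (added): the ``return_X_y`` switch documented above
# returns a plain (data, target) tuple instead of a Bunch.
def _example_load_iris_xy():  # pragma: no cover
    X, y = load_iris(return_X_y=True)
    assert X.shape == (150, 4) and y.shape == (150,)
    return X, y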
def load_breast_cancer(return_X_y=False):
"""Load and return the breast cancer wisconsin dataset (classification).
The breast cancer dataset is a classic and very easy binary classification
dataset.
================= ==============
Classes 2
Samples per class 212(M),357(B)
Samples total 569
Dimensionality 30
Features real, positive
================= ==============
Parameters
----------
return_X_y : boolean, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
The copy of UCI ML Breast Cancer Wisconsin (Diagnostic) dataset is
downloaded from:
https://goo.gl/U2Uwz2
Examples
--------
Let's say you are interested in the samples 10, 50, and 85, and want to
know their class name.
>>> from sklearn.datasets import load_breast_cancer
>>> data = load_breast_cancer()
>>> data.target[[10, 50, 85]]
array([0, 1, 0])
>>> list(data.target_names)
['malignant', 'benign']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'breast_cancer.csv')) as csv_file:
data_file = csv.reader(csv_file)
first_line = next(data_file)
n_samples = int(first_line[0])
n_features = int(first_line[1])
target_names = np.array(first_line[2:4])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for count, value in enumerate(data_file):
data[count] = np.asarray(value[:-1], dtype=np.float64)
target[count] = np.asarray(value[-1], dtype=np.int)
with open(join(module_path, 'descr', 'breast_cancer.rst')) as rst_file:
fdescr = rst_file.read()
feature_names = np.array(['mean radius', 'mean texture',
'mean perimeter', 'mean area',
'mean smoothness', 'mean compactness',
'mean concavity', 'mean concave points',
'mean symmetry', 'mean fractal dimension',
'radius error', 'texture error',
'perimeter error', 'area error',
'smoothness error', 'compactness error',
'concavity error', 'concave points error',
'symmetry error', 'fractal dimension error',
'worst radius', 'worst texture',
'worst perimeter', 'worst area',
'worst smoothness', 'worst compactness',
'worst concavity', 'worst concave points',
'worst symmetry', 'worst fractal dimension'])
if return_X_y:
return data, target
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=feature_names)
def load_digits(n_class=10, return_X_y=False):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
return_X_y : boolean, default=False.
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import matplotlib.pyplot as plt #doctest: +SKIP
>>> plt.gray() #doctest: +SKIP
>>> plt.matshow(digits.images[0]) #doctest: +SKIP
>>> plt.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
target = data[:, -1].astype(np.int)
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
if return_X_y:
return flat_data, target
return Bunch(data=flat_data,
target=target,
target_names=np.arange(10),
images=images,
DESCR=descr)
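# Hedged illustration (added): ``n_class`` restricts the returned samples to
# the first few digit classes, as described above.
def _example_load_digits_subset():  # pragma: no cover
    digits = load_digits(n_class=4)
    assert set(digits.target) <= set(range(4))  # only digits 0-3 remain
    return digits.data.shape, digits.images.shape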
def load_diabetes(return_X_y=False):
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
return_X_y : boolean, default=False.
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
if return_X_y:
return data, target
return Bunch(data=data, target=target,
feature_names=['age', 'sex', 'bmi', 'bp',
's1', 's2', 's3', 's4', 's5', 's6'])
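# Hedged illustration (added): shape check matching the table in the
# docstring above, plus the ``return_X_y`` form.
def _example_load_diabetes():  # pragma: no cover
    bunch = load_diabetes()
    assert bunch.data.shape == (442, 10) and bunch.target.shape == (442,)
    X, y = load_diabetes(return_X_y=True)
    return X.shape, y.shape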
def load_linnerud(return_X_y=False):
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Parameters
----------
return_X_y : boolean, default=False.
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
if return_X_y:
return data_exercise, data_physiological
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
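# Hedged illustration (added): the two multivariate blocks documented above
# are both 20 x 3 (exercise variables vs. physiological measurements).
def _example_load_linnerud():  # pragma: no cover
    bunch = load_linnerud()
    assert bunch.data.shape == (20, 3) and bunch.target.shape == (20, 3)
    return bunch.feature_names, bunch.target_names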
def load_boston(return_X_y=False):
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Parameters
----------
return_X_y : boolean, default=False.
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
and 'DESCR', the full description of the dataset.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
with open(fdescr_name) as f:
descr_text = f.read()
data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float64)
target[i] = np.asarray(d[-1], dtype=np.float64)
if return_X_y:
return data, target
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=descr_text)
def load_sample_images():
"""Load sample images for image manipulation.
    Loads both the ``china`` and ``flower`` sample images.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
-----------
image_name : {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img : 3D array
The image as a numpy array: height x width x color
Examples
---------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
def _pkl_filepath(*args, **kwargs):
"""Ensure different filenames for Python 2 and Python 3 pickles
An object pickled under Python 3 cannot be loaded under Python 2.
An object pickled under Python 2 can sometimes not be loaded
correctly under Python 3 because some Python 2 strings are decoded as
Python 3 strings which can be problematic for objects that use Python 2
strings as byte buffers for numerical data instead of "real" strings.
    Therefore, dataset loaders in scikit-learn use different files for pickles
    managed by Python 2 and Python 3 in the same SCIKIT_LEARN_DATA folder so
    as to avoid conflicts.
    args[-1] is expected to be the ".pkl" filename. Under Python 3, a
    suffix is inserted before the extension, so that
    _pkl_filepath('/path/to/folder', 'filename.pkl') returns:
- /path/to/folder/filename.pkl under Python 2
- /path/to/folder/filename_py3.pkl under Python 3+
"""
py3_suffix = kwargs.get("py3_suffix", "_py3")
basename, ext = splitext(args[-1])
if sys.version_info[0] >= 3:
basename += py3_suffix
new_args = args[:-1] + (basename + ext,)
return join(*new_args)
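# Hedged illustration (added): the suffixing behaviour described above.
def _example_pkl_filepath():  # pragma: no cover
    # '/path/to/folder/filename.pkl' under Python 2,
    # '/path/to/folder/filename_py3.pkl' under Python 3+.
    return _pkl_filepath('/path/to/folder', 'filename.pkl')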
|
bsd-3-clause
|
siddharthteotia/arrow
|
python/benchmarks/convert_pandas.py
|
6
|
2376
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pandas as pd
import pyarrow as pa
class PandasConversionsBase(object):
def setup(self, n, dtype):
if dtype == 'float64_nans':
arr = np.arange(n).astype('float64')
arr[arr % 10 == 0] = np.nan
else:
arr = np.arange(n).astype(dtype)
self.data = pd.DataFrame({'column': arr})
class PandasConversionsToArrow(PandasConversionsBase):
param_names = ('size', 'dtype')
params = ((10, 10 ** 6), ('int64', 'float64', 'float64_nans', 'str'))
def time_from_series(self, n, dtype):
pa.Table.from_pandas(self.data)
class PandasConversionsFromArrow(PandasConversionsBase):
param_names = ('size', 'dtype')
params = ((10, 10 ** 6), ('int64', 'float64', 'float64_nans', 'str'))
def setup(self, n, dtype):
super(PandasConversionsFromArrow, self).setup(n, dtype)
self.arrow_data = pa.Table.from_pandas(self.data)
def time_to_series(self, n, dtype):
self.arrow_data.to_pandas()
class ZeroCopyPandasRead(object):
def setup(self):
# Transpose to make column-major
values = np.random.randn(10, 100000)
df = pd.DataFrame(values.T)
ctx = pa.default_serialization_context()
self.serialized = ctx.serialize(df)
self.as_buffer = self.serialized.to_buffer()
self.as_components = self.serialized.to_components()
def time_deserialize_from_buffer(self):
pa.deserialize(self.as_buffer)
def time_deserialize_from_components(self):
pa.deserialize_components(self.as_components)
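# Hedged illustration (added; not part of the benchmark suite): the same
# pandas <-> Arrow round trip the classes above measure, written as a plain
# function so it can be tried outside of asv.  Size and dtype are examples.
def example_roundtrip(n=1000, dtype='int64'):
    df = pd.DataFrame({'column': np.arange(n).astype(dtype)})
    table = pa.Table.from_pandas(df)  # pandas -> Arrow (what *ToArrow times)
    return table.to_pandas()          # Arrow -> pandas (what *FromArrow times)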
|
apache-2.0
|
chaluemwut/fbserver
|
venv/lib/python2.7/site-packages/sklearn/cross_decomposition/cca_.py
|
7
|
3037
|
from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
        whether to scale the data.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
    copy : boolean
        Whether the deflation should be done on a copy. Leave the default
        value (True) unless you don't care about side effects.
Attributes
----------
`x_weights_` : array, [p, n_components]
X block weights vectors.
`y_weights_` : array, [q, n_components]
Y block weights vectors.
`x_loadings_` : array, [p, n_components]
X block loadings vectors.
`y_loadings_` : array, [q, n_components]
Y block loadings vectors.
`x_scores_` : array, [n_samples, n_components]
X scores.
`y_scores_` : array, [n_samples, n_components]
Y scores.
`x_rotations_` : array, [p, n_components]
X block to latents rotations.
`y_rotations_` : array, [q, n_components]
Y block to latents rotations.
Notes
-----
    For each component k, find the weights u, v that maximize
    corr(Xk u, Yk v), subject to ``|u| = |v| = 1``
    Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In french but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
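# Hedged illustration (added; not part of the original module): the fitted
# attributes listed in the docstring can be inspected directly after ``fit``;
# the data below re-uses the docstring example.
def _example_cca_attributes():  # pragma: no cover
    X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [3., 5., 4.]]
    Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    cca = CCA(n_components=1)
    cca.fit(X, Y)
    # x_weights_ is [p, n_components] = (3, 1); y_weights_ is [q, n_components] = (2, 1)
    return cca.x_weights_.shape, cca.y_weights_.shape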
|
apache-2.0
|
SMTorg/smt
|
smt/examples/multi_modal/run_genn_demo.py
|
3
|
8597
|
"""
G R A D I E N T - E N H A N C E D N E U R A L N E T W O R K S (G E N N)
Description: This program demonstrates GENN on the two-dimensional Rastrigin function,
             an egg-crate-looking function that can be challenging to fit because
             of its multi-modality.
Author: Steven H. Berguin <[email protected]>
This package is distributed under New BSD license.
"""
from smt.surrogate_models.genn import GENN, load_smt_data
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from pyDOE2 import fullfact
SEED = 101
def get_practice_data(random=False):
"""
Return practice data for two-dimensional Rastrigin function
:param: random -- boolean, True = random sampling, False = full-factorial sampling
    :return: (X, Y, J) -- np arrays of shapes (n_x, m), (n_y, m), (n_y, n_x, m) where n_x = 2, n_y = 1 and m = L**2 = 144
"""
# Response (N-dimensional Rastrigin)
f = lambda x: np.sum(x ** 2 - 10 * np.cos(2 * np.pi * x) + 10, axis=1)
df = lambda x, j: 2 * x[:, j] + 20 * np.pi * np.sin(2 * np.pi * x[:, j])
# Domain
lb = -1.0 # minimum bound (same for all dimensions)
ub = 1.5 # maximum bound (same for all dimensions)
# Design of experiment (full factorial)
n_x = 2 # number of dimensions
n_y = 1 # number of responses
L = 12 # number of levels per dimension
m = L ** n_x # number of training examples that will be generated
if random:
doe = np.random.rand(m, n_x)
else:
levels = [L] * n_x
doe = fullfact(levels)
doe = (doe - 0.0) / (L - 1.0) # values normalized such that 0 < doe < 1
assert doe.shape == (m, n_x)
# Apply bounds
X = lb + (ub - lb) * doe
# Evaluate response
Y = f(X).reshape((m, 1))
# Evaluate partials
J = np.zeros((m, n_x, n_y))
for j in range(0, n_x):
J[:, j, :] = df(X, j).reshape((m, 1))
return X.T, Y.T, J.T
def contour_plot(genn, title="GENN"):
"""Make contour plots of 2D Rastrigin function and compare to Neural Net prediction"""
model = genn.model
X_train, _, _ = model.training_data
# Domain
lb = -1.0
ub = 1.5
m = 100
x1 = np.linspace(lb, ub, m)
x2 = np.linspace(lb, ub, m)
X1, X2 = np.meshgrid(x1, x2)
# True response
pi = np.pi
Y_true = (
np.power(X1, 2)
- 10 * np.cos(2 * pi * X1)
+ 10
+ np.power(X2, 2)
- 10 * np.cos(2 * pi * X2)
+ 10
)
# Predicted response
Y_pred = np.zeros((m, m))
for i in range(0, m):
for j in range(0, m):
Y_pred[i, j] = model.evaluate(np.array([X1[i, j], X2[i, j]]).reshape(2, 1))
# Prepare to plot
fig = plt.figure(figsize=(6, 3))
spec = gridspec.GridSpec(ncols=2, nrows=1, wspace=0)
# Plot Truth model
ax1 = fig.add_subplot(spec[0, 0])
ax1.contour(X1, X2, Y_true, 20, cmap="RdGy")
anno_opts = dict(
xy=(0.5, 1.075), xycoords="axes fraction", va="center", ha="center"
)
ax1.annotate("True", **anno_opts)
anno_opts = dict(
xy=(-0.075, 0.5), xycoords="axes fraction", va="center", ha="center"
)
ax1.annotate("X2", **anno_opts)
anno_opts = dict(
xy=(0.5, -0.05), xycoords="axes fraction", va="center", ha="center"
)
ax1.annotate("X1", **anno_opts)
ax1.set_xticks([])
ax1.set_yticks([])
ax1.scatter(X_train[0, :], X_train[1, :], s=5)
ax1.set_xlim(lb, ub)
ax1.set_ylim(lb, ub)
# Plot prediction with gradient enhancement
ax2 = fig.add_subplot(spec[0, 1])
ax2.contour(X1, X2, Y_pred, 20, cmap="RdGy")
anno_opts = dict(
xy=(0.5, 1.075), xycoords="axes fraction", va="center", ha="center"
)
ax2.annotate(title, **anno_opts)
anno_opts = dict(
xy=(0.5, -0.05), xycoords="axes fraction", va="center", ha="center"
)
ax2.annotate("X1", **anno_opts)
ax2.set_xticks([])
ax2.set_yticks([])
plt.show()
def run_demo_2d(
alpha=0.1,
beta1=0.9,
beta2=0.99,
lambd=0.1,
gamma=1.0,
deep=3,
wide=6,
mini_batch_size=None,
iterations=30,
epochs=100,
):
"""
Predict Rastrigin function using neural net and compare against truth model. Provided with proper training data,
the only hyperparameters the user needs to tune are:
:param alpha = learning rate
:param beta1 = adam optimizer parameter
:param beta2 = adam optimizer parameter
:param lambd = regularization coefficient
:param gamma = gradient enhancement coefficient
:param deep = neural net depth
:param wide = neural net width
This restricted list is intentional. The goal was to provide a simple interface for common regression tasks
with the bare necessary tuning parameters. More advanced prediction tasks should consider tensorflow or other
deep learning frameworks. Hopefully, the simplicity of this interface will address a common use case in aerospace
engineering, namely: predicting smooth functions using computational design of experiments.
"""
if gamma > 0.0:
title = "GENN"
else:
title = "NN"
# Practice data
X_train, Y_train, J_train = get_practice_data(random=False)
X_test, Y_test, J_test = get_practice_data(random=True)
# Convert training data to SMT format
xt = X_train.T
yt = Y_train.T
dyt_dxt = J_train[
0
].T # SMT format doesn't handle more than one output at a time, hence J[0]
# Convert test data to SMT format
xv = X_test.T
yv = Y_test.T
dyv_dxv = J_test[
0
].T # SMT format doesn't handle more than one output at a time, hence J[0]
# Initialize GENN object
genn = GENN()
genn.options["alpha"] = alpha
genn.options["beta1"] = beta1
genn.options["beta2"] = beta2
genn.options["lambd"] = lambd
genn.options["gamma"] = gamma
genn.options["deep"] = deep
genn.options["wide"] = wide
genn.options["mini_batch_size"] = mini_batch_size
genn.options["num_epochs"] = epochs
genn.options["num_iterations"] = iterations
genn.options["seed"] = SEED
genn.options["is_print"] = True
# Load data
load_smt_data(
genn, xt, yt, dyt_dxt
) # convenience function that uses SurrogateModel.set_training_values(), etc.
# Train
genn.train()
genn.plot_training_history()
genn.goodness_of_fit(xv, yv, dyv_dxv)
# Contour plot
contour_plot(genn, title=title)
def run_demo_1D(is_gradient_enhancement=True): # pragma: no cover
"""Test and demonstrate GENN using a 1D example"""
# Test function
f = lambda x: x * np.sin(x)
df_dx = lambda x: np.sin(x) + x * np.cos(x)
# Domain
lb = -np.pi
ub = np.pi
# Training data
m = 4
xt = np.linspace(lb, ub, m)
yt = f(xt)
dyt_dxt = df_dx(xt)
# Validation data
xv = lb + np.random.rand(30, 1) * (ub - lb)
yv = f(xv)
dyv_dxv = df_dx(xv)
# Initialize GENN object
genn = GENN()
genn.options["alpha"] = 0.05
genn.options["beta1"] = 0.9
genn.options["beta2"] = 0.99
genn.options["lambd"] = 0.05
genn.options["gamma"] = int(is_gradient_enhancement)
genn.options["deep"] = 2
genn.options["wide"] = 6
genn.options["mini_batch_size"] = 64
genn.options["num_epochs"] = 25
genn.options["num_iterations"] = 100
genn.options["seed"] = SEED
genn.options["is_print"] = True
# Load data
load_smt_data(genn, xt, yt, dyt_dxt)
# Train
genn.train()
genn.plot_training_history()
genn.goodness_of_fit(xv, yv, dyv_dxv)
# Plot comparison
if genn.options["gamma"] == 1.0:
title = "with gradient enhancement"
else:
title = "without gradient enhancement"
x = np.arange(lb, ub, 0.01)
y = f(x)
y_pred = genn.predict_values(x)
fig, ax = plt.subplots()
ax.plot(x, y_pred)
ax.plot(x, y, "k--")
ax.plot(xv, yv, "ro")
ax.plot(xt, yt, "k+", mew=3, ms=10)
ax.set(xlabel="x", ylabel="y", title=title)
ax.legend(["Predicted", "True", "Test", "Train"])
plt.show()
if __name__ == "__main__":
# 1D example: compare with and without gradient enhancement
run_demo_1D(is_gradient_enhancement=False)
run_demo_1D(is_gradient_enhancement=True)
# 2D example: Rastrigin function
run_demo_2d(
alpha=0.1,
beta1=0.9,
beta2=0.99,
lambd=0.1,
gamma=1.0,
deep=3, # 3,
wide=12, # 6,
mini_batch_size=32,
iterations=30,
epochs=25,
)
|
bsd-3-clause
|
larsmans/scikit-learn
|
sklearn/ensemble/__init__.py
|
44
|
1228
|
"""
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
|
bsd-3-clause
|
ammarkhann/FinalSeniorCode
|
lib/python2.7/site-packages/pandas/tests/indexes/test_base.py
|
3
|
78516
|
# -*- coding: utf-8 -*-
import pytest
from datetime import datetime, timedelta
import pandas.util.testing as tm
from pandas.core.indexes.api import Index, MultiIndex
from pandas.tests.indexes.common import Base
from pandas.compat import (range, lrange, lzip, u,
text_type, zip, PY3, PY36)
import operator
import numpy as np
from pandas import (period_range, date_range, Series,
DataFrame, Float64Index, Int64Index,
CategoricalIndex, DatetimeIndex, TimedeltaIndex,
PeriodIndex, isnull)
from pandas.core.index import _get_combined_index
from pandas.util.testing import assert_almost_equal
from pandas.compat.numpy import np_datetime64_compat
import pandas.core.config as cf
from pandas.core.indexes.datetimes import _to_m8
import pandas as pd
from pandas._libs.lib import Timestamp
class TestIndex(Base):
_holder = Index
def setup_method(self, method):
self.indices = dict(unicodeIndex=tm.makeUnicodeIndex(100),
strIndex=tm.makeStringIndex(100),
dateIndex=tm.makeDateIndex(100),
periodIndex=tm.makePeriodIndex(100),
tdIndex=tm.makeTimedeltaIndex(100),
intIndex=tm.makeIntIndex(100),
uintIndex=tm.makeUIntIndex(100),
rangeIndex=tm.makeIntIndex(100),
floatIndex=tm.makeFloatIndex(100),
boolIndex=Index([True, False]),
catIndex=tm.makeCategoricalIndex(100),
empty=Index([]),
tuples=MultiIndex.from_tuples(lzip(
['foo', 'bar', 'baz'], [1, 2, 3])))
self.setup_indices()
def create_index(self):
return Index(list('abcde'))
def test_new_axis(self):
new_index = self.dateIndex[None, :]
assert new_index.ndim == 2
assert isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
super(TestIndex, self).test_copy_and_deepcopy()
new_copy2 = self.intIndex.copy(dtype=int)
assert new_copy2.dtype.kind == 'i'
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
tm.assert_index_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
assert isinstance(index, Index)
assert index.name == 'name'
tm.assert_numpy_array_equal(arr, index.values)
arr[0] = "SOMEBIGLONGSTRING"
assert index[0] != "SOMEBIGLONGSTRING"
# what to do here?
# arr = np.array(5.)
# pytest.raises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
pytest.raises(TypeError, Index, 0)
def test_construction_list_mixed_tuples(self):
# see gh-10697: if we are constructing from a mixed list of tuples,
# make sure that we are independent of the sorting order.
idx1 = Index([('A', 1), 'B'])
assert isinstance(idx1, Index)
assert not isinstance(idx1, MultiIndex)
idx2 = Index(['B', ('A', 1)])
assert isinstance(idx2, Index)
assert not isinstance(idx2, MultiIndex)
def test_constructor_from_index_datetimetz(self):
idx = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
result = pd.Index(idx)
tm.assert_index_equal(result, idx)
assert result.tz == idx.tz
result = pd.Index(idx.asobject)
tm.assert_index_equal(result, idx)
assert result.tz == idx.tz
def test_constructor_from_index_timedelta(self):
idx = pd.timedelta_range('1 days', freq='D', periods=3)
result = pd.Index(idx)
tm.assert_index_equal(result, idx)
result = pd.Index(idx.asobject)
tm.assert_index_equal(result, idx)
def test_constructor_from_index_period(self):
idx = pd.period_range('2015-01-01', freq='D', periods=3)
result = pd.Index(idx)
tm.assert_index_equal(result, idx)
result = pd.Index(idx.asobject)
tm.assert_index_equal(result, idx)
def test_constructor_from_series_datetimetz(self):
idx = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
result = pd.Index(pd.Series(idx))
tm.assert_index_equal(result, idx)
assert result.tz == idx.tz
def test_constructor_from_series_timedelta(self):
idx = pd.timedelta_range('1 days', freq='D', periods=3)
result = pd.Index(pd.Series(idx))
tm.assert_index_equal(result, idx)
def test_constructor_from_series_period(self):
idx = pd.period_range('2015-01-01', freq='D', periods=3)
result = pd.Index(pd.Series(idx))
tm.assert_index_equal(result, idx)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'), Timestamp('20120101'),
Timestamp('20130101')])
s = Series([Timestamp('20110101'), Timestamp('20120101'),
Timestamp('20130101')])
result = Index(s)
tm.assert_index_equal(result, expected)
result = DatetimeIndex(s)
tm.assert_index_equal(result, expected)
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990',
'4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990',
'4-1-1990', '5-1-1990'], freq='MS')
tm.assert_index_equal(result, expected)
df = pd.DataFrame(np.random.rand(5, 3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990',
'5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
expected.name = 'date'
tm.assert_index_equal(result, expected)
assert df['date'].dtype == object
exp = pd.Series(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990',
'5-1-1990'], name='date')
tm.assert_series_equal(df['date'], exp)
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
assert result == 'MS'
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5), np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
tm.assert_index_equal(result, expected)
def test_constructor_int_dtype_nan(self):
# see gh-15187
data = [np.nan]
msg = "cannot convert"
with tm.assert_raises_regex(ValueError, msg):
Index(data, dtype='int64')
with tm.assert_raises_regex(ValueError, msg):
Index(data, dtype='uint64')
# This, however, should not break
# because NaN is float.
expected = Float64Index(data)
result = Index(data, dtype='float')
tm.assert_index_equal(result, expected)
def test_index_ctor_infer_nan_nat(self):
# GH 13467
exp = pd.Float64Index([np.nan, np.nan])
assert exp.dtype == np.float64
tm.assert_index_equal(Index([np.nan, np.nan]), exp)
tm.assert_index_equal(Index(np.array([np.nan, np.nan])), exp)
exp = pd.DatetimeIndex([pd.NaT, pd.NaT])
assert exp.dtype == 'datetime64[ns]'
tm.assert_index_equal(Index([pd.NaT, pd.NaT]), exp)
tm.assert_index_equal(Index(np.array([pd.NaT, pd.NaT])), exp)
exp = pd.DatetimeIndex([pd.NaT, pd.NaT])
assert exp.dtype == 'datetime64[ns]'
for data in [[pd.NaT, np.nan], [np.nan, pd.NaT],
[np.nan, np.datetime64('nat')],
[np.datetime64('nat'), np.nan]]:
tm.assert_index_equal(Index(data), exp)
tm.assert_index_equal(Index(np.array(data, dtype=object)), exp)
exp = pd.TimedeltaIndex([pd.NaT, pd.NaT])
assert exp.dtype == 'timedelta64[ns]'
for data in [[np.nan, np.timedelta64('nat')],
[np.timedelta64('nat'), np.nan],
[pd.NaT, np.timedelta64('nat')],
[np.timedelta64('nat'), pd.NaT]]:
tm.assert_index_equal(Index(data), exp)
tm.assert_index_equal(Index(np.array(data, dtype=object)), exp)
# mixed np.datetime64/timedelta64 nat results in object
data = [np.datetime64('nat'), np.timedelta64('nat')]
exp = pd.Index(data, dtype=object)
tm.assert_index_equal(Index(data), exp)
tm.assert_index_equal(Index(np.array(data, dtype=object)), exp)
data = [np.timedelta64('nat'), np.datetime64('nat')]
exp = pd.Index(data, dtype=object)
tm.assert_index_equal(Index(data), exp)
tm.assert_index_equal(Index(np.array(data, dtype=object)), exp)
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
tm.assert_index_equal(rs, xp)
assert isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
tm.assert_index_equal(result, idx)
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
tm.assert_index_equal(result, idx)
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
tm.assert_index_equal(result, idx)
def test_constructor_dtypes(self):
for idx in [Index(np.array([1, 2, 3], dtype=int)),
Index(np.array([1, 2, 3], dtype=int), dtype=int),
Index([1, 2, 3], dtype=int)]:
assert isinstance(idx, Int64Index)
# These should coerce
for idx in [Index(np.array([1., 2., 3.], dtype=float), dtype=int),
Index([1., 2., 3.], dtype=int)]:
assert isinstance(idx, Int64Index)
for idx in [Index(np.array([1., 2., 3.], dtype=float)),
Index(np.array([1, 2, 3], dtype=int), dtype=float),
Index(np.array([1., 2., 3.], dtype=float), dtype=float),
Index([1, 2, 3], dtype=float),
Index([1., 2., 3.], dtype=float)]:
assert isinstance(idx, Float64Index)
for idx in [Index(np.array([True, False, True], dtype=bool)),
Index([True, False, True]),
Index(np.array([True, False, True], dtype=bool),
dtype=bool),
Index([True, False, True], dtype=bool)]:
assert isinstance(idx, Index)
assert idx.dtype == object
for idx in [Index(np.array([1, 2, 3], dtype=int), dtype='category'),
Index([1, 2, 3], dtype='category'),
Index(np.array([np_datetime64_compat('2011-01-01'),
np_datetime64_compat('2011-01-02')]),
dtype='category'),
Index([datetime(2011, 1, 1), datetime(2011, 1, 2)],
dtype='category')]:
assert isinstance(idx, CategoricalIndex)
for idx in [Index(np.array([np_datetime64_compat('2011-01-01'),
np_datetime64_compat('2011-01-02')])),
Index([datetime(2011, 1, 1), datetime(2011, 1, 2)])]:
assert isinstance(idx, DatetimeIndex)
for idx in [Index(np.array([np_datetime64_compat('2011-01-01'),
np_datetime64_compat('2011-01-02')]),
dtype=object),
Index([datetime(2011, 1, 1),
datetime(2011, 1, 2)], dtype=object)]:
assert not isinstance(idx, DatetimeIndex)
assert isinstance(idx, Index)
assert idx.dtype == object
for idx in [Index(np.array([np.timedelta64(1, 'D'), np.timedelta64(
1, 'D')])), Index([timedelta(1), timedelta(1)])]:
assert isinstance(idx, TimedeltaIndex)
for idx in [Index(np.array([np.timedelta64(1, 'D'),
np.timedelta64(1, 'D')]), dtype=object),
Index([timedelta(1), timedelta(1)], dtype=object)]:
assert not isinstance(idx, TimedeltaIndex)
assert isinstance(idx, Index)
assert idx.dtype == object
def test_constructor_dtypes_datetime(self):
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.date_range('2011-01-01', periods=5, tz=tz)
dtype = idx.dtype
# pass values without timezone, as DatetimeIndex localizes it
for values in [pd.date_range('2011-01-01', periods=5).values,
pd.date_range('2011-01-01', periods=5).asi8]:
for res in [pd.Index(values, tz=tz),
pd.Index(values, dtype=dtype),
pd.Index(list(values), tz=tz),
pd.Index(list(values), dtype=dtype)]:
tm.assert_index_equal(res, idx)
# check compat with DatetimeIndex
for res in [pd.DatetimeIndex(values, tz=tz),
pd.DatetimeIndex(values, dtype=dtype),
pd.DatetimeIndex(list(values), tz=tz),
pd.DatetimeIndex(list(values), dtype=dtype)]:
tm.assert_index_equal(res, idx)
def test_constructor_dtypes_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
dtype = idx.dtype
for values in [idx.values, idx.asi8]:
for res in [pd.Index(values, dtype=dtype),
pd.Index(list(values), dtype=dtype)]:
tm.assert_index_equal(res, idx)
# check compat with TimedeltaIndex
for res in [pd.TimedeltaIndex(values, dtype=dtype),
pd.TimedeltaIndex(list(values), dtype=dtype)]:
tm.assert_index_equal(res, idx)
def test_view_with_args(self):
restricted = ['unicodeIndex', 'strIndex', 'catIndex', 'boolIndex',
'empty']
for i in restricted:
ind = self.indices[i]
# with arguments
pytest.raises(TypeError, lambda: ind.view('i8'))
# these are ok
for i in list(set(self.indices.keys()) - set(restricted)):
ind = self.indices[i]
# with arguments
ind.view('i8')
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
assert casted.name == 'foobar'
def test_equals_object(self):
# same
assert Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c']))
# different length
assert not Index(['a', 'b', 'c']).equals(Index(['a', 'b']))
# same length, different values
assert not Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd']))
# Must also be an Index
assert not Index(['a', 'b', 'c']).equals(['a', 'b', 'c'])
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
# test 0th element
tm.assert_index_equal(Index(['a', 'b', 'c', 'd']),
result.insert(0, 'a'))
# test Nth element that follows Python list behavior
tm.assert_index_equal(Index(['b', 'c', 'e', 'd']),
result.insert(-1, 'e'))
# test loc +/- neq (0, -1)
tm.assert_index_equal(result.insert(1, 'z'), result.insert(-2, 'z'))
# test empty
null_index = Index([])
tm.assert_index_equal(Index(['a']), null_index.insert(0, 'a'))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
with pytest.raises((IndexError, ValueError)):
# either depending on numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
assert i1.identical(i2)
i1 = i1.rename('foo')
assert i1.equals(i2)
assert not i1.identical(i2)
i2 = i2.rename('foo')
assert i1.identical(i2)
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
assert not i3.identical(i4)
def test_is_(self):
ind = Index(range(10))
assert ind.is_(ind)
assert ind.is_(ind.view().view().view().view())
assert not ind.is_(Index(range(10)))
assert not ind.is_(ind.copy())
assert not ind.is_(ind.copy(deep=False))
assert not ind.is_(ind[:])
assert not ind.is_(ind.view(np.ndarray).view(Index))
assert not ind.is_(np.array(range(10)))
# quasi-implementation dependent
assert ind.is_(ind.view())
ind2 = ind.view()
ind2.name = 'bob'
assert ind.is_(ind2)
assert ind2.is_(ind)
# doesn't matter if Indices are *actually* views of underlying data,
assert not ind.is_(Index(ind.values))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
assert not ind1.is_(ind2)
def test_asof(self):
d = self.dateIndex[0]
assert self.dateIndex.asof(d) == d
assert isnull(self.dateIndex.asof(d - timedelta(1)))
d = self.dateIndex[-1]
assert self.dateIndex.asof(d + timedelta(1)) == d
d = self.dateIndex[0].to_pydatetime()
assert isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-02-28')
result = idx.asof('2010-02')
assert result == expected
assert not isinstance(result, Index)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
# assert first_value == x['2013-01-01 00:00:00.000000050+0000']
exp_ts = np_datetime64_compat('2013-01-01 00:00:00.000000050+0000',
'ns')
assert first_value == x[Timestamp(exp_ts)]
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
assert subIndex.get_loc(val) == i
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
assert subIndex.get_loc(val) == i
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
assert i == sl[sl.get_loc(i)]
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
assert idx[[]].identical(empty_idx)
assert idx[empty_iarr].identical(empty_idx)
assert idx[empty_barr].identical(empty_idx)
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
pytest.raises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
assert exp == arr[5]
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
assert tm.equalContents(intersect, second)
# Corner cases
inter = first.intersection(first)
assert inter is first
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
tm.assert_index_equal(result2, expected2)
assert result2.name == expected2.name
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
tm.assert_index_equal(result3, expected3)
assert result3.name == expected3.name
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
expected = Index([5, 3, 4], name='idx')
result = idx1.intersection(idx2)
tm.assert_index_equal(result, expected)
idx2 = Index([4, 7, 6, 5, 3], name='other')
expected = Index([5, 3, 4], name=None)
result = idx1.intersection(idx2)
tm.assert_index_equal(result, expected)
# non-monotonic non-unique
idx1 = Index(['A', 'B', 'A', 'C'])
idx2 = Index(['B', 'D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
tm.assert_index_equal(result, expected)
idx2 = Index(['B', 'D', 'A'])
expected = Index(['A', 'B', 'A'], dtype='object')
result = idx1.intersection(idx2)
tm.assert_index_equal(result, expected)
# preserve names
first = self.strIndex[5:20]
second = self.strIndex[:10]
first.name = 'A'
second.name = 'A'
intersect = first.intersection(second)
assert intersect.name == 'A'
second.name = 'B'
intersect = first.intersection(second)
assert intersect.name is None
first.name = None
second.name = 'B'
intersect = first.intersection(second)
assert intersect.name is None
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
assert tm.equalContents(union, everything)
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.union(case)
assert tm.equalContents(result, everything)
# Corner cases
union = first.union(first)
assert union is first
union = first.union([])
assert union is first
union = Index([]).union(first)
assert union is first
# preserve names
first = Index(list('ab'), name='A')
second = Index(list('ab'), name='B')
union = first.union(second)
expected = Index(list('ab'), name=None)
tm.assert_index_equal(union, expected)
first = Index(list('ab'), name='A')
second = Index([], name='B')
union = first.union(second)
expected = Index(list('ab'), name=None)
tm.assert_index_equal(union, expected)
first = Index([], name='A')
second = Index(list('ab'), name='B')
union = first.union(second)
expected = Index(list('ab'), name=None)
tm.assert_index_equal(union, expected)
first = Index(list('ab'))
second = Index(list('ab'), name='B')
union = first.union(second)
expected = Index(list('ab'), name='B')
tm.assert_index_equal(union, expected)
first = Index([])
second = Index(list('ab'), name='B')
union = first.union(second)
expected = Index(list('ab'), name='B')
tm.assert_index_equal(union, expected)
first = Index(list('ab'))
second = Index([], name='B')
union = first.union(second)
expected = Index(list('ab'), name='B')
tm.assert_index_equal(union, expected)
first = Index(list('ab'), name='A')
second = Index(list('ab'))
union = first.union(second)
expected = Index(list('ab'), name='A')
tm.assert_index_equal(union, expected)
first = Index(list('ab'), name='A')
second = Index([])
union = first.union(second)
expected = Index(list('ab'), name='A')
tm.assert_index_equal(union, expected)
first = Index([], name='A')
second = Index(list('ab'))
union = first.union(second)
expected = Index(list('ab'), name='A')
tm.assert_index_equal(union, expected)
with tm.assert_produces_warning(RuntimeWarning):
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
assert tm.equalContents(firstCat, appended)
assert tm.equalContents(secondCat, self.strIndex)
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_add(self):
idx = self.strIndex
expected = Index(self.strIndex.values * 2)
tm.assert_index_equal(idx + idx, expected)
tm.assert_index_equal(idx + idx.tolist(), expected)
tm.assert_index_equal(idx.tolist() + idx, expected)
# test add and radd
idx = Index(list('abc'))
expected = Index(['a1', 'b1', 'c1'])
tm.assert_index_equal(idx + '1', expected)
expected = Index(['1a', '1b', '1c'])
tm.assert_index_equal('1' + idx, expected)
def test_sub(self):
idx = self.strIndex
pytest.raises(TypeError, lambda: idx - 'a')
pytest.raises(TypeError, lambda: idx - idx)
pytest.raises(TypeError, lambda: idx - idx.tolist())
pytest.raises(TypeError, lambda: idx.tolist() - idx)
def test_map_identity_mapping(self):
# GH 12766
for name, cur_index in self.indices.items():
tm.assert_index_equal(cur_index, cur_index.map(lambda x: x))
def test_map_with_tuples(self):
# GH 12766
# Test that returning a single tuple from an Index
# returns an Index.
boolean_index = tm.makeIntIndex(3).map(lambda x: (x,))
expected = Index([(0,), (1,), (2,)])
tm.assert_index_equal(boolean_index, expected)
# Test that returning a tuple from a map of a single index
# returns a MultiIndex object.
boolean_index = tm.makeIntIndex(3).map(lambda x: (x, x == 1))
expected = MultiIndex.from_tuples([(0, False), (1, True), (2, False)])
tm.assert_index_equal(boolean_index, expected)
# Test that returning a single object from a MultiIndex
# returns an Index.
first_level = ['foo', 'bar', 'baz']
multi_index = MultiIndex.from_tuples(lzip(first_level, [1, 2, 3]))
reduced_index = multi_index.map(lambda x: x[0])
tm.assert_index_equal(reduced_index, Index(first_level))
def test_map_tseries_indices_return_index(self):
date_index = tm.makeDateIndex(10)
exp = Index([1] * 10)
tm.assert_index_equal(exp, date_index.map(lambda x: 1))
period_index = tm.makePeriodIndex(10)
tm.assert_index_equal(exp, period_index.map(lambda x: 1))
tdelta_index = tm.makeTimedeltaIndex(10)
tm.assert_index_equal(exp, tdelta_index.map(lambda x: 1))
date_index = tm.makeDateIndex(24, freq='h', name='hourly')
exp = Index(range(24), name='hourly')
tm.assert_index_equal(exp, date_index.map(lambda x: x.hour))
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
tm.assert_index_equal(result, index)
# empty
result = index.append([])
tm.assert_index_equal(result, index)
def test_append_empty_preserve_name(self):
left = Index([], name='foo')
right = Index([1, 2, 3], name='foo')
result = left.append(right)
assert result.name == 'foo'
left = Index([], name='foo')
right = Index([1, 2, 3], name='bar')
result = left.append(right)
assert result.name is None
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
assert 'a' not in index2
assert 'afoo' in index2
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
assert 'a' in index
index += '_x'
assert 'a_x' in index
def test_difference(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
# different names
result = first.difference(second)
assert tm.equalContents(result, answer)
assert result.name is None
# same names
second.name = 'name'
result = first.difference(second)
assert result.name == 'name'
# with empty
result = first.difference([])
assert tm.equalContents(result, first)
assert result.name == first.name
# with everything
result = first.difference(first)
assert len(result) == 0
assert result.name == first.name
def test_symmetric_difference(self):
# smoke
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = Index([2, 3, 4, 5])
result = idx1.symmetric_difference(idx2)
expected = Index([1, 5])
assert tm.equalContents(result, expected)
assert result.name is None
# __xor__ syntax
expected = idx1 ^ idx2
assert tm.equalContents(result, expected)
assert result.name is None
# multiIndex
idx1 = MultiIndex.from_tuples(self.tuples)
idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = idx1.symmetric_difference(idx2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
assert tm.equalContents(result, expected)
# nans:
# GH 13514 change: {nan} - {nan} == {}
# (GH 6444, sorting of nans, is no longer an issue)
idx1 = Index([1, np.nan, 2, 3])
idx2 = Index([0, 1, np.nan])
idx3 = Index([0, 1])
result = idx1.symmetric_difference(idx2)
expected = Index([0.0, 2.0, 3.0])
tm.assert_index_equal(result, expected)
result = idx1.symmetric_difference(idx3)
expected = Index([0.0, 2.0, 3.0, np.nan])
tm.assert_index_equal(result, expected)
# other not an Index:
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = idx1.symmetric_difference(idx2)
assert tm.equalContents(result, expected)
assert result.name == 'idx1'
result = idx1.symmetric_difference(idx2, result_name='new_name')
assert tm.equalContents(result, expected)
assert result.name == 'new_name'
def test_is_numeric(self):
assert not self.dateIndex.is_numeric()
assert not self.strIndex.is_numeric()
assert self.intIndex.is_numeric()
assert self.floatIndex.is_numeric()
assert not self.catIndex.is_numeric()
def test_is_object(self):
assert self.strIndex.is_object()
assert self.boolIndex.is_object()
assert not self.catIndex.is_object()
assert not self.intIndex.is_object()
assert not self.dateIndex.is_object()
assert not self.floatIndex.is_object()
def test_is_all_dates(self):
assert self.dateIndex.is_all_dates
assert not self.strIndex.is_all_dates
assert not self.intIndex.is_all_dates
def test_summary(self):
self._check_method_works(Index.summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind.summary()
# shouldn't be formatted accidentally.
assert '~:{range}:0' in result
assert '{other}%s' in result
def test_format(self):
self._check_method_works(Index.format)
# GH 14626
        # Windows has different precision on datetime.datetime.now (it doesn't
        # include microseconds); the default Timestamp repr shows them but Index
        # formatting does not, so the check is skipped in that case.
now = datetime.now()
if not str(now).endswith("000"):
index = Index([now])
formatted = index.format()
expected = [str(index[0])]
assert formatted == expected
# 2845
index = Index([1, 2.0 + 3.0j, np.nan])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
assert formatted == expected
# is this really allowed?
index = Index([1, 2.0 + 3.0j, None])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
assert formatted == expected
self.strIndex[:0].format()
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
assert formatted[0] == 'something'
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
assert len(result) == 2
assert result == expected
def test_format_none(self):
values = ['a', 'b', 'c', None]
idx = Index(values)
idx.format()
assert idx[3] is None
def test_logical_compat(self):
idx = self.create_index()
assert idx.all() == idx.values.all()
assert idx.any() == idx.values.any()
def _check_method_works(self, method):
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
method(self.catIndex)
def test_get_indexer(self):
idx1 = Index([1, 2, 3, 4, 5])
idx2 = Index([2, 4, 6])
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
def test_get_indexer_invalid(self):
# GH10411
idx = Index(np.arange(10))
with tm.assert_raises_regex(ValueError, 'tolerance argument'):
idx.get_indexer([1, 0], tolerance=1)
with tm.assert_raises_regex(ValueError, 'limit argument'):
idx.get_indexer([1, 0], limit=1)
def test_get_indexer_nearest(self):
idx = Index(np.arange(10))
all_methods = ['pad', 'backfill', 'nearest']
for method in all_methods:
actual = idx.get_indexer([0, 5, 9], method=method)
tm.assert_numpy_array_equal(actual, np.array([0, 5, 9],
dtype=np.intp))
actual = idx.get_indexer([0, 5, 9], method=method, tolerance=0)
tm.assert_numpy_array_equal(actual, np.array([0, 5, 9],
dtype=np.intp))
for method, expected in zip(all_methods, [[0, 1, 8], [1, 2, 9],
[0, 2, 9]]):
actual = idx.get_indexer([0.2, 1.8, 8.5], method=method)
tm.assert_numpy_array_equal(actual, np.array(expected,
dtype=np.intp))
actual = idx.get_indexer([0.2, 1.8, 8.5], method=method,
tolerance=1)
tm.assert_numpy_array_equal(actual, np.array(expected,
dtype=np.intp))
for method, expected in zip(all_methods, [[0, -1, -1], [-1, 2, -1],
[0, 2, -1]]):
actual = idx.get_indexer([0.2, 1.8, 8.5], method=method,
tolerance=0.2)
tm.assert_numpy_array_equal(actual, np.array(expected,
dtype=np.intp))
with tm.assert_raises_regex(ValueError, 'limit argument'):
idx.get_indexer([1, 0], method='nearest', limit=1)
def test_get_indexer_nearest_decreasing(self):
idx = Index(np.arange(10))[::-1]
all_methods = ['pad', 'backfill', 'nearest']
for method in all_methods:
actual = idx.get_indexer([0, 5, 9], method=method)
tm.assert_numpy_array_equal(actual, np.array([9, 4, 0],
dtype=np.intp))
for method, expected in zip(all_methods, [[8, 7, 0], [9, 8, 1],
[9, 7, 0]]):
actual = idx.get_indexer([0.2, 1.8, 8.5], method=method)
tm.assert_numpy_array_equal(actual, np.array(expected,
dtype=np.intp))
def test_get_indexer_strings(self):
idx = pd.Index(['b', 'c'])
actual = idx.get_indexer(['a', 'b', 'c', 'd'], method='pad')
expected = np.array([-1, 0, 1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(actual, expected)
actual = idx.get_indexer(['a', 'b', 'c', 'd'], method='backfill')
expected = np.array([0, 0, 1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(actual, expected)
with pytest.raises(TypeError):
idx.get_indexer(['a', 'b', 'c', 'd'], method='nearest')
with pytest.raises(TypeError):
idx.get_indexer(['a', 'b', 'c', 'd'], method='pad', tolerance=2)
def test_get_loc(self):
idx = pd.Index([0, 1, 2])
all_methods = [None, 'pad', 'backfill', 'nearest']
for method in all_methods:
assert idx.get_loc(1, method=method) == 1
if method is not None:
assert idx.get_loc(1, method=method, tolerance=0) == 1
with pytest.raises(TypeError):
idx.get_loc([1, 2], method=method)
for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:
assert idx.get_loc(1.1, method) == loc
for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:
assert idx.get_loc(1.1, method, tolerance=1) == loc
for method in ['pad', 'backfill', 'nearest']:
with pytest.raises(KeyError):
idx.get_loc(1.1, method, tolerance=0.05)
with tm.assert_raises_regex(ValueError, 'must be numeric'):
idx.get_loc(1.1, 'nearest', tolerance='invalid')
with tm.assert_raises_regex(ValueError, 'tolerance .* valid if'):
idx.get_loc(1.1, tolerance=1)
idx = pd.Index(['a', 'c'])
with pytest.raises(TypeError):
idx.get_loc('a', method='nearest')
with pytest.raises(TypeError):
idx.get_loc('a', method='pad', tolerance='invalid')
def test_slice_locs(self):
for dtype in [int, float]:
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(idx)
assert idx.slice_locs(start=2) == (2, n)
assert idx.slice_locs(start=3) == (3, n)
assert idx.slice_locs(3, 8) == (3, 6)
assert idx.slice_locs(5, 10) == (3, n)
assert idx.slice_locs(end=8) == (0, 6)
assert idx.slice_locs(end=9) == (0, 7)
# reversed
idx2 = idx[::-1]
assert idx2.slice_locs(8, 2) == (2, 6)
assert idx2.slice_locs(7, 3) == (2, 5)
# float slicing
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=float))
n = len(idx)
assert idx.slice_locs(5.0, 10.0) == (3, n)
assert idx.slice_locs(4.5, 10.5) == (3, 8)
idx2 = idx[::-1]
assert idx2.slice_locs(8.5, 1.5) == (2, 6)
assert idx2.slice_locs(10.5, -1) == (0, n)
# int slicing with floats
# GH 4892, these are all TypeErrors
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=int))
pytest.raises(TypeError,
lambda: idx.slice_locs(5.0, 10.0), (3, n))
pytest.raises(TypeError,
lambda: idx.slice_locs(4.5, 10.5), (3, 8))
idx2 = idx[::-1]
pytest.raises(TypeError,
lambda: idx2.slice_locs(8.5, 1.5), (2, 6))
pytest.raises(TypeError,
lambda: idx2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
assert idx.slice_locs('a', 'd') == (0, 6)
assert idx.slice_locs(end='d') == (0, 6)
assert idx.slice_locs('a', 'c') == (0, 4)
assert idx.slice_locs('b', 'd') == (2, 6)
idx2 = idx[::-1]
assert idx2.slice_locs('d', 'a') == (0, 6)
assert idx2.slice_locs(end='a') == (0, 6)
assert idx2.slice_locs('d', 'b') == (0, 4)
assert idx2.slice_locs('c', 'a') == (2, 6)
for dtype in [int, float]:
idx = Index(np.array([10, 12, 12, 14], dtype=dtype))
assert idx.slice_locs(12, 12) == (1, 3)
assert idx.slice_locs(11, 13) == (1, 3)
idx2 = idx[::-1]
assert idx2.slice_locs(12, 12) == (1, 3)
assert idx2.slice_locs(13, 11) == (1, 3)
def test_slice_locs_na(self):
idx = Index([np.nan, 1, 2])
pytest.raises(KeyError, idx.slice_locs, start=1.5)
pytest.raises(KeyError, idx.slice_locs, end=1.5)
assert idx.slice_locs(1) == (1, 3)
assert idx.slice_locs(np.nan) == (0, 3)
idx = Index([0, np.nan, np.nan, 1, 2])
assert idx.slice_locs(np.nan) == (1, 5)
def test_slice_locs_negative_step(self):
idx = Index(list('bcdxy'))
SLC = pd.IndexSlice
def check_slice(in_slice, expected):
s_start, s_stop = idx.slice_locs(in_slice.start, in_slice.stop,
in_slice.step)
result = idx[s_start:s_stop:in_slice.step]
expected = pd.Index(list(expected))
tm.assert_index_equal(result, expected)
for in_slice, expected in [
(SLC[::-1], 'yxdcb'), (SLC['b':'y':-1], ''),
(SLC['b'::-1], 'b'), (SLC[:'b':-1], 'yxdcb'),
(SLC[:'y':-1], 'y'), (SLC['y'::-1], 'yxdcb'),
(SLC['y'::-4], 'yb'),
# absent labels
(SLC[:'a':-1], 'yxdcb'), (SLC[:'a':-2], 'ydb'),
(SLC['z'::-1], 'yxdcb'), (SLC['z'::-3], 'yc'),
(SLC['m'::-1], 'dcb'), (SLC[:'m':-1], 'yx'),
(SLC['a':'a':-1], ''), (SLC['z':'z':-1], ''),
(SLC['m':'m':-1], '')
]:
check_slice(in_slice, expected)
def test_drop(self):
n = len(self.strIndex)
drop = self.strIndex[lrange(5, 10)]
dropped = self.strIndex.drop(drop)
expected = self.strIndex[lrange(5) + lrange(10, n)]
tm.assert_index_equal(dropped, expected)
pytest.raises(ValueError, self.strIndex.drop, ['foo', 'bar'])
pytest.raises(ValueError, self.strIndex.drop, ['1', 'bar'])
# errors='ignore'
mixed = drop.tolist() + ['foo']
dropped = self.strIndex.drop(mixed, errors='ignore')
expected = self.strIndex[lrange(5) + lrange(10, n)]
tm.assert_index_equal(dropped, expected)
dropped = self.strIndex.drop(['foo', 'bar'], errors='ignore')
expected = self.strIndex[lrange(n)]
tm.assert_index_equal(dropped, expected)
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
tm.assert_index_equal(dropped, expected)
ser = Index([1, 2, 3])
dropped = ser.drop(1)
expected = Index([2, 3])
tm.assert_index_equal(dropped, expected)
# errors='ignore'
pytest.raises(ValueError, ser.drop, [3, 4])
dropped = ser.drop(4, errors='ignore')
expected = Index([1, 2, 3])
tm.assert_index_equal(dropped, expected)
dropped = ser.drop([3, 4, 5], errors='ignore')
expected = Index([1, 2])
tm.assert_index_equal(dropped, expected)
def test_tuple_union_bug(self):
import pandas
import numpy as np
aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])
aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'),
(2, 'B'), (1, 'C'), (2, 'C')],
dtype=[('num', int), ('let', 'a1')])
idx1 = pandas.Index(aidx1)
idx2 = pandas.Index(aidx2)
# intersection broken?
int_idx = idx1.intersection(idx2)
# needs to be 1d like idx1 and idx2
expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))
assert int_idx.ndim == 1
tm.assert_index_equal(int_idx, expected)
# union broken
union_idx = idx1.union(idx2)
expected = idx2
assert union_idx.ndim == 1
tm.assert_index_equal(union_idx, expected)
def test_is_monotonic_incomparable(self):
index = Index([5, datetime.now(), 7])
assert not index.is_monotonic
assert not index.is_monotonic_decreasing
def test_get_set_value(self):
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date), values[67])
self.dateIndex.set_value(values, date, 10)
assert values[67] == 10
def test_isin(self):
values = ['foo', 'bar', 'quux']
idx = Index(['qux', 'baz', 'foo', 'bar'])
result = idx.isin(values)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# set
result = idx.isin(set(values))
tm.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = Index([])
result = idx.isin(values)
assert len(result) == 0
assert result.dtype == np.bool_
def test_isin_nan(self):
tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([np.nan]),
np.array([False, True]))
tm.assert_numpy_array_equal(Index(['a', pd.NaT]).isin([pd.NaT]),
np.array([False, True]))
tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([float('nan')]),
np.array([False, False]))
tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([pd.NaT]),
np.array([False, False]))
# Float64Index overrides isin, so must be checked separately
tm.assert_numpy_array_equal(Float64Index([1.0, np.nan]).isin([np.nan]),
np.array([False, True]))
tm.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([float('nan')]),
np.array([False, True]))
# we cannot compare NaT with NaN
tm.assert_numpy_array_equal(Float64Index([1.0, np.nan]).isin([pd.NaT]),
np.array([False, False]))
def test_isin_level_kwarg(self):
def check_idx(idx):
values = idx.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(expected, idx.isin(values, level=0))
tm.assert_numpy_array_equal(expected, idx.isin(values, level=-1))
pytest.raises(IndexError, idx.isin, values, level=1)
pytest.raises(IndexError, idx.isin, values, level=10)
pytest.raises(IndexError, idx.isin, values, level=-2)
pytest.raises(KeyError, idx.isin, values, level=1.0)
pytest.raises(KeyError, idx.isin, values, level='foobar')
idx.name = 'foobar'
tm.assert_numpy_array_equal(expected,
idx.isin(values, level='foobar'))
pytest.raises(KeyError, idx.isin, values, level='xyzzy')
pytest.raises(KeyError, idx.isin, values, level=np.nan)
check_idx(Index(['qux', 'baz', 'foo', 'bar']))
# Float64Index overrides isin, so must be checked separately
check_idx(Float64Index([1.0, 2.0, 3.0, 4.0]))
def test_boolean_cmp(self):
values = [1, 2, 3, 4]
idx = Index(values)
res = (idx == values)
tm.assert_numpy_array_equal(res, np.array(
[True, True, True, True], dtype=bool))
def test_get_level_values(self):
result = self.strIndex.get_level_values(0)
tm.assert_index_equal(result, self.strIndex)
def test_slice_keep_name(self):
idx = Index(['a', 'b'], name='asdf')
assert idx.name == idx[1:].name
def test_join_self(self):
# instance attributes of the form self.<name>Index
indices = 'unicode', 'str', 'date', 'int', 'float'
kinds = 'outer', 'inner', 'left', 'right'
for index_kind in indices:
res = getattr(self, '{0}Index'.format(index_kind))
for kind in kinds:
joined = res.join(res, how=kind)
assert res is joined
def test_str_attribute(self):
# GH9068
methods = ['strip', 'rstrip', 'lstrip']
idx = Index([' jack', 'jill ', ' jesse ', 'frank'])
for method in methods:
expected = Index([getattr(str, method)(x) for x in idx.values])
tm.assert_index_equal(
getattr(Index.str, method)(idx.str), expected)
# create a few instances that are not able to use .str accessor
indices = [Index(range(5)), tm.makeDateIndex(10),
MultiIndex.from_tuples([('foo', '1'), ('bar', '3')]),
PeriodIndex(start='2000', end='2010', freq='A')]
for idx in indices:
with tm.assert_raises_regex(AttributeError,
'only use .str accessor'):
idx.str.repeat(2)
idx = Index(['a b c', 'd e', 'f'])
expected = Index([['a', 'b', 'c'], ['d', 'e'], ['f']])
tm.assert_index_equal(idx.str.split(), expected)
tm.assert_index_equal(idx.str.split(expand=False), expected)
expected = MultiIndex.from_tuples([('a', 'b', 'c'), ('d', 'e', np.nan),
('f', np.nan, np.nan)])
tm.assert_index_equal(idx.str.split(expand=True), expected)
# test boolean case, should return np.array instead of boolean Index
idx = Index(['a1', 'a2', 'b1', 'b2'])
expected = np.array([True, True, False, False])
tm.assert_numpy_array_equal(idx.str.startswith('a'), expected)
assert isinstance(idx.str.startswith('a'), np.ndarray)
s = Series(range(4), index=idx)
expected = Series(range(2), index=['a1', 'a2'])
tm.assert_series_equal(s[s.index.str.startswith('a')], expected)
def test_tab_completion(self):
# GH 9910
idx = Index(list('abcd'))
assert 'str' in dir(idx)
idx = Index(range(4))
assert 'str' not in dir(idx)
def test_indexing_doesnt_change_class(self):
idx = Index([1, 2, 3, 'a', 'b', 'c'])
assert idx[1:3].identical(pd.Index([2, 3], dtype=np.object_))
assert idx[[0, 1]].identical(pd.Index([1, 2], dtype=np.object_))
def test_outer_join_sort(self):
left_idx = Index(np.random.permutation(15))
right_idx = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
joined = left_idx.join(right_idx, how='outer')
# right_idx in this case because DatetimeIndex has join precedence over
# Int64Index
with tm.assert_produces_warning(RuntimeWarning):
expected = right_idx.astype(object).union(left_idx.astype(object))
tm.assert_index_equal(joined, expected)
def test_nan_first_take_datetime(self):
idx = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])
res = idx.take([-1, 0, 1])
exp = Index([idx[-1], idx[0], idx[1]])
tm.assert_index_equal(res, exp)
def test_take_fill_value(self):
# GH 12631
idx = pd.Index(list('ABC'), name='xxx')
result = idx.take(np.array([1, 0, -1]))
expected = pd.Index(list('BAC'), name='xxx')
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.Index(['B', 'A', np.nan], name='xxx')
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.Index(['B', 'A', 'C'], name='xxx')
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
def test_reshape_raise(self):
msg = "reshaping is not supported"
idx = pd.Index([0, 1, 2])
tm.assert_raises_regex(NotImplementedError, msg,
idx.reshape, idx.shape)
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self):
# GH6552
idx = pd.Index([0, 1, 2])
dt_idx = pd.date_range('20130101', periods=3)
idx.name = None
assert idx.reindex([])[0].name is None
assert idx.reindex(np.array([]))[0].name is None
assert idx.reindex(idx.tolist())[0].name is None
assert idx.reindex(idx.tolist()[:-1])[0].name is None
assert idx.reindex(idx.values)[0].name is None
assert idx.reindex(idx.values[:-1])[0].name is None
# Must preserve name even if dtype changes.
assert idx.reindex(dt_idx.values)[0].name is None
assert idx.reindex(dt_idx.tolist())[0].name is None
idx.name = 'foobar'
assert idx.reindex([])[0].name == 'foobar'
assert idx.reindex(np.array([]))[0].name == 'foobar'
assert idx.reindex(idx.tolist())[0].name == 'foobar'
assert idx.reindex(idx.tolist()[:-1])[0].name == 'foobar'
assert idx.reindex(idx.values)[0].name == 'foobar'
assert idx.reindex(idx.values[:-1])[0].name == 'foobar'
# Must preserve name even if dtype changes.
assert idx.reindex(dt_idx.values)[0].name == 'foobar'
assert idx.reindex(dt_idx.tolist())[0].name == 'foobar'
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
assert get_reindex_type([]) == np.object_
assert get_reindex_type(np.array([])) == np.object_
assert get_reindex_type(np.array([], dtype=np.int64)) == np.object_
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
assert get_reindex_type(pd.Int64Index([])) == np.int64
assert get_reindex_type(pd.Float64Index([])) == np.float64
assert get_reindex_type(pd.DatetimeIndex([])) == np.datetime64
reindexed = idx.reindex(pd.MultiIndex(
[pd.Int64Index([]), pd.Float64Index([])], [[], []]))[0]
assert reindexed.levels[0].dtype.type == np.int64
assert reindexed.levels[1].dtype.type == np.float64
def test_groupby(self):
idx = Index(range(5))
groups = idx.groupby(np.array([1, 1, 2, 2, 2]))
exp = {1: pd.Index([0, 1]), 2: pd.Index([2, 3, 4])}
tm.assert_dict_equal(groups, exp)
def test_equals_op_multiindex(self):
# GH9785
# test comparisons of multiindex
from pandas.compat import StringIO
df = pd.read_csv(StringIO('a,b,c\n1,2,3\n4,5,6'), index_col=[0, 1])
tm.assert_numpy_array_equal(df.index == df.index,
np.array([True, True]))
mi1 = MultiIndex.from_tuples([(1, 2), (4, 5)])
tm.assert_numpy_array_equal(df.index == mi1, np.array([True, True]))
mi2 = MultiIndex.from_tuples([(1, 2), (4, 6)])
tm.assert_numpy_array_equal(df.index == mi2, np.array([True, False]))
mi3 = MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)])
with tm.assert_raises_regex(ValueError, "Lengths must match"):
df.index == mi3
index_a = Index(['foo', 'bar', 'baz'])
with tm.assert_raises_regex(ValueError, "Lengths must match"):
df.index == index_a
tm.assert_numpy_array_equal(index_a == mi3,
np.array([False, False, False]))
def test_conversion_preserves_name(self):
# GH 10875
i = pd.Index(['01:02:03', '01:02:04'], name='label')
assert i.name == pd.to_datetime(i).name
assert i.name == pd.to_timedelta(i).name
def test_string_index_repr(self):
# py3/py2 repr can differ because of "u" prefix
        # which also affects the displayed element size
if PY3:
coerce = lambda x: x
else:
coerce = unicode # noqa
# short
idx = pd.Index(['a', 'bb', 'ccc'])
if PY3:
expected = u"""Index(['a', 'bb', 'ccc'], dtype='object')"""
assert repr(idx) == expected
else:
expected = u"""Index([u'a', u'bb', u'ccc'], dtype='object')"""
assert coerce(idx) == expected
# multiple lines
idx = pd.Index(['a', 'bb', 'ccc'] * 10)
if PY3:
expected = u"""\
Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc',
'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc',
'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
dtype='object')"""
assert repr(idx) == expected
else:
expected = u"""\
Index([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb',
u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'],
dtype='object')"""
assert coerce(idx) == expected
# truncated
idx = pd.Index(['a', 'bb', 'ccc'] * 100)
if PY3:
expected = u"""\
Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',
...
'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
dtype='object', length=300)"""
assert repr(idx) == expected
else:
expected = u"""\
Index([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
...
u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'],
dtype='object', length=300)"""
assert coerce(idx) == expected
# short
idx = pd.Index([u'あ', u'いい', u'ううう'])
if PY3:
expected = u"""Index(['あ', 'いい', 'ううう'], dtype='object')"""
assert repr(idx) == expected
else:
expected = u"""Index([u'あ', u'いい', u'ううう'], dtype='object')"""
assert coerce(idx) == expected
# multiple lines
idx = pd.Index([u'あ', u'いい', u'ううう'] * 10)
if PY3:
expected = (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
u"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
u"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう'],\n"
u" dtype='object')")
assert repr(idx) == expected
else:
expected = (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい', u'ううう', u'あ',\n"
u" u'いい', u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ', u'いい', u'ううう', u'あ', "
u"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n"
u" dtype='object')")
assert coerce(idx) == expected
# truncated
idx = pd.Index([u'あ', u'いい', u'ううう'] * 100)
if PY3:
expected = (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
u"'あ', 'いい', 'ううう', 'あ',\n"
u" ...\n"
u" 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう'],\n"
u" dtype='object', length=300)")
assert repr(idx) == expected
else:
expected = (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい', u'ううう', u'あ',\n"
u" ...\n"
u" u'ううう', u'あ', u'いい', u'ううう', u'あ', "
u"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n"
u" dtype='object', length=300)")
assert coerce(idx) == expected
        # Enable Unicode option -----------------------------------------
with cf.option_context('display.unicode.east_asian_width', True):
# short
idx = pd.Index([u'あ', u'いい', u'ううう'])
if PY3:
expected = (u"Index(['あ', 'いい', 'ううう'], "
u"dtype='object')")
assert repr(idx) == expected
else:
expected = (u"Index([u'あ', u'いい', u'ううう'], "
u"dtype='object')")
assert coerce(idx) == expected
# multiple lines
idx = pd.Index([u'あ', u'いい', u'ううう'] * 10)
if PY3:
expected = (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう'],\n"
u" dtype='object')""")
assert repr(idx) == expected
else:
expected = (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう', u'あ',\n"
u" u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう'],\n"
u" dtype='object')")
assert coerce(idx) == expected
# truncated
idx = pd.Index([u'あ', u'いい', u'ううう'] * 100)
if PY3:
expected = (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ',\n"
u" ...\n"
u" 'ううう', 'あ', 'いい', 'ううう', 'あ', "
u"'いい', 'ううう', 'あ', 'いい',\n"
u" 'ううう'],\n"
u" dtype='object', length=300)")
assert repr(idx) == expected
else:
expected = (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ',\n"
u" ...\n"
u" u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう', u'あ',\n"
u" u'いい', u'ううう'],\n"
u" dtype='object', length=300)")
assert coerce(idx) == expected
class TestMixedIntIndex(Base):
# Mostly the tests from common.py for which the results differ
    # in py2 and py3 because ints and strings are not comparable in py3
# (GH 13514)
_holder = Index
def setup_method(self, method):
self.indices = dict(mixedIndex=Index([0, 'a', 1, 'b', 2, 'c']))
self.setup_indices()
def create_index(self):
return self.mixedIndex
def test_argsort(self):
idx = self.create_index()
if PY36:
with tm.assert_raises_regex(TypeError, "'>' not supported"):
result = idx.argsort()
elif PY3:
with tm.assert_raises_regex(TypeError, "unorderable types"):
result = idx.argsort()
else:
result = idx.argsort()
expected = np.array(idx).argsort()
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
def test_numpy_argsort(self):
idx = self.create_index()
if PY36:
with tm.assert_raises_regex(TypeError, "'>' not supported"):
result = np.argsort(idx)
elif PY3:
with tm.assert_raises_regex(TypeError, "unorderable types"):
result = np.argsort(idx)
else:
result = np.argsort(idx)
expected = idx.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_copy_name(self):
# Check that "name" argument passed at initialization is honoured
# GH12309
idx = self.create_index()
first = idx.__class__(idx, copy=True, name='mario')
second = first.__class__(first, copy=False)
# Even though "copy=False", we want a new object.
assert first is not second
# Not using tm.assert_index_equal() since names differ:
assert idx.equals(first)
assert first.name == 'mario'
assert second.name == 'mario'
s1 = Series(2, index=first)
s2 = Series(3, index=second[:-1])
warning_type = RuntimeWarning if PY3 else None
with tm.assert_produces_warning(warning_type):
# Python 3: Unorderable types
s3 = s1 * s2
assert s3.index.name == 'mario'
def test_copy_name2(self):
# Check that adding a "name" parameter to the copy is honored
# GH14302
idx = pd.Index([1, 2], name='MyName')
idx1 = idx.copy()
assert idx.equals(idx1)
assert idx.name == 'MyName'
assert idx1.name == 'MyName'
idx2 = idx.copy(name='NewName')
assert idx.equals(idx2)
assert idx.name == 'MyName'
assert idx2.name == 'NewName'
idx3 = idx.copy(names=['NewName'])
assert idx.equals(idx3)
assert idx.name == 'MyName'
assert idx.names == ['MyName']
assert idx3.name == 'NewName'
assert idx3.names == ['NewName']
def test_union_base(self):
idx = self.create_index()
first = idx[3:]
second = idx[:5]
if PY3:
with tm.assert_produces_warning(RuntimeWarning):
# unorderable types
result = first.union(second)
expected = Index(['b', 2, 'c', 0, 'a', 1])
tm.assert_index_equal(result, expected)
else:
result = first.union(second)
expected = Index(['b', 2, 'c', 0, 'a', 1])
tm.assert_index_equal(result, expected)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
if PY3:
with tm.assert_produces_warning(RuntimeWarning):
# unorderable types
result = first.union(case)
assert tm.equalContents(result, idx)
else:
result = first.union(case)
assert tm.equalContents(result, idx)
def test_intersection_base(self):
# (same results for py2 and py3 but sortedness not tested elsewhere)
idx = self.create_index()
first = idx[:5]
second = idx[:3]
result = first.intersection(second)
expected = Index([0, 'a', 1])
tm.assert_index_equal(result, expected)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
result = first.intersection(case)
assert tm.equalContents(result, second)
def test_difference_base(self):
# (same results for py2 and py3 but sortedness not tested elsewhere)
idx = self.create_index()
first = idx[:4]
second = idx[3:]
result = first.difference(second)
expected = Index([0, 1, 'a'])
tm.assert_index_equal(result, expected)
def test_symmetric_difference(self):
# (same results for py2 and py3 but sortedness not tested elsewhere)
idx = self.create_index()
first = idx[:4]
second = idx[3:]
result = first.symmetric_difference(second)
expected = Index([0, 1, 2, 'a', 'c'])
tm.assert_index_equal(result, expected)
def test_logical_compat(self):
idx = self.create_index()
assert idx.all() == idx.values.all()
assert idx.any() == idx.values.any()
def test_dropna(self):
# GH 6194
for dtype in [None, object, 'category']:
idx = pd.Index([1, 2, 3], dtype=dtype)
tm.assert_index_equal(idx.dropna(), idx)
idx = pd.Index([1., 2., 3.], dtype=dtype)
tm.assert_index_equal(idx.dropna(), idx)
nanidx = pd.Index([1., 2., np.nan, 3.], dtype=dtype)
tm.assert_index_equal(nanidx.dropna(), idx)
idx = pd.Index(['A', 'B', 'C'], dtype=dtype)
tm.assert_index_equal(idx.dropna(), idx)
nanidx = pd.Index(['A', np.nan, 'B', 'C'], dtype=dtype)
tm.assert_index_equal(nanidx.dropna(), idx)
tm.assert_index_equal(nanidx.dropna(how='any'), idx)
tm.assert_index_equal(nanidx.dropna(how='all'), idx)
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'])
tm.assert_index_equal(idx.dropna(), idx)
nanidx = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03', pd.NaT])
tm.assert_index_equal(nanidx.dropna(), idx)
idx = pd.TimedeltaIndex(['1 days', '2 days', '3 days'])
tm.assert_index_equal(idx.dropna(), idx)
nanidx = pd.TimedeltaIndex([pd.NaT, '1 days', '2 days',
'3 days', pd.NaT])
tm.assert_index_equal(nanidx.dropna(), idx)
idx = pd.PeriodIndex(['2012-02', '2012-04', '2012-05'], freq='M')
tm.assert_index_equal(idx.dropna(), idx)
nanidx = pd.PeriodIndex(['2012-02', '2012-04', 'NaT', '2012-05'],
freq='M')
tm.assert_index_equal(nanidx.dropna(), idx)
msg = "invalid how option: xxx"
with tm.assert_raises_regex(ValueError, msg):
pd.Index([1, 2, 3]).dropna(how='xxx')
def test_get_combined_index(self):
result = _get_combined_index([])
tm.assert_index_equal(result, Index([]))
def test_repeat(self):
repeats = 2
idx = pd.Index([1, 2, 3])
expected = pd.Index([1, 1, 2, 2, 3, 3])
result = idx.repeat(repeats)
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = idx.repeat(n=repeats)
tm.assert_index_equal(result, expected)
def test_is_monotonic_na(self):
examples = [pd.Index([np.nan]),
pd.Index([np.nan, 1]),
pd.Index([1, 2, np.nan]),
pd.Index(['a', 'b', np.nan]),
pd.to_datetime(['NaT']),
pd.to_datetime(['NaT', '2000-01-01']),
pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']),
pd.to_timedelta(['1 day', 'NaT']), ]
for index in examples:
assert not index.is_monotonic_increasing
assert not index.is_monotonic_decreasing
def test_repr_summary(self):
with cf.option_context('display.max_seq_items', 10):
r = repr(pd.Index(np.arange(1000)))
assert len(r) < 200
assert "..." in r
def test_int_name_format(self):
index = Index(['a', 'b', 'c'], name=0)
s = Series(lrange(3), index)
df = DataFrame(lrange(3), index=index)
repr(s)
repr(df)
def test_print_unicode_columns(self):
df = pd.DataFrame({u("\u05d0"): [1, 2, 3],
"\u05d1": [4, 5, 6],
"c": [7, 8, 9]})
repr(df.columns) # should not raise UnicodeDecodeError
def test_unicode_string_with_unicode(self):
idx = Index(lrange(1000))
if PY3:
str(idx)
else:
text_type(idx)
def test_bytestring_with_unicode(self):
idx = Index(lrange(1000))
if PY3:
bytes(idx)
else:
str(idx)
def test_intersect_str_dates(self):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
i1 = Index(dt_dates, dtype=object)
i2 = Index(['aa'], dtype=object)
res = i2.intersection(i1)
assert len(res) == 0
|
mit
|
YihaoLu/statsmodels
|
examples/python/robust_models_1.py
|
25
|
8588
|
## M-Estimators for Robust Linear Modeling
from __future__ import print_function
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import statsmodels.api as sm
# * An M-estimator minimizes the function
#
# $$Q(e_i, \rho) = \sum_i~\rho \left (\frac{e_i}{s}\right )$$
#
# where $\rho$ is a symmetric function of the residuals
#
# * The effect of $\rho$ is to reduce the influence of outliers
# * $s$ is an estimate of scale.
# * The robust estimates $\hat{\beta}$ are computed by the iteratively re-weighted least squares (IRLS) algorithm (a minimal sketch of one such loop follows below)
# * We have several choices available for the weighting functions to be used
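# A minimal, self-contained sketch of one possible IRLS loop for a Huber-type
# M-estimator. This is illustration only: the weight function, the MAD-based
# scale estimate and the fixed iteration count are simplifying assumptions,
# not the exact algorithm used by sm.RLM further below.
def irls_huber_sketch(X_mat, y_vec, t=1.345, n_iter=20):
    beta = np.linalg.pinv(X_mat).dot(y_vec)                       # start from OLS
    for _ in range(n_iter):
        resid = y_vec - X_mat.dot(beta)
        s = np.median(np.abs(resid - np.median(resid))) / 0.6745  # MAD scale
        u = resid / max(s, 1e-12)
        w = np.where(np.abs(u) <= t, 1.0, t / np.abs(u))          # Huber weights
        XtW = X_mat.T * w                                         # X^T W via broadcasting
        beta = np.linalg.solve(XtW.dot(X_mat), XtW.dot(y_vec))    # weighted LS step
    return beta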
norms = sm.robust.norms
def plot_weights(support, weights_func, xlabels, xticks):
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(support, weights_func(support))
ax.set_xticks(xticks)
ax.set_xticklabels(xlabels, fontsize=16)
ax.set_ylim(-.1, 1.1)
return ax
#### Andrew's Wave
help(norms.AndrewWave.weights)
a = 1.339
support = np.linspace(-np.pi*a, np.pi*a, 100)
andrew = norms.AndrewWave(a=a)
plot_weights(support, andrew.weights, ['$-\pi*a$', '0', '$\pi*a$'], [-np.pi*a, 0, np.pi*a]);
#### Hampel's 17A
help(norms.Hampel.weights)
c = 8
support = np.linspace(-3*c, 3*c, 1000)
hampel = norms.Hampel(a=2., b=4., c=c)
plot_weights(support, hampel.weights, ['-3*c', '0', '3*c'], [-3*c, 0, 3*c]);
#### Huber's t
help(norms.HuberT.weights)
t = 1.345
support = np.linspace(-3*t, 3*t, 1000)
huber = norms.HuberT(t=t)
plot_weights(support, huber.weights, ['-3*t', '0', '3*t'], [-3*t, 0, 3*t]);
#### Least Squares
help(norms.LeastSquares.weights)
support = np.linspace(-3, 3, 1000)
lst_sq = norms.LeastSquares()
plot_weights(support, lst_sq.weights, ['-3', '0', '3'], [-3, 0, 3]);
#### Ramsay's Ea
help(norms.RamsayE.weights)
a = .3
support = np.linspace(-3*a, 3*a, 1000)
ramsay = norms.RamsayE(a=a)
plot_weights(support, ramsay.weights, ['-3*a', '0', '3*a'], [-3*a, 0, 3*a]);
#### Trimmed Mean
help(norms.TrimmedMean.weights)
c = 2
support = np.linspace(-3*c, 3*c, 1000)
trimmed = norms.TrimmedMean(c=c)
plot_weights(support, trimmed.weights, ['-3*c', '0', '3*c'], [-3*c, 0, 3*c]);
#### Tukey's Biweight
help(norms.TukeyBiweight.weights)
c = 4.685
support = np.linspace(-3*c, 3*c, 1000)
tukey = norms.TukeyBiweight(c=c)
plot_weights(support, tukey.weights, ['-3*c', '0', '3*c'], [-3*c, 0, 3*c]);
#### Scale Estimators
# * Robust estimates of the location
x = np.array([1, 2, 3, 4, 500])
# * The mean is not a robust estimator of location
x.mean()
# * The median, on the other hand, is a robust estimator with a breakdown point of 50%
np.median(x)
# * Analogously for the scale
# * The standard deviation is not robust
x.std()
# Median Absolute Deviation
#
# $$ median_i |X_i - median_j(X_j)| $$
# Standardized Median Absolute Deviation is a consistent estimator for $\hat{\sigma}$
#
# $$\hat{\sigma}=K \cdot MAD$$
#
# where $K$ depends on the distribution. For the normal distribution for example,
#
# $$K = 1 / \Phi^{-1}(3/4) \approx 1.4826$$
stats.norm.ppf(.75)
print(x)
sm.robust.scale.stand_mad(x)
np.array([1,2,3,4,5.]).std()
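# A by-hand check of the relation above (a sketch; `mad_by_hand` and `sigma_hat`
# are illustrative local names). The result should match
# sm.robust.scale.stand_mad(x) evaluated above.
mad_by_hand = np.median(np.abs(x - np.median(x)))
sigma_hat = mad_by_hand / stats.norm.ppf(.75)   # roughly 1.4826 * MAD
print(mad_by_hand, sigma_hat)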
# * The default for Robust Linear Models is MAD
# * another popular choice is Huber's proposal 2
np.random.seed(12345)
fat_tails = stats.t(6).rvs(40)
kde = sm.nonparametric.KDE(fat_tails)
kde.fit()
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(kde.support, kde.density);
print(fat_tails.mean(), fat_tails.std())
print(stats.norm.fit(fat_tails))
print(stats.t.fit(fat_tails, f0=6))
huber = sm.robust.scale.Huber()
loc, scale = huber(fat_tails)
print(loc, scale)
sm.robust.stand_mad(fat_tails)
sm.robust.stand_mad(fat_tails, c=stats.t(6).ppf(.75))
sm.robust.scale.mad(fat_tails)
#### Duncan's Occupational Prestige data - M-estimation for outliers
from statsmodels.graphics.api import abline_plot
from statsmodels.formula.api import ols, rlm
prestige = sm.datasets.get_rdataset("Duncan", "car", cache=True).data
print(prestige.head(10))
fig = plt.figure(figsize=(12,12))
ax1 = fig.add_subplot(211, xlabel='Income', ylabel='Prestige')
ax1.scatter(prestige.income, prestige.prestige)
xy_outlier = prestige.ix['minister'][['income','prestige']]
ax1.annotate('Minister', xy_outlier, xy_outlier+1, fontsize=16)
ax2 = fig.add_subplot(212, xlabel='Education',
ylabel='Prestige')
ax2.scatter(prestige.education, prestige.prestige);
ols_model = ols('prestige ~ income + education', prestige).fit()
print(ols_model.summary())
infl = ols_model.get_influence()
student = infl.summary_frame()['student_resid']
print(student)
print(student.ix[np.abs(student) > 2])
print(infl.summary_frame().ix['minister'])
sidak = ols_model.outlier_test('sidak')
sidak.sort('unadj_p', inplace=True)
print(sidak)
fdr = ols_model.outlier_test('fdr_bh')
fdr.sort('unadj_p', inplace=True)
print(fdr)
rlm_model = rlm('prestige ~ income + education', prestige).fit()
print(rlm_model.summary())
print(rlm_model.weights)
#### Hertzprung Russell data for Star Cluster CYG 0B1 - Leverage Points
# * Data is on the luminosity and temperature of 47 stars in the direction of Cygnus.
dta = sm.datasets.get_rdataset("starsCYG", "robustbase", cache=True).data
from matplotlib.patches import Ellipse
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111, xlabel='log(Temp)', ylabel='log(Light)', title='Hertzsprung-Russell Diagram of Star Cluster CYG OB1')
ax.scatter(*dta.values.T)
# highlight outliers
e = Ellipse((3.5, 6), .2, 1, alpha=.25, color='r')
ax.add_patch(e);
ax.annotate('Red giants', xy=(3.6, 6), xytext=(3.8, 6),
arrowprops=dict(facecolor='black', shrink=0.05, width=2),
horizontalalignment='left', verticalalignment='bottom',
clip_on=True, # clip to the axes bounding box
fontsize=16,
)
# annotate these with their index
for i,row in dta.ix[dta['log.Te'] < 3.8].iterrows():
ax.annotate(i, row, row + .01, fontsize=14)
xlim, ylim = ax.get_xlim(), ax.get_ylim()
from IPython.display import Image
Image(filename='star_diagram.png')
y = dta['log.light']
X = sm.add_constant(dta['log.Te'], prepend=True)
ols_model = sm.OLS(y, X).fit()
abline_plot(model_results=ols_model, ax=ax)
rlm_mod = sm.RLM(y, X, sm.robust.norms.TrimmedMean(.5)).fit()
abline_plot(model_results=rlm_mod, ax=ax, color='red')
# * Why? Because M-estimators are not robust to leverage points.
infl = ols_model.get_influence()
h_bar = 2*(ols_model.df_model + 1 )/ols_model.nobs
hat_diag = infl.summary_frame()['hat_diag']
hat_diag.ix[hat_diag > h_bar]
sidak2 = ols_model.outlier_test('sidak')
sidak2.sort('unadj_p', inplace=True)
print(sidak2)
fdr2 = ols_model.outlier_test('fdr_bh')
fdr2.sort('unadj_p', inplace=True)
print(fdr2)
# * Let's delete that line
del ax.lines[-1]
weights = np.ones(len(X))
weights[X[X['log.Te'] < 3.8].index.values - 1] = 0
wls_model = sm.WLS(y, X, weights=weights).fit()
abline_plot(model_results=wls_model, ax=ax, color='green')
# * MM estimators are good for this type of problem; unfortunately, we don't have these yet.
# * It's being worked on, but it gives a good excuse to look at the R cell magics in the notebook.
yy = y.values[:,None]
xx = X['log.Te'].values[:,None]
get_ipython().magic(u'load_ext rmagic')
get_ipython().magic(u'R library(robustbase)')
get_ipython().magic(u'Rpush yy xx')
get_ipython().magic(u'R mod <- lmrob(yy ~ xx);')
get_ipython().magic(u'R params <- mod$coefficients;')
get_ipython().magic(u'Rpull params')
get_ipython().magic(u'R print(mod)')
print(params)
abline_plot(intercept=params[0], slope=params[1], ax=ax, color='green')
#### Exercise: Breakdown points of M-estimator
np.random.seed(12345)
nobs = 200
beta_true = np.array([3, 1, 2.5, 3, -4])
X = np.random.uniform(-20,20, size=(nobs, len(beta_true)-1))
# stack a constant in front
X = sm.add_constant(X, prepend=True) # np.c_[np.ones(nobs), X]
mc_iter = 500
contaminate = .25 # percentage of response variables to contaminate
all_betas = []
for i in range(mc_iter):
y = np.dot(X, beta_true) + np.random.normal(size=200)
random_idx = np.random.randint(0, nobs, size=int(contaminate * nobs))
    y[random_idx] = np.random.uniform(-750, 750, size=len(random_idx))
beta_hat = sm.RLM(y, X).fit().params
all_betas.append(beta_hat)
all_betas = np.asarray(all_betas)
se_loss = lambda x : np.linalg.norm(x, ord=2)**2
se_beta = list(map(se_loss, all_betas - beta_true))  # list() keeps this working under Python 3
##### Squared error loss
np.array(se_beta).mean()
all_betas.mean(0)
beta_true
se_loss(all_betas.mean(0) - beta_true)
|
bsd-3-clause
|
jorahn/keras-wide-n-deep
|
wide_deep_keras.py
|
1
|
2717
|
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Merge
from sklearn.preprocessing import MinMaxScaler
COLUMNS = [
"age", "workclass", "fnlwgt", "education", "education_num", "marital_status",
"occupation", "relationship", "race", "gender", "capital_gain", "capital_loss",
"hours_per_week", "native_country", "income_bracket"
]
LABEL_COLUMN = "label"
CATEGORICAL_COLUMNS = [
"workclass", "education", "marital_status", "occupation", "relationship",
"race", "gender", "native_country"
]
CONTINUOUS_COLUMNS = [
"age", "education_num", "capital_gain", "capital_loss", "hours_per_week"
]
def load(filename):
with open(filename, 'r') as f:
skiprows = 1 if 'test' in filename else 0
df = pd.read_csv(
f, names=COLUMNS, skipinitialspace=True, skiprows=skiprows, engine='python'
)
df = df.dropna(how='any', axis=0)
return df
def preprocess(df):
df[LABEL_COLUMN] = df['income_bracket'].apply(lambda x: ">50K" in x).astype(int)
df.pop("income_bracket")
y = df[LABEL_COLUMN].values
df.pop(LABEL_COLUMN)
df = pd.get_dummies(df, columns=[x for x in CATEGORICAL_COLUMNS])
# TODO: select features for wide & deep parts
    # TODO: transformations (cross-products) -- see the illustrative sketch after this function
# from sklearn.preprocessing import PolynomialFeatures
# X = PolynomialFeatures(degree=2, interaction_only=True, include_bias=False).fit_transform(X)
df = pd.DataFrame(MinMaxScaler().fit_transform(df), columns=df.columns)
X = df.values
return X, y
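# Hypothetical helper sketching the cross-product ("crossed column") transform
# mentioned in the TODO inside preprocess(); the function name and the
# `col_pairs` argument are illustrative assumptions, not part of the original
# script. It would be applied to the raw dataframe before one-hot encoding.
def cross_columns(df, col_pairs):
    crossed = pd.DataFrame(index=df.index)
    for a, b in col_pairs:
        # one indicator column per observed (a, b) value combination
        crossed[a + '_x_' + b] = df[a].astype(str) + '_' + df[b].astype(str)
    return pd.get_dummies(crossed)
# e.g. wide_extra = cross_columns(df, [('education', 'occupation')])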
def main():
df_train = load('adult.data')
df_test = load('adult.test')
df = pd.concat([df_train, df_test])
train_len = len(df_train)
X, y = preprocess(df)
X_train = X[:train_len]
y_train = y[:train_len]
X_test = X[train_len:]
y_test = y[train_len:]
wide = Sequential()
wide.add(Dense(1, input_dim=X_train.shape[1]))
deep = Sequential()
# TODO: add embedding
deep.add(Dense(input_dim=X_train.shape[1], output_dim=100, activation='relu'))
deep.add(Dense(100, activation='relu'))
deep.add(Dense(50, activation='relu'))
deep.add(Dense(1, activation='sigmoid'))
model = Sequential()
model.add(Merge([wide, deep], mode='concat', concat_axis=1))
model.add(Dense(1, activation='sigmoid'))
model.compile(
optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy']
)
model.fit([X_train, X_train], y_train, nb_epoch=10, batch_size=32)
loss, accuracy = model.evaluate([X_test, X_test], y_test)
print('\n', 'test accuracy:', accuracy)
if __name__ == '__main__':
main()
|
mit
|
francesco-mannella/dmp-esn
|
DMP/stulp/src/dmp/demos/demoDmpContextualGoal.py
|
2
|
2934
|
## \file demoDmpContextualGoal.py
## \author Freek Stulp
## \brief Visualizes results of demoDmpContextualGoal.cpp
##
## \ingroup Demos
## \ingroup Dmps
import numpy
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import os, sys, subprocess
lib_path = os.path.abspath('../plotting')
sys.path.append(lib_path)
from plotTrajectory import plotTrajectoryFromFile
from plotDmp import plotDmp
executable = "../../../bin/demoDmpContextualGoal"
if (not os.path.isfile(executable)):
print ""
print "ERROR: Executable '"+executable+"' does not exist."
print "Please call 'make install' in the build directory first."
print ""
sys.exit(-1);
# Call the executable with the directory to which results should be written
main_directory = "/tmp/demoDmpContextualGoal"
# Test both 1-step and 2-step Dmps
for n_dmp_contextual_step in [1, 2]:
print "_______________________________________________________________"
print "Demo for "+str(n_dmp_contextual_step)+"-step contextual Dmp"
directory = main_directory + "/Step"+str(n_dmp_contextual_step)
command = executable+" "+directory+" "+str(n_dmp_contextual_step)
print command
subprocess.call(command, shell=True)
print "Plotting"
task_parameters_demos = numpy.loadtxt(directory+"/task_parameters_demos.txt")
task_parameters_repros = numpy.loadtxt(directory+"/task_parameters_repros.txt")
n_demos = len(task_parameters_demos)
n_repros = len(task_parameters_repros)
fig = plt.figure(n_dmp_contextual_step)
fig.suptitle(str(n_dmp_contextual_step)+"-step Contextual Dmp")
axs = [ fig.add_subplot(131), fig.add_subplot(132), fig.add_subplot(133) ]
for i_demo in range(n_demos):
filename = "demonstration0"+str(i_demo);
lines = plotTrajectoryFromFile(directory+"/"+filename+".txt",axs)
plt.setp(lines, linestyle='-', linewidth=4, color=(0.7,0.7,1.0), label=filename)
for i_repro in range(n_repros):
filename = "reproduced0"+str(i_repro);
lines = plotTrajectoryFromFile(directory+"/"+filename+".txt",axs)
plt.setp(lines, linestyle='-', linewidth=1, color=(0.5,0.0,0.0), label=filename)
#ax = fig.add_subplot(144)
#inputs = numpy.loadtxt(directory+'/inputs.txt')
#targets = numpy.loadtxt(directory+'/targets.txt')
#lines = ax.plot(inputs[:,0],targets,linestyle='-',color='red')
#ax.set_xlabel('input')
#ax.set_ylabel('targets')
#ax = fig.add_subplot(155)
#for i_repro in range(n_repros):
# filename = directory+"/"+"reproduced_forcingterm0"+str(i_repro)+".txt";
# forcingterm = numpy.loadtxt(filename);
# lines = ax.plot(numpy.linspace(0,1,len(forcingterm)),forcingterm)
# plt.setp(lines, linestyle='-', linewidth=2, color=(0.0,0.0,0.5), label=filename)
#ax.set_xlabel('phase')
#ax.set_ylabel('forcing term')
#plt.legend()
plt.show()
|
gpl-2.0
|
vdrhtc/Measurement-automation
|
scripts/photon_wave_mixing/helpers.py
|
1
|
3114
|
from scipy.ndimage import gaussian_filter1d
from scipy.optimize import curve_fit
import scipy.fft as fp
import numpy as np
import pickle
import matplotlib.pyplot as plt
import lib.plotting as plt2
def parse_probe_qubit_sts(freqs, S21):
amps = np.abs(S21)
frequencies = freqs[gaussian_filter1d(amps, sigma=1).argmin(axis=-1)]
return frequencies
def parse_sps_sts(freqs, S21):
amps = np.abs(S21)
frequencies = freqs[gaussian_filter1d(amps, sigma=10).argmax(axis=-1)]
return frequencies
def qubit_fit_func(x, a, b, c):
return a * (x - b)**2 + c
def fit_probe_qubit_sts(filename, plot=True):
with open(filename, 'rb') as f:
data = pickle.load(f)
currents = data['bias, [A]']
freqs = data['Frequency [Hz]']
S21 = data['data']
frequencies = parse_probe_qubit_sts(freqs, S21)
popt, conv = curve_fit(qubit_fit_func, currents, frequencies,
p0=(-1e16, -2.5e-3, 5.15e9))
if plot:
xx, yy = np.meshgrid(currents, freqs)
plt2.plot_2D(xx, yy,
np.transpose(gaussian_filter1d(np.abs(S21), sigma=20)))
plt.figure()
plt.plot(currents, frequencies, 'o')
plt.plot(currents, qubit_fit_func(currents, *popt))
plt.margins(x=0)
plt.xlabel("Current, A")
plt.ylabel("Qubit if_freq, Hz")
plt.show()
return popt
def fit_sps_sts(filename, plot=True):
with open(filename, 'rb') as f:
data = pickle.load(f)
currents = data['bias, [A]']
freqs = data['Frequency [Hz]']
S21 = data['data']
frequencies = parse_sps_sts(freqs, S21)
popt, conv = curve_fit(qubit_fit_func, currents, frequencies,
p0=(-1e15, -5e-4, 5.15e9))
if plot:
xx, yy = np.meshgrid(currents, freqs)
plt2.plot_2D(xx, yy,
np.transpose(gaussian_filter1d(np.abs(S21), sigma=10)))
plt.figure()
plt.plot(currents, frequencies, 'o')
plt.plot(currents, qubit_fit_func(currents, *popt))
plt.margins(x=0)
plt.xlabel("Current, A")
plt.ylabel("Qubit if_freq, Hz")
plt.show()
return popt
def get_current(frequency, a, b, c):
current = b + np.sqrt((frequency - c) / a)
return current
def remove_outliers():
pass
def get_signal_amplitude(downconverted_trace):
N = len(downconverted_trace)
return np.abs(fp.fft(downconverted_trace)[0] / N)
def get_noise(downconverted_trace):
return np.std(downconverted_trace)
def measure_snr(devices_dict):
    # turn on the microwave source output
devices_dict['mw'].set_output_state("ON")
# turn off AWG
devices_dict['awg'].reset()
devices_dict['awg'].synchronize_channels(channelI, channelQ)
devices_dict['awg'].trigger_output_config(channel=channelI,
trig_length=100)
devices_dict['awg'].stop_AWG(channel=channelI)
devices_dict['iqawg'].set_parameters({"calibration": devices_dict['upconv_cal']})
devices_dict['iqawg'].output_IQ_waves_from_calibration(
amp_coeffs=(0.5, 0.5))
|
gpl-3.0
|
arahuja/scikit-learn
|
examples/linear_model/plot_ridge_path.py
|
254
|
1655
|
"""
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations.
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm'])
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
moutai/scikit-learn
|
sklearn/feature_extraction/tests/test_feature_hasher.py
|
28
|
3652
|
from __future__ import unicode_literals
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from nose.tools import assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"foo": "bar", "dada": 42, "tzara": 37},
{"foo": "baz", "gaga": u"string1"}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_feature_hasher_pairs_with_string_values():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": "a"},
{"baz": u"abc", "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 1], x1_nz)
assert_equal([1, 1, 4], x2_nz)
raw_X = (iter(d.items()) for d in [{"bax": "abc"},
{"bax": "abc"}])
x1, x2 = h.transform(raw_X).toarray()
x1_nz = np.abs(x1[x1 != 0])
x2_nz = np.abs(x2[x2 != 0])
assert_equal([1], x1_nz)
assert_equal([1], x2_nz)
assert_equal(x1, x2)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
|
bsd-3-clause
|
ashhher3/scikit-learn
|
sklearn/neighbors/tests/test_dist_metrics.py
|
48
|
4949
|
import itertools
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
assert_array_almost_equal(D1, D2)
if __name__ == '__main__':
import nose
nose.runmodule()
|
bsd-3-clause
|
Adai0808/scikit-learn
|
sklearn/tree/tests/test_export.py
|
130
|
9950
|
"""
Testing for export functions of decision trees (sklearn.tree.export).
"""
from re import finditer
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
from sklearn.utils.testing import assert_in
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 2], [-1, 3], [1, 1], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=1,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with class_names
out = StringIO()
export_graphviz(clf, out_file=out, class_names=["yes", "no"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = yes"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
'class = yes"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
'class = no"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test plot_options
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False,
proportion=True, special_characters=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'edge [fontname=helvetica] ;\n' \
'0 [label=<X<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>' \
'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
'1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
'fillcolor="#399de5ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, class_names=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = y[0]"] ;\n' \
'1 [label="(...)"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth with plot_options
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, filled=True,
node_ids=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
'1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test multi-output with weighted samples
clf = DecisionTreeClassifier(max_depth=2,
min_samples_split=1,
criterion="gini",
random_state=2)
clf = clf.fit(X, y2, sample_weight=w)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
'value = [[3.0, 1.5, 0.0]\\n' \
'[1.5, 1.5, 1.5]]", fillcolor="#e5813900"] ;\n' \
'1 [label="X[1] <= -1.5\\nsamples = 3\\n' \
'value = [[3, 0, 0]\\n[1, 1, 1]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="samples = 1\\nvalue = [[1, 0, 0]\\n' \
'[0, 0, 1]]", fillcolor="#e58139ff"] ;\n' \
'1 -> 2 ;\n' \
'3 [label="samples = 2\\nvalue = [[2, 0, 0]\\n' \
'[1, 1, 0]]", fillcolor="#e581398c"] ;\n' \
'1 -> 3 ;\n' \
'4 [label="X[0] <= 1.5\\nsamples = 3\\n' \
'value = [[0.0, 1.5, 0.0]\\n[0.5, 0.5, 0.5]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 4 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'5 [label="samples = 2\\nvalue = [[0.0, 1.0, 0.0]\\n' \
'[0.5, 0.5, 0.0]]", fillcolor="#e581398c"] ;\n' \
'4 -> 5 ;\n' \
'6 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
'[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
'4 -> 6 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test regression output with plot_options
clf = DecisionTreeRegressor(max_depth=3,
min_samples_split=1,
criterion="mse",
random_state=2)
clf.fit(X, y)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True,
rotate=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'graph [ranksep=equally, splines=polyline] ;\n' \
'edge [fontname=helvetica] ;\n' \
'rankdir=LR ;\n' \
'0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
'value = 0.0", fillcolor="#e581397f"] ;\n' \
'1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
'fillcolor="#e5813900"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="True"] ;\n' \
'2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=45, ' \
'headlabel="False"] ;\n' \
'{rank=same ; 0} ;\n' \
'{rank=same ; 1; 2} ;\n' \
'}'
assert_equal(contents1, contents2)
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1)
clf.fit(X, y)
# Check feature_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
# Check class_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, class_names=[])
def test_friedman_mse_in_graphviz():
clf = DecisionTreeRegressor(criterion="friedman_mse", random_state=0)
clf.fit(X, y)
dot_data = StringIO()
export_graphviz(clf, out_file=dot_data)
clf = GradientBoostingClassifier(n_estimators=2, random_state=0)
clf.fit(X, y)
for estimator in clf.estimators_:
export_graphviz(estimator[0], out_file=dot_data)
for finding in finditer("\[.*?samples.*?\]", dot_data.getvalue()):
assert_in("friedman_mse", finding.group())
|
bsd-3-clause
|
harshaneelhg/scikit-learn
|
sklearn/datasets/mlcomp.py
|
289
|
3855
|
# Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name metadata of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
    Note on the lookup process: depending on the type of name_or_id, the
    loader will choose between integer id lookup or metadata name lookup by
    looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
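# Illustrative usage sketch, not part of the original module. It assumes the
# MLCOMP_DATASETS_HOME environment variable points at a folder containing an
# unzipped MLComp document-classification dataset; the dataset name below is
# only an example and may not exist on your machine.
if __name__ == '__main__':
    news = load_mlcomp('20news-18828', set_='train')
    print(news.DESCR)
    print('%d training documents loaded' % len(news.filenames))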
|
bsd-3-clause
|
WangWenjun559/Weiss
|
summary/sumy/sklearn/cluster/tests/test_dbscan.py
|
114
|
11393
|
"""
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.ones((10, 2))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 2))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
|
apache-2.0
|
paladin74/neural-network-animation
|
matplotlib/testing/decorators.py
|
11
|
11990
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import functools
import gc
import os
import sys
import shutil
import warnings
import unittest
import nose
import numpy as np
import matplotlib.tests
import matplotlib.units
from matplotlib import cbook
from matplotlib import ticker
from matplotlib import pyplot as plt
from matplotlib import ft2font
from matplotlib.testing.noseclasses import KnownFailureTest, \
KnownFailureDidNotFailTest, ImageComparisonFailure
from matplotlib.testing.compare import comparable_formats, compare_images, \
make_test_filename
def knownfailureif(fail_condition, msg=None, known_exception_class=None ):
"""
    Assume a test will fail if *fail_condition* is True. *fail_condition*
may also be False or the string 'indeterminate'.
*msg* is the error message displayed for the test.
If *known_exception_class* is not None, the failure is only known
if the exception is an instance of this class. (Default = None)
"""
# based on numpy.testing.dec.knownfailureif
if msg is None:
msg = 'Test known to fail'
def known_fail_decorator(f):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
def failer(*args, **kwargs):
try:
# Always run the test (to generate images).
result = f(*args, **kwargs)
except Exception as err:
if fail_condition:
if known_exception_class is not None:
if not isinstance(err,known_exception_class):
# This is not the expected exception
raise
                    # (Keep the next ultra-long comment so it shows in the console.)
raise KnownFailureTest(msg) # An error here when running nose means that you don't have the matplotlib.testing.noseclasses:KnownFailure plugin in use.
else:
raise
if fail_condition and fail_condition != 'indeterminate':
raise KnownFailureDidNotFailTest(msg)
return result
return nose.tools.make_decorator(f)(failer)
return known_fail_decorator
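# Illustrative usage sketch, not part of the original module: marking a test
# as a known failure when a hypothetical optional dependency is missing. The
# HAS_SOME_DEPENDENCY flag and the module name are placeholders for whatever
# condition actually applies.
#
#     @knownfailureif(not HAS_SOME_DEPENDENCY,
#                     msg='Test requires an optional dependency',
#                     known_exception_class=ImportError)
#     def test_needs_dependency():
#         import some_optional_dependency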
def _do_cleanup(original_units_registry):
plt.close('all')
gc.collect()
matplotlib.tests.setup()
matplotlib.units.registry.clear()
matplotlib.units.registry.update(original_units_registry)
warnings.resetwarnings() # reset any warning filters set in tests
class CleanupTest(object):
@classmethod
def setup_class(cls):
cls.original_units_registry = matplotlib.units.registry.copy()
@classmethod
def teardown_class(cls):
_do_cleanup(cls.original_units_registry)
def test(self):
self._func()
class CleanupTestCase(unittest.TestCase):
'''A wrapper for unittest.TestCase that includes cleanup operations'''
@classmethod
def setUpClass(cls):
import matplotlib.units
cls.original_units_registry = matplotlib.units.registry.copy()
@classmethod
def tearDownClass(cls):
_do_cleanup(cls.original_units_registry)
def cleanup(func):
@functools.wraps(func)
def wrapped_function(*args, **kwargs):
original_units_registry = matplotlib.units.registry.copy()
try:
func(*args, **kwargs)
finally:
_do_cleanup(original_units_registry)
return wrapped_function
def check_freetype_version(ver):
if ver is None:
return True
from distutils import version
if isinstance(ver, six.string_types):
ver = (ver, ver)
ver = [version.StrictVersion(x) for x in ver]
found = version.StrictVersion(ft2font.__freetype_version__)
return found >= ver[0] and found <= ver[1]
class ImageComparisonTest(CleanupTest):
@classmethod
def setup_class(cls):
CleanupTest.setup_class()
cls._func()
@staticmethod
def remove_text(figure):
figure.suptitle("")
for ax in figure.get_axes():
ax.set_title("")
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.xaxis.set_minor_formatter(ticker.NullFormatter())
ax.yaxis.set_major_formatter(ticker.NullFormatter())
ax.yaxis.set_minor_formatter(ticker.NullFormatter())
try:
ax.zaxis.set_major_formatter(ticker.NullFormatter())
ax.zaxis.set_minor_formatter(ticker.NullFormatter())
except AttributeError:
pass
def test(self):
baseline_dir, result_dir = _image_directories(self._func)
for fignum, baseline in zip(plt.get_fignums(), self._baseline_images):
for extension in self._extensions:
                will_fail = extension not in comparable_formats()
if will_fail:
fail_msg = 'Cannot compare %s files on this system' % extension
else:
fail_msg = 'No failure expected'
orig_expected_fname = os.path.join(baseline_dir, baseline) + '.' + extension
if extension == 'eps' and not os.path.exists(orig_expected_fname):
orig_expected_fname = os.path.join(baseline_dir, baseline) + '.pdf'
expected_fname = make_test_filename(os.path.join(
result_dir, os.path.basename(orig_expected_fname)), 'expected')
actual_fname = os.path.join(result_dir, baseline) + '.' + extension
if os.path.exists(orig_expected_fname):
shutil.copyfile(orig_expected_fname, expected_fname)
else:
will_fail = True
fail_msg = 'Do not have baseline image %s' % expected_fname
@knownfailureif(
will_fail, fail_msg,
known_exception_class=ImageComparisonFailure)
def do_test():
figure = plt.figure(fignum)
if self._remove_text:
self.remove_text(figure)
figure.savefig(actual_fname, **self._savefig_kwarg)
err = compare_images(expected_fname, actual_fname,
self._tol, in_decorator=True)
try:
if not os.path.exists(expected_fname):
raise ImageComparisonFailure(
'image does not exist: %s' % expected_fname)
if err:
raise ImageComparisonFailure(
'images not close: %(actual)s vs. %(expected)s '
'(RMS %(rms).3f)'%err)
except ImageComparisonFailure:
if not check_freetype_version(self._freetype_version):
raise KnownFailureTest(
"Mismatched version of freetype. Test requires '%s', you have '%s'" %
(self._freetype_version, ft2font.__freetype_version__))
raise
yield (do_test,)
def image_comparison(baseline_images=None, extensions=None, tol=13,
freetype_version=None, remove_text=False,
savefig_kwarg=None):
"""
call signature::
image_comparison(baseline_images=['my_figure'], extensions=None)
Compare images generated by the test with those specified in
    *baseline_images*, which must correspond, otherwise an
    ImageComparisonFailure exception will be raised.
Keyword arguments:
*baseline_images*: list
A list of strings specifying the names of the images generated
by calls to :meth:`matplotlib.figure.savefig`.
*extensions*: [ None | list ]
If *None*, default to all supported extensions.
Otherwise, a list of extensions to test. For example ['png','pdf'].
*tol*: (default 13)
The RMS threshold above which the test is considered failed.
*freetype_version*: str or tuple
The expected freetype version or range of versions for this
test to pass.
*remove_text*: bool
Remove the title and tick text from the figure before
comparison. This does not remove other, more deliberate,
text, such as legends and annotations.
*savefig_kwarg*: dict
Optional arguments that are passed to the savefig method.
"""
if baseline_images is None:
raise ValueError('baseline_images must be specified')
if extensions is None:
# default extensions to test
extensions = ['png', 'pdf', 'svg']
if savefig_kwarg is None:
#default no kwargs to savefig
savefig_kwarg = dict()
def compare_images_decorator(func):
# We want to run the setup function (the actual test function
# that generates the figure objects) only once for each type
# of output file. The only way to achieve this with nose
# appears to be to create a test class with "setup_class" and
# "teardown_class" methods. Creating a class instance doesn't
# work, so we use type() to actually create a class and fill
# it with the appropriate methods.
name = func.__name__
# For nose 1.0, we need to rename the test function to
# something without the word "test", or it will be run as
# well, outside of the context of our image comparison test
# generator.
func = staticmethod(func)
func.__get__(1).__name__ = str('_private')
new_class = type(
name,
(ImageComparisonTest,),
{'_func': func,
'_baseline_images': baseline_images,
'_extensions': extensions,
'_tol': tol,
'_freetype_version': freetype_version,
'_remove_text': remove_text,
'_savefig_kwarg': savefig_kwarg})
return new_class
return compare_images_decorator
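# Illustrative usage sketch, not part of the original module: a typical image
# comparison test. The baseline image name and the plotting code are made up;
# the decorator runs the comparison once per requested extension.
#
#     @image_comparison(baseline_images=['simple_plot'], extensions=['png'])
#     def test_simple_plot():
#         fig, ax = plt.subplots()
#         ax.plot([1, 2, 3], [1, 4, 9])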
def _image_directories(func):
"""
Compute the baseline and result image directories for testing *func*.
Create the result directory if it doesn't exist.
"""
module_name = func.__module__
if module_name == '__main__':
# FIXME: this won't work for nested packages in matplotlib.tests
warnings.warn('test module run as script. guessing baseline image locations')
script_name = sys.argv[0]
basedir = os.path.abspath(os.path.dirname(script_name))
subdir = os.path.splitext(os.path.split(script_name)[1])[0]
else:
mods = module_name.split('.')
mods.pop(0) # <- will be the name of the package being tested (in
# most cases "matplotlib")
assert mods.pop(0) == 'tests'
subdir = os.path.join(*mods)
import imp
def find_dotted_module(module_name, path=None):
"""A version of imp which can handle dots in the module name"""
res = None
for sub_mod in module_name.split('.'):
try:
res = file, path, _ = imp.find_module(sub_mod, path)
path = [path]
if file is not None:
file.close()
except ImportError:
# assume namespace package
path = sys.modules[sub_mod].__path__
res = None, path, None
return res
mod_file = find_dotted_module(func.__module__)[1]
basedir = os.path.dirname(mod_file)
baseline_dir = os.path.join(basedir, 'baseline_images', subdir)
result_dir = os.path.abspath(os.path.join('result_images', subdir))
if not os.path.exists(result_dir):
cbook.mkdirs(result_dir)
return baseline_dir, result_dir
|
mit
|
daichi-yoshikawa/dnn
|
dnnet/neuralnet.py
|
1
|
15563
|
# Authors: Daichi Yoshikawa <[email protected]>
# License: BSD 3 clause
import os, sys
import matplotlib.pyplot as plt
import pickle
import time
import logging
logger = logging.getLogger('dnnet.log')
import dnnet
from dnnet.exception import DNNetIOError, DNNetRuntimeError
from dnnet.ext_mathlibs import cp, np
from dnnet.utils.nn_utils import prod, shuffle_data, split_data, w2im
from dnnet.utils.nn_utils import is_multi_channels_image, flatten, unflatten
from dnnet.training.back_propagation import BackPropagation
from dnnet.layers.layer import Layer, InputLayer, OutputLayer
class NeuralNetwork:
"""Interface of neural network.
    Training of the model and prediction with the resulting model
    are done through this class.
Parameters
----------
layers : np.array of derived class of Layer
Layers to build neural network.
The first layer must be InputLayer and last layer must be OutputLayer.
dtype : type
Data type selected through constructor.
"""
@classmethod
def load(self, name, path=None):
"""Load model from storage.
Arguments
---------
        name : str
Name of the desired file. Doesn't include path.
path : str or None, default None
Full path to the directory where the desired file is contained.
If None, file is loaded from a directory where script runs.
Returns
-------
NeuralNetwork
Returns model.
"""
if path is None:
path = '.'
if path[0] == '~':
path = os.getenv("HOME") + path[1:]
try:
with open(path + '/' + name, 'rb') as f:
return pickle.load(f)
except IOError as e:
msg = str(e) + '\nNeuralNetwork.load failed.'
raise DNNetIOError(msg)
def __init__(self, input_shape, dtype=np.float32):
"""
Arguments
---------
dtype : type, default np.float32
Data type to use.
"""
self.layers = np.array([], dtype=Layer)
self.dtype = dtype
self.add(InputLayer(input_shape=input_shape))
def add(self, layer):
"""Add instance of derived class of layer.
Build neural network by adding layers one by one with this method.
Arguments
---------
layer : Derived class of Layer
Instance of derived class of Layer.
"""
layer.set_dtype(self.dtype)
self.layers = np.append(self.layers, layer)
def compile(self):
"""Finalize configuration of neural network model.
Warning
-------
This method must be called after adding all required layers
and before starting training.
"""
logger.info('Define network with dnnet of version : %s'\
% dnnet.__version__)
if self.layers.size == 0:
msg = 'NeuralNetwork has no layer.\n Add layers before compiling.'
raise DNNetRuntimeError(msg)
parent = self.layers[0]
self.add(OutputLayer())
for i, layer in enumerate(self.layers, 1):
logger.debug('Add %s layer.' % layer.get_type())
layer.set_parent(parent)
parent = layer
logger.debug('Defined network.')
def fit(self, x, y, optimizer, loss_function, **kwargs):
"""Train model.
Arguments
---------
x : np.array
Descriptive features in 2d array,
whose shape is (num of data, num of feature)
y : np.array
Target features in 2d array,
whose shape is (num of data, num of feature)
optimizer : Derived class of Optimizer
Instance of derived class of Optimizer.
loss_function : Derived class of LossFunction
Used to calculate loss.
epochs : int, default 10
Number of iterations of training.
1 iteration scans all batches one time.
batch_size : int, default 100
Dataset is splitted into multiple mini batches
whose size is this.
learning_curve : bool, default True
Prints out evaluation results of ongoing training.
Also, returns learning curve after completion of training.
shuffle : bool, default True
Shuffle dataset one time before training.
shuffle_per_epoch : bool, default False
Shuffle training data every epoch.
test_data_ratio : float, default 0
Ratio of test data. If 0, all data is used for training.
train_data_ratio_for_eval : float, default 1.0
Ratio of training data to calculate accuracy w.r.t training data.
Returns
-------
LearningCurve
Instance of LearningCurve, which contains
losses and accuracies for train and test data.
Warning
-------
This method assumes that x and y include all data you use.
If your data set is so large that all data cannot be stored in memory,
        you cannot use this method. Use fit_generator instead.
"""
start = time.time()
epochs = kwargs.pop('epochs', 10)
batch_size = kwargs.pop('batch_size', 100)
learning_curve = kwargs.pop('learning_curve', True)
shuffle = kwargs.pop('shuffle', True)
shuffle_per_epoch = kwargs.pop('shuffle_per_epoch', False)
test_data_ratio = kwargs.pop('test_data_ratio', self.dtype(0.))
train_data_ratio_for_eval = kwargs.pop(
'train_data_ratio_for_eval', 1.0)
logger.info('\n--- Parameters ---\nepochs: %d\nbatch_size: %d\n'
'learning_curve: %r\nshuffle: %r\nshuffle_per_epoch: %r\n'
                    'test_data_ratio: %f\ntrain_data_ratio_for_eval: %f\n'
'optimizer: %s\nloss_function: %s'
% (epochs, batch_size, learning_curve, shuffle,
shuffle_per_epoch, test_data_ratio,
train_data_ratio_for_eval, optimizer.get_type(),
loss_function.get_type()))
if shuffle:
logger.debug('shuffle data.')
x, y = shuffle_data(x, y)
x, y = self.__convert_dtype(x, y)
x_train, y_train, x_test, y_test = split_data(x, y, test_data_ratio)
logger.info('Train data input, output : %s, %s'
% (x_train.shape, y_train.shape))
logger.info('Test data input, output : %s, %s'
% (x_test.shape, y_test.shape))
back_prop = BackPropagation(
epochs, batch_size, optimizer, loss_function,
learning_curve, self.dtype)
np_err_config = np.seterr('raise')
try:
logger.info('Fitting model starts.')
lc = back_prop.fit(
self.layers, x_train, y_train, x_test, y_test,
shuffle_per_epoch, batch_size, train_data_ratio_for_eval)
except FloatingPointError as e:
msg = str(e) + '\nOverflow or underflow occurred. '\
+ 'Retry with smaller learning_rate or '\
+ 'larger weight_decay for Optimizer.'
raise DNNetRuntimeError(msg)
except Exception as e:
raise DNNetRuntimeError(e)
finally:
np.seterr(
divide=np_err_config['divide'],
over=np_err_config['over'],
under=np_err_config['under'],
invalid=np_err_config['invalid']
)
end = time.time()
logger.info('Fitting model is done. '
'Processing time : %.2f[s]\n' % (end - start))
return lc
def fit_generator(self, x, y, optimizer, loss_function, **kwargs):
"""Train model for large size data set by using generator.
        TODO: implement training with a data generator.
        """
        raise NotImplementedError('NeuralNetwork.fit_generator')
def predict(self, x):
"""Returns predicted result.
Arguments
---------
x : np.array
            Descriptive features in 2d array,
whose shape is (num of data, num of features)
Returns
-------
np.array
Predicted target features in 2d array,
whose shape is (num of data, num of features)
"""
return self.layers[0].predict(x.astype(self.dtype))
def get_config_str(self):
config_str = ''
for i, layer in enumerate(self.layers):
config_str += layer.get_config_str() + '\n'
config_str = config_str.rstrip('\n')
return config_str
def save(self, name, path=None):
"""Save model to storage.
Arguments
---------
        name : str
Name of the resulting file. Doesn't include path.
path : str or None, default None
Full path to the directory where the resulting file is generated.
If None, file is saved in a directory where script runs.
Returns
-------
bool
Returns true when succeeded.
"""
if path is None:
path = '.'
if path[0] == '~':
path = os.getenv("HOME") + path[1:]
try:
with open(os.path.join(path, name), 'wb') as f:
pickle.dump(self, f)
except IOError as e:
msg = str(e) + '\nNeuralNetwork.save failed.'
raise DNNetIOError(msg)
def visualize_filters(
self, index, n_rows, n_cols, filter_shape, figsize=(8, 8)):
"""Visualize filters.
Weight matrix in affine layer or convolution layer
can be shown as image.
If weight matrix is so big that all filters cannot be displayed,
displayed filters are randomly selected.
Arguments
---------
index : int
index-th affine/convolution layer's weight matrix is visualized.
This index starts from 0, that is,
the first layer with weight matrix is 0-th.
If this value is out of range, raise DNNetRuntimeError.
        n_rows : int
            Number of filters to display in the direction of rows.
        n_cols : int
            Number of filters to display in the direction of cols.
        filter_shape : tuple (rows, cols)
            Shape of a single filter. In the case of multi-channel, filters
            are taken as single channel by taking average over channels.
"""
# Get index of layer which is index-th layer with weight matrix.
n_layers_w_filter = 0
tgt_layer_idx = None
tgt_layer_type = None
for i, layer in enumerate(self.layers, 0):
if layer.has_weight():
if n_layers_w_filter == index:
tgt_layer_idx = i
tgt_layer_type = layer.get_type()
break
n_layers_w_filter += 1
if tgt_layer_idx is None:
msg = str(index) + '-th layer with weight matrix doesn\'t exist.'
raise DNNetRuntimeError(msg)
if tgt_layer_type == 'convolution':
self.visualize_filter_of_convolution_layer(
self.layers[tgt_layer_idx], n_rows, n_cols, filter_shape, figsize)
elif tgt_layer_type == 'affine':
self.visualize_filter_of_affine_layer(
self.layers[tgt_layer_idx], n_rows, n_cols, filter_shape, figsize)
else:
msg = 'NeuralNetwork.visualize_filters does not support '\
+ '%s' % tgt_layer_type
raise DNNetRuntimeError(msg)
print(tgt_layer_idx, tgt_layer_type)
def visualize_filter_of_convolution_layer(
self, layer, n_rows, n_cols, filter_shape, figsize=(8, 8)):
n_filters = layer.w.shape[1]
if n_filters < n_rows * n_cols:
            msg = 'n_rows and n_cols are too big.\n'\
+ 'n_filters : %d\n' % n_filters\
+ 'n_rows : %d\n' % n_rows\
+ 'n_cols : %d\n' % n_cols
raise DNNetRuntimeError(msg)
w = layer.w[1:, :n_rows*n_cols]
img = w.T.reshape(-1, filter_shape[0], filter_shape[1])
plt.figure(figsize=figsize)
plt.imshow(img)
plt.show()
def visualize_filter_of_affine_layer(
self, layer, n_rows, n_cols, filter_shape, figsize=(8, 8)):
"""Visualize filters.
Weight matrix in affine layer or convolution layer
can be shown as image.
If weight matrix is so big that all filters cannot be displayed,
displayed filters are randomly selected.
Arguments
---------
        layer : Layer
            Affine layer whose weight matrix is visualized.
        n_rows : int
            Number of filters to display in the direction of rows.
        n_cols : int
            Number of filters to display in the direction of cols.
        filter_shape : tuple (rows, cols)
            Shape of a single filter. In the case of multi-channel, filters
            are taken as single channel by taking average over channels.
"""
w = layer.w
        if (w.shape[0] - 1) != prod(filter_shape):
            msg = '(w.shape[0] - 1) != prod(filter_shape)\n'\
                  + 'w.shape[0] : %d\n' % w.shape[0]\
                  + 'prod(filter_shape) : %d' % prod(filter_shape)
raise DNNetRuntimeError(msg)
#if w.shape[1] < prod(layout):
#img = w2im(self.layers[tgt_index].w, shape, layout)
#plt.figure(figsize=figsize)
#plt.imshow(img)
#plt.show()
def show_filters(self, index, shape, layout, figsize=(8, 8)):
"""Visualize filters.
Weight matrix in affine layer or convolution layer
can be shown as image.
If weight matrix is so big that all filters cannot be displayed,
displayed filters are randomly selected.
Arguments
---------
index : int
index-th affine/convolution layer's weight matrix is visualized.
This index starts from 0, that is,
the first layer with weight matrix is 0-th.
            If this value is out of range, raise DNNetRuntimeError.
shape : tuple (rows, cols)
Shape of filter. In the case of multi-channel, filters are
taken as single channel by taking average over channels.
layout : tuple (rows, cols)
            Number of filters to display
            in the direction of rows and cols respectively.
"""
# Get index of layer which is index-th layer with weight matrix.
num_of_layer_with_filter = 0
tgt_index = None
for i, layer in enumerate(self.layers, 0):
if layer.has_weight():
if num_of_layer_with_filter == index:
tgt_index = i
break
num_of_layer_with_filter += 1
if tgt_index is None:
msg = str(index) + '-th layer with weight matrix doesn\'t exist.'
raise DNNetRuntimeError(msg)
img = w2im(self.layers[tgt_index].w, shape, layout)
plt.figure(figsize=figsize)
plt.imshow(img)
plt.show()
def __convert_dtype(self, x, y):
"""Convert data type of features into selected one in constructor."""
return x.astype(self.dtype), y.astype(self.dtype)
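# Illustrative usage sketch, not part of the original module. The hidden layer,
# optimizer and loss function class names below are placeholders, not actual
# dnnet classes; only NeuralNetwork's own interface (add, compile, fit,
# predict) is taken from this file.
#
#     model = NeuralNetwork(input_shape=784, dtype=np.float32)
#     model.add(SomeHiddenLayer(...))                     # placeholder layer
#     model.compile()
#     lc = model.fit(x_train, y_train,
#                    optimizer=SomeOptimizer(),           # placeholder
#                    loss_function=SomeLossFunction(),    # placeholder
#                    epochs=10, batch_size=100)
#     y_pred = model.predict(x_test)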
|
bsd-3-clause
|
AlexanderFabisch/scikit-learn
|
sklearn/linear_model/__init__.py
|
270
|
3096
|
"""
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
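# Illustrative usage sketch, not part of the original module: fitting one of
# the estimators exported above.
#
#     from sklearn.linear_model import Ridge
#     clf = Ridge(alpha=1.0)
#     clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
#     clf.predict([[1.5, 1.5]])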
|
bsd-3-clause
|
parkus/arc
|
test_script_roberts.py
|
1
|
2526
|
import arc.arc_functions as arc # so that reload(arc) works, but note that you can also just do "import arc"
import numpy as np
import numpy.random as r
from math import pi
# for repeatability, seed the random number generator
r.seed(42)
## GENERATE DATA
# create data that are sines of random period, amplitude, and phase
N = 1000 # number of data points
M = 200 # number of data series
t = np.arange(N)
# generate random amplitudes, periods, and phases
amps = r.normal(1.0, 0.3, M)
phases = r.uniform(0.0, 2*pi, M)
periods = r.uniform(4.0, 4.0 * N, M)
# compute sine curves and make NxM data array
make_curve = lambda a, P, ph: a * np.sin(2*pi * t / P + ph)
data_list = map(make_curve, amps, periods, phases)
data = np.transpose(data_list)
# add noise
rel_amps = r.uniform(high=0.05, size=M)
rel_noise = r.normal(size=[N, M]) * rel_amps[np.newaxis, :]
abs_noise = rel_noise * amps[np.newaxis, :]
data = data + abs_noise
## CREATE TRENDS
# function to mean subtract and std dev normalize
norm = lambda y: (y - np.mean(y)) / np.std(y)
# exponential decay
exp = norm(np.exp( t / (N * 1.0)))
# quadratic
quad = norm((t - N/2.0)**2)
## INJECT TRENDS
def trend_array(rel_amps, trend):
abs_amps = rel_amps * amps
trend_list = [trend * a for a in abs_amps]
return np.transpose(trend_list)
i = np.arange(M)
decaying_amps = 5.0 * np.exp(-i / (M / 4))
quad_arr = trend_array(decaying_amps, quad)
exp_arr = trend_array(2 * decaying_amps[::-1], exp)
trended = data + quad_arr + exp_arr
# injection function
#def inject(data, trend, amp_mean, amp_std):
# shape = (amp_mean/amp_std)**2
# scale = amp_mean/shape
# rel_amps = r.gamma(shape, scale, M)
# trends = trend_array(rel_amps, trend)
# return data + trends
## rel_amps = r.normal(amp_mean, amp_std, M)
#
#
## inject!
#trended = np.copy(data)
#trended = inject(trended, exp, 2.0, 1.0)
#trended = inject(trended, quad, 1.0, 0.5)
## FIND TRENDS IN SUBSET OF DATA
subset = trended[:, r.choice(M, size=50)]
trends = arc.arc(t, subset, rho_min=0.6, denoise=arc.arc_emd, refine=False)
## PLOT TRENDS
import matplotlib.pyplot as plt
plt.figure()
plt.plot(t, trends)
plt.plot(exp, '--', quad, '--')
plt.title('injected (solid) and retrieved (dashed) trends')
## FIT TRENDS TO A SERIES
i = r.choice(range(M))
y0 = data[:, i]
y1 = trended[:, i]
detrended, trendfit = arc.trend_remove(y1, trends)
plt.figure()
plt.plot(t, np.transpose([y0, y1, trendfit, detrended]))
plt.legend(['original data', 'data with trend', 'best-fit trend', 'detrended data'])
|
mit
|
vrieni/orange
|
Orange/clustering/mixture.py
|
6
|
14548
|
"""
*******************************
Gaussian Mixtures (``mixture``)
*******************************
This module implements a Gaussian mixture model.
Example ::
>>> mixture = GaussianMixture(data, n=3)
>>> print mixture.means
>>> print mixture.weights
>>> print mixture.covariances
>>> plot_model(data, mixture, samples=40)
"""
import sys, os
import numpy
import random
import Orange
class GMModel(object):
""" Gaussian mixture model
"""
def __init__(self, weights, means, covariances, inv_covariances=None,
cov_determinants=None):
self.weights = weights
self.means = means
self.covariances = covariances
if inv_covariances is None:
self.inv_covariances = [numpy.linalg.pinv(cov) for cov in covariances]
else:
self.inv_covariances = inv_covariances
if cov_determinants is None:
self.cov_determinants = [numpy.linalg.det(cov) for cov in covariances]
else:
self.cov_determinants = cov_determinants
def __call__(self, instance):
""" Return the probability of instance.
"""
return numpy.sum(prob_est([instance], self.weights, self.means,
self.covariances,
self.inv_covariances,
self.cov_determinants))
def __getitem__(self, index):
""" Return the index-th gaussian.
"""
return GMModel([1.0], self.means[index: index + 1],
self.covariances[index: index + 1],
self.inv_covariances[index: index + 1],
self.cov_determinants[index: index + 1])
def __len__(self):
return len(self.weights)
def init_random(data, n, *args, **kwargs):
""" Init random means and correlations from a data table.
:param data: data table
:type data: :class:`Orange.data.Table`
:param n: Number of centers and correlations to return.
:type n: int
"""
if isinstance(data, Orange.data.Table):
array, w, c = data.toNumpyMA()
else:
array = numpy.asarray(data)
    min, max = array.min(0), array.max(0)
dim = array.shape[1]
means = numpy.zeros((n, dim))
for i in range(n):
means[i] = [numpy.random.uniform(low, high) for low, high in zip(min, max)]
correlations = [numpy.asmatrix(numpy.eye(dim)) for i in range(n)]
return means, correlations
def init_kmeans(data, n, *args, **kwargs):
""" Init with k-means algorithm.
:param data: data table
:type data: :class:`Orange.data.Table`
:param n: Number of centers and correlations to return.
:type n: int
"""
if not isinstance(data, Orange.data.Table):
raise TypeError("Orange.data.Table instance expected!")
from Orange.clustering.kmeans import Clustering
km = Clustering(data, centroids=n, maxiters=20, nstart=3)
centers = Orange.data.Table(km.centroids)
centers, w, c = centers.toNumpyMA()
dim = len(data.domain.attributes)
correlations = [numpy.asmatrix(numpy.eye(dim)) for i in range(n)]
return centers, correlations
def prob_est1(data, mean, covariance, inv_covariance=None, det=None):
""" Return the probability of data given mean and covariance matrix
"""
data = numpy.asmatrix(data)
mean = numpy.asmatrix(mean)
if inv_covariance is None:
inv_covariance = numpy.linalg.pinv(covariance)
inv_covariance = numpy.asmatrix(inv_covariance)
diff = data - mean
p = numpy.zeros(data.shape[0])
for i in range(data.shape[0]):
d = diff[i]
p[i] = d * inv_covariance * d.T
p *= -0.5
p = numpy.exp(p)
    # normalization constant: (2*pi)^(d/2) * sqrt(det(Sigma)), where d is the dimension
    p /= numpy.power(2 * numpy.pi, covariance.shape[0] / 2.0)
    if det is None:
        det = numpy.linalg.det(covariance)
    assert(det != 0.0)
    p /= numpy.sqrt(det)
return p
def prob_est(data, weights, means, covariances, inv_covariances=None, cov_determinants=None):
""" Return the probability estimation of data for each
    gaussian given weights, means and covariances.
"""
if inv_covariances is None:
inv_covariances = [numpy.linalg.pinv(cov) for cov in covariances]
if cov_determinants is None:
cov_determinants = [numpy.linalg.det(cov) for cov in covariances]
data = numpy.asmatrix(data)
probs = numpy.zeros((data.shape[0], len(weights)))
for i, (w, mean, cov, inv_cov, det) in enumerate(zip(weights, means,
covariances, inv_covariances,
cov_determinants)):
probs[:, i] = w * prob_est1(data, mean, cov, inv_cov, det)
return probs
class EMSolver(object):
""" An EM solver for gaussian mixture model
"""
_TRACE_MEAN = False
def __init__(self, data, weights, means, covariances):
self.data = data
self.weights = weights
self.means = means
self.covariances = covariances
self.inv_covariances = [numpy.matrix(numpy.linalg.pinv(cov)) for cov in covariances]
self.cov_determinants = [numpy.linalg.det(cov) for cov in self.covariances]
self.n_clusters = len(self.weights)
self.data_dim = self.data.shape[1]
self.probs = prob_est(data, weights, means, covariances,
self.inv_covariances, self.cov_determinants)
self.log_likelihood = self._log_likelihood()
# print "W", self.weights
# print "P", self.probs
# print "L", self.log_likelihood
# print "C", self.covariances
# print "Det", self.cov_determinants
def _log_likelihood(self):
""" Compute the log likelihood of the current solution.
"""
log_p = numpy.log(numpy.sum(self.weights * self.probs, axis=0))
return 1.0 / len(self.data) * numpy.sum(log_p)
def E_step(self):
""" E Step
"""
self.probs = prob_est(self.data, self.weights, self.means,
self.covariances, self.inv_covariances)
# print "PPP", self.probs
# print "P sum", numpy.sum(self.probs, axis=1).reshape((-1, 1))
self.probs /= numpy.sum(self.probs, axis=1).reshape((-1, 1))
# Update the Q
# self.Q = 0.0
# prob_sum = numpy.sum(self.probs, axis=0)
# self.Q = sum([p*(numpy.log(w) - 0.5 * numpy.linalg.det(cov)) \
# for p, w, cov in zip(prob_sum, self.weights,
# self.covariances)]) * \
# len(self.data)
#
# for i in range(len(data)):
# for j in range(self.n_clusters):
# diff = numpy.asmatrix(self.data[i] - self.means[j])
# self.Q += - 0.5 * self.probs[i, j] * diff.T * self.inv_covariances[j] * diff
# print self.Q
self.log_likelihood = self._log_likelihood()
# print "Prob:", self.probs
# print "Log like.:", self.log_likelihood
def M_step(self):
""" M step
"""
# Compute the new weights
prob_sum = numpy.sum(self.probs, axis=0)
weights = prob_sum / numpy.sum(prob_sum)
# Compute the new means
means = []
for j in range(self.n_clusters):
means.append(numpy.sum(self.probs[:, j].reshape((-1, 1)) * self.data, axis=0) / prob_sum[j])
# Compute the new covariances
covariances = []
cov_determinants = []
for j in range(self.n_clusters):
cov = numpy.zeros(self.covariances[j].shape)
diff = self.data - means[j]
diff = numpy.asmatrix(diff)
for i in range(len(self.data)): # TODO: speed up
cov += self.probs[i, j] * diff[i].T * diff[i]
cov *= 1.0 / prob_sum[j]
det = numpy.linalg.det(cov)
covariances.append(numpy.asmatrix(cov))
cov_determinants.append(det)
# self.inv_covariances[j] = numpy.linalg.pinv(cov)
# self.cov_determinants[j] = det
self.weights = weights
self.means = numpy.asmatrix(means)
self.covariances = covariances
self.cov_determinants = cov_determinants
def one_step(self):
""" One iteration of both M and E step.
"""
self.E_step()
self.M_step()
def run(self, max_iter=sys.maxint, eps=1e-5):
""" Run the EM algorithm.
"""
if self._TRACE_MEAN:
from pylab import plot, show, draw, ion
ion()
plot(self.data[:, 0], self.data[:, 1], "ro")
vec_plot = plot(self.means[:, 0], self.means[:, 1], "bo")[0]
curr_iter = 0
while True:
old_objective = self.log_likelihood
self.one_step()
if self._TRACE_MEAN:
vec_plot.set_xdata(self.means[:, 0])
vec_plot.set_ydata(self.means[:, 1])
draw()
curr_iter += 1
# print curr_iter
# print abs(old_objective - self.log_likelihood)
if abs(old_objective - self.log_likelihood) < eps or curr_iter > max_iter:
break
class GaussianMixture(object):
""" Computes the gaussian mixture model from an Orange data-set.
"""
def __new__(cls, data=None, weight_id=None, **kwargs):
self = object.__new__(cls)
if data is not None:
self.__init__(**kwargs)
return self.__call__(data, weight_id)
else:
return self
def __init__(self, n=3, init_function=init_kmeans):
self.n = n
self.init_function = init_function
def __call__(self, data, weight_id=None):
from Orange.data import preprocess
#import Preprocessor_impute, DomainContinuizer
# data = Preprocessor_impute(data)
dc = preprocess.DomainContinuizer()
dc.multinomial_treatment = preprocess.DomainContinuizer.AsOrdinal
dc.continuous_treatment = preprocess.DomainContinuizer.NormalizeByVariance
dc.class_treatment = preprocess.DomainContinuizer.Ignore
domain = dc(data)
data = data.translate(domain)
means, correlations = self.init_function(data, self.n)
means = numpy.asmatrix(means)
array, _, _ = data.to_numpy_MA()
# avg = numpy.mean(array, axis=0)
# array -= avg.reshape((1, -1))
# means -= avg.reshape((1, -1))
# std = numpy.std(array, axis=0)
# array /= std.reshape((1, -1))
# means /= std.reshape((1, -1))
solver = EMSolver(array, numpy.ones(self.n) / self.n,
means, correlations)
solver.run()
norm_model = GMModel(solver.weights, solver.means, solver.covariances)
return GMClusterModel(domain, norm_model)
class GMClusterModel(object):
"""
"""
def __init__(self, domain, norm_model):
self.domain = domain
self.norm_model = norm_model
self.cluster_vars = [Orange.feature.Continuous("cluster %i" % i)\
for i in range(len(norm_model))]
self.weights = self.norm_model.weights
self.means = self.norm_model.means
self.covariances = self.norm_model.covariances
self.inv_covariances = self.norm_model.inv_covariances
self.cov_determinants = self.norm_model.cov_determinants
def __call__(self, instance, *args):
data = Orange.data.Table(self.domain, [instance])
data,_,_ = data.to_numpy_MA()
# print data
p = prob_est(data, self.norm_model.weights,
self.norm_model.means,
self.norm_model.covariances,
self.norm_model.inv_covariances,
self.norm_model.cov_determinants)
# print p
p /= numpy.sum(p)
vals = []
for p, var in zip(p[0], self.cluster_vars):
vals.append(var(p))
return vals
def plot_model(data_array, mixture, axis=(0, 1), samples=20, contour_lines=20):
""" Plot the scaterplot of data_array and the contour lines of the
probability for the mixture.
"""
import matplotlib
import matplotlib.pylab as plt
import matplotlib.cm as cm
axis = list(axis)
if isinstance(mixture, GMClusterModel):
mixture = mixture.norm_model
if isinstance(data_array, Orange.data.Table):
data_array, _, _ = data_array.to_numpy_MA()
array = data_array[:, axis]
weights = mixture.weights
means = mixture.means[:, axis]
covariances = [cov[axis,:][:, axis] for cov in mixture.covariances] # TODO: Need the whole marginal distribution.
gmm = GMModel(weights, means, covariances)
min = numpy.min(array, 0)
max = numpy.max(array, 0)
extent = (min[0], max[0], min[1], max[1])
X = numpy.linspace(min[0], max[0], samples)
Y = numpy.linspace(min[1], max[1], samples)
Z = numpy.zeros((X.shape[0], Y.shape[0]))
for i, x in enumerate(X):
for j, y in enumerate(Y):
Z[i, j] = gmm([x, y])
plt.plot(array[:,0], array[:,1], "ro")
plt.contour(X, Y, Z.T, contour_lines,
extent=extent)
im = plt.imshow(Z.T, interpolation='bilinear', origin='lower',
cmap=cm.gray, extent=extent)
plt.plot(means[:, 0], means[:, 1], "b+")
plt.show()
def test(seed=0):
# data = Orange.data.Table(os.path.expanduser("brown-selected.tab"))
# data = Orange.data.Table(os.path.expanduser("~/Documents/brown-selected-fss.tab"))
# data = Orange.data.Table(os.path.expanduser("~/Documents/brown-selected-fss-1.tab"))
# data = Orange.data.Table(os.path.expanduser("~/ozone1"))
data = Orange.data.Table("iris.tab")
# data = Orange.data.Table(Orange.data.Domain(data.domain[:2], None), data)
numpy.random.seed(seed)
random.seed(seed)
gmm = GaussianMixture(data, n=3, init_function=init_kmeans)
data = data.translate(gmm.domain)
plot_model(data, gmm, axis=(0, 2), samples=40, contour_lines=100)
if __name__ == "__main__":
test()
|
gpl-3.0
|
michalmonselise/sparklingpandas
|
sparklingpandas/pstatcounter.py
|
4
|
5444
|
"""
This module provides statistics for L{PRDD}s.
Look at the stats() method on PRDD for more info.
"""
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sparklingpandas.utils import add_pyspark_path
add_pyspark_path()
from pyspark.statcounter import StatCounter
import scipy.stats as scistats
import numpy as np
class PStatCounter(object):
"""
A wrapper around StatCounter which collects stats for multiple columns
"""
def __init__(self, dataframes, columns):
"""
Creates a stats counter for the provided DataFrames
computing the stats for all of the columns in columns.
Parameters
----------
dataframes: list of dataframes, containing the values to compute stats
on.
columns: list of strs, list of columns to compute the stats on.
"""
assert (not isinstance(columns, basestring)), "columns should be a " \
"list of strs, " \
"not a str!"
assert isinstance(columns, list), "columns should be a list!"
self._columns = columns
self._counters = dict((column, StatCounter()) for column in columns)
for df in dataframes:
self.merge(df)
def merge(self, frame):
"""
Add another DataFrame to the PStatCounter.
"""
        for column, values in frame.iteritems():
            counter = self._counters.get(column)
            # skip columns we are not tracking (temporary hack, fix later)
            if counter is None:
                continue
            for value in values:
                counter.merge(value)
def merge_pstats(self, other):
"""
Merge all of the stats counters of the other PStatCounter with our
counters.
"""
if not isinstance(other, PStatCounter):
raise Exception("Can only merge PStatcounters!")
for column, counter in self._counters.items():
other_counter = other._counters.get(column)
self._counters[column] = counter.mergeStats(other_counter)
return self
def __str__(self):
formatted_str = ""
for column, counter in self._counters.items():
formatted_str += "(field: %s, counters: %s)" % (column, counter)
return formatted_str
def __repr__(self):
return self.__str__()
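# Minimal usage sketch for PStatCounter (hypothetical data; assumes pandas is importable as pd):
#   import pandas as pd
#   df = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [4.0, 5.0, 6.0]})
#   print PStatCounter([df], columns=["a", "b"])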
class ColumnStatCounters(object):
"""
A wrapper around StatCounter which collects stats for multiple columns
"""
def __init__(self, dataframes=None, columns=None):
"""
Creates a stats counter for the provided data frames
computing the stats for all of the columns in columns.
Parameters
----------
        dataframes: list of dataframes, containing the values to compute stats
        on.
        columns: list of strs, list of columns to compute the stats on.
"""
self._column_stats = dict((column_name, StatCounter()) for
column_name in columns)
for single_df in dataframes:
self.merge(single_df)
def merge(self, frame):
"""
Add another DataFrame to the accumulated stats for each column.
Parameters
----------
frame: pandas DataFrame we will update our stats counter with.
"""
for column_name, _ in self._column_stats.items():
data_arr = frame[[column_name]].values
count, min_max_tup, mean, _, _, _ = \
scistats.describe(data_arr)
stats_counter = StatCounter()
stats_counter.n = count
stats_counter.mu = mean
stats_counter.m2 = np.sum((data_arr - mean) ** 2)
stats_counter.minValue, stats_counter.maxValue = min_max_tup
self._column_stats[column_name] = self._column_stats[
column_name].mergeStats(stats_counter)
return self
def merge_stats(self, other_col_counters):
"""
Merge statistics from a different column stats counter in to this one.
Parameters
----------
        other_col_counters: other ColumnStatCounters instance to merge into this one.
"""
for column_name, _ in self._column_stats.items():
self._column_stats[column_name] = self._column_stats[column_name] \
.mergeStats(other_col_counters._column_stats[column_name])
return self
def __str__(self):
formatted_str = ""
for column, counter in self._column_stats.items():
formatted_str += "(field: %s, counters: %s)" % (column, counter)
return formatted_str
def __repr__(self):
return self.__str__()
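# Minimal usage sketch for ColumnStatCounters (hypothetical data; assumes pandas is importable as pd):
#   import pandas as pd
#   df = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [4.0, 5.0, 6.0]})
#   col_stats = ColumnStatCounters([df], columns=["a", "b"])
#   print col_stats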
|
apache-2.0
|
imaculate/scikit-learn
|
sklearn/cluster/dbscan_.py
|
7
|
12319
|
# -*- coding: utf-8 -*-
"""
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <[email protected]>
# Joel Nothman <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClusterMixin
from ..metrics import pairwise_distances
from ..utils import check_array, check_consistent_length
from ..utils.fixes import astype
from ..neighbors import NearestNeighbors
from ._dbscan_inner import dbscan_inner
def dbscan(X, eps=0.5, min_samples=5, metric='minkowski',
algorithm='auto', leaf_size=30, p=2, sample_weight=None, n_jobs=1):
"""Perform DBSCAN clustering from vector array or distance matrix.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
core_samples : array [n_core_samples]
Indices of core samples.
labels : array [n_samples]
Cluster labels for each point. Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
if not eps > 0.0:
raise ValueError("eps must be positive.")
X = check_array(X, accept_sparse='csr')
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
check_consistent_length(X, sample_weight)
    # Calculate neighborhood for all samples. This leaves the original point
    # in, which needs to be considered later (i.e. point i is in the
    # neighborhood of point i; while true, it is useless information.)
if metric == 'precomputed' and sparse.issparse(X):
neighborhoods = np.empty(X.shape[0], dtype=object)
X.sum_duplicates() # XXX: modifies X's internals in-place
X_mask = X.data <= eps
masked_indices = astype(X.indices, np.intp, copy=False)[X_mask]
masked_indptr = np.cumsum(X_mask)[X.indptr[1:] - 1]
# insert the diagonal: a point is its own neighbor, but 0 distance
# means absence from sparse matrix data
masked_indices = np.insert(masked_indices, masked_indptr,
np.arange(X.shape[0]))
masked_indptr = masked_indptr[:-1] + np.arange(1, X.shape[0])
# split into rows
neighborhoods[:] = np.split(masked_indices, masked_indptr)
else:
neighbors_model = NearestNeighbors(radius=eps, algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p,
n_jobs=n_jobs)
neighbors_model.fit(X)
# This has worst case O(n^2) memory complexity
neighborhoods = neighbors_model.radius_neighbors(X, eps,
return_distance=False)
if sample_weight is None:
n_neighbors = np.array([len(neighbors)
for neighbors in neighborhoods])
else:
n_neighbors = np.array([np.sum(sample_weight[neighbors])
for neighbors in neighborhoods])
# Initially, all samples are noise.
labels = -np.ones(X.shape[0], dtype=np.intp)
# A list of all core samples found.
core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels)
return np.where(core_samples)[0], labels
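# Minimal usage sketch (toy data; illustration only, not part of the library):
#   X = np.array([[1, 2], [2, 2], [2, 3], [8, 7], [8, 8], [25, 80]])
#   core_sample_indices, labels = dbscan(X, eps=3, min_samples=2)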
class DBSCAN(BaseEstimator, ClusterMixin):
"""Perform DBSCAN clustering from vector array or distance matrix.
DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
Finds core samples of high density and expands clusters from them.
Good for data which contains clusters of similar density.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
.. versionadded:: 0.17
metric *precomputed* to accept precomputed sparse matrix.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
core_sample_indices_ : array, shape = [n_core_samples]
Indices of core samples.
components_ : array, shape = [n_core_samples, n_features]
Copy of each core sample found by training.
labels_ : array, shape = [n_samples]
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
def __init__(self, eps=0.5, min_samples=5, metric='euclidean',
algorithm='auto', leaf_size=30, p=None, n_jobs=1):
self.eps = eps
self.min_samples = min_samples
self.metric = metric
self.algorithm = algorithm
self.leaf_size = leaf_size
self.p = p
self.n_jobs = n_jobs
def fit(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features or distance matrix.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
"""
X = check_array(X, accept_sparse='csr')
clust = dbscan(X, sample_weight=sample_weight,
**self.get_params())
self.core_sample_indices_, self.labels_ = clust
if len(self.core_sample_indices_):
# fix for scipy sparse indexing issue
self.components_ = X[self.core_sample_indices_].copy()
else:
# no core samples
self.components_ = np.empty((0, X.shape[1]))
return self
def fit_predict(self, X, y=None, sample_weight=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
self.fit(X, sample_weight=sample_weight)
return self.labels_
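# Minimal estimator usage sketch (toy data; illustration only, not part of the library):
#   X = np.array([[1, 2], [2, 2], [2, 3], [8, 7], [8, 8], [25, 80]])
#   labels = DBSCAN(eps=3, min_samples=2).fit_predict(X)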
|
bsd-3-clause
|
ivanamihalek/tcga
|
tcga/02_expression/206_store_distros.py
|
1
|
17316
|
#!/usr/bin/python -u
#
# This source code is part of tcga, a TCGA processing pipeline, written by Ivana Mihalek.
# Copyright (C) 2014-2016 Ivana Mihalek.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see<http://www.gnu.org/licenses/>.
#
# Contact: [email protected]
#
#/usr/local/bin/python -u
# this version of python has scipy 0.15.1 - earlier versions had some bug in lognorm; but it doesn't have MySQLdb
# to speed things up, make an index (in 01_maf_somatic_mutations_table.sql):
# create index mutation_idx on somatic_mutations (tumor_sample_barcode, chromosome, strand, start_position)
# note: I am assuming that no tumor will have the exact same mutation in both alleles, for the simple reason that I do not see
# how the two entries in the database would then be distinguished from one another
# (rather, if tumor_seq_allele1 == tumor_seq_allele2 != match_norm_seq_allele1 and tumor_seq_allele2 != match_norm_seq_allele2
# then I have something like that)
# for a given coefficient of variation CV = stdev/mean, the lognormal is more right-skewed (skewness = CV**3 + 3*CV) than the gamma (skewness = 2*CV).
import MySQLdb
from tcga_utils.mysql import *
from math import sqrt, floor, ceil, exp
import os
from scipy import stats
from time import time
import matplotlib.pyplot as plt
import numpy as np
from random import sample
output = True
to_file = False
use_normal_tissue = False
store_in_db = False
if store_in_db: use_normal_tissue = False # at least until we create a separate table
# tumor: ['01', '03', '08', '09']
# normal: ['10', '11', '12', '14']
#########################################
def bad (cursor, symbol):
if symbol[:3] == 'LOC': return True
if symbol[0] == 'C' and 'ORF' in symbol: return True
if symbol[:4]== 'KIAA': return True
switch_to_db(cursor, 'baseline')
qry = "select locus_type from hgnc_id_translation where approved_symbol = '%s'" % symbol
rows = search_db(cursor, qry)
if not rows: return True
if not rows[0][0] == 'gene with protein product': return True
return False
#########################################
def blurb (symbol, description, comment, outf, cursor):
[nobs, [min,max], mean, variance, skewness, kurtosis, normaltest, left, right] = description
if output:
if False:
print >> outf,"===================================="
print >> outf,"%s " % symbol
print >> outf,"pts: %4d min = %6.2f max = %10.2f " % ( nobs, min, max)
print >> outf,"mean = %10.2f stdev = %6.2f skew = %5.2f kurt = %8.2f" % (mean, sqrt(variance), skewness, kurtosis)
print >> outf,"fract left = %5.3f fract right = %5.3f " % (left, right)
print >> outf, comment
else:
print >> outf,"%15s " % symbol,
print >> outf,"%4d %6.2f %10.2f " % ( nobs, min, max),
print >> outf,"%10.2f %6.2f %5.2f %8.2f" % (mean, sqrt(variance), skewness, kurtosis),
print >> outf," %5.3f %5.3f " % (left, right)
#print >> outf, comment
# if we are storing, do that here too
if store_in_db:
fixed_fields = {'symbol':symbol}
update_fields = {'number_of_points':nobs, 'min':min, 'max':max, 'mean':mean,
'stdev':sqrt(variance), 'skewness':skewness, 'kurtosis':kurtosis, 'normaltest':normaltest}
ok = store_or_update (cursor, 'rnaseq_distro_description', fixed_fields, update_fields)
if not ok:
print 'store failure:'
print fixed_fields
print update_fields
exit(1)
#########################################
def is_bimodal(histogram):
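    # A histogram is treated as bimodal if some bin sits in a valley, i.e. there is
    # a strictly taller bin (exceeding max(10, the valley bin's count)) both somewhere
    # to its left and somewhere to its right.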
for i in range (1, len(histogram)):
val = histogram[i]
left_max = max(10,val)
left_max_found = False
for j in range(0,i):
if histogram[j] > left_max:
left_max = histogram[j]
left_max_found = True
break
right_max = max(10,val)
right_max_found = False
for j in range(i+1, len(histogram)):
            if histogram[j] > right_max:
right_max = histogram[j]
right_max_found = True
break
if left_max_found and right_max_found:
return True
return False
#################################################
def optimize_distro_domain (symbol,val_array, init_width, mean, stdev, distro, verbose=True):
if distro=='gamma':
model_distro = stats.gamma
elif distro== 'lognorm':
model_distro = stats.lognorm
done = False
round_ct = 0
right_side = False
pval_max = -1.0
prev_pval = -1.0
pval = -1.0
opt_bound = [0,len(val_array)]
opt_params = [0, 0, 0]
left_cut = 0
right_cut = len(val_array)
# take the core of the data and then work outwards from there
while (val_array[left_cut] < mean - stdev*init_width) and left_cut < len(val_array): left_cut += 1
while (val_array[right_cut-1] > mean + stdev*init_width) and right_cut >1: right_cut -= 1
left_bound = [0,left_cut]
right_bound = [right_cut,len(val_array)]
init_left_cut = left_cut
init_right_cut = right_cut
    while not done and round_ct <= len(val_array):  # stop when done or after too many rounds
round_ct += 1
prev_pval = pval
[shape, location, scale] = model_distro.fit(val_array[left_cut:right_cut])
[D, pval] = stats.kstest(val_array[left_cut:right_cut], distro, (shape, location, scale))
test1 = (pval < prev_pval)
#test2 = (location < 0.0)
test2 = False
if distro=='gamma':
test3 = (shape < 1.0)
else:
test3 = False
if pval >= 0.9 and not test2 and not test3 and right_side==True and round_ct>1:
if verbose: print "passed the basic citerion - out of the optimization loop with cut values", left_cut, right_cut
done = True
break # let's break here not to go crazy with the indentation
if right_side:
if round_ct>1 and (test1 or test3 ): #getting worse, backpedal
right_bound[1] = right_cut
right_cut = int (floor ( (right_bound[1] + right_bound[0])/2.0 ))
if verbose: print "backpedal: right_cut =", right_cut
if right_bound[1] <= right_bound[0]+1:
done=True
if verbose: print "the interval closed, were done on the right", right_bound
if right_cut == init_right_cut:
if verbose: print "we're back ot the iniitival value of the right cut (done)", right_cut
if verbose: done=True
else: #this is an improvement, keep moving in the same direction
if (pval > pval_max and not test2 and not test3):
pval_max = pval
opt_bound = [left_cut, right_cut]
opt_params = [shape, location, scale]
# but first check if there is room to move
if right_bound[1] <= right_bound[0]+1:
done = True
if verbose: print "we're done on the right - bounds:", right_bound
else:
right_bound[0] = right_cut
right_cut = int (ceil ( (right_bound[1] + right_bound[0])/2.0 ))
if verbose: print "move forward: right_cut =", right_cut
else: # left_side
if round_ct>1 and (test1 or test3 ): #getting worse, backpedal
left_bound[0] = left_cut
left_cut = int (ceil ((left_bound[1] + left_bound[0])/2.0))
if verbose: print "backpedal: left_cut =", left_cut
if left_bound[1] <= left_bound[0]+1:
right_side = True
if verbose: print "the interval closed, were done on the left", left_bound
if left_cut == init_left_cut:
if verbose: print "we're back to the initial value of the left cut (done)", left_cut
right_side =True
else: #this is an improvement, keep moving in the same direction
if (pval > pval_max and not test2 and not test3):
pval_max = pval
opt_bound = [left_cut, right_cut]
opt_params = [shape, location, scale]
if left_bound[1] <= left_bound[0]+1:
right_side = True
round_ct = 0
if verbose: print "we're done on the left - bounds:", left_bound
else:
left_bound[1] = left_cut
left_cut = int (floor ( (left_bound[1] + left_bound[0])/2.0 ))
if verbose: print "move forward: left_cut =", left_cut
return [pval_max] + opt_bound + opt_params
#########################################
def optimization_loop (symbol, val_array, mean, stdev, distro, verbose=False):
opt_results = []
pval_max = -1
for init_width in [10, 5, 2, 1, 0.5]:
        retvals = optimize_distro_domain (symbol, val_array, init_width, mean, stdev, distro, verbose)
pval = retvals[0]
if (pval > pval_max):
pval_max = pval
opt_results = retvals
if pval > 0.9:
break
return opt_results
#########################################
def store (cursor, symbol, distro, description, distro_params):
fixed_fields = {'symbol':symbol}
update_fields = {}
[nobs, [min,max], mean, variance, skew, kurtosis] = description
update_fields['number_of_points'] = nobs
update_fields['min'] = min
update_fields['max'] = max
update_fields['mean'] = mean
update_fields['stdev'] = sqrt(variance)
update_fields['skewness'] = skew
update_fields['kurtosis'] = kurtosis
update_fields['distro'] = distro
if distro_params:
update_fields['KL_pval'] = distro_params[0]
update_fields['left_cut'] = distro_params[1]
update_fields['right_cut'] = distro_params[2]
update_fields['shape'] = distro_params[3]
update_fields['location'] = distro_params[4]
update_fields['scale'] = distro_params[5]
update_fields['interval_endpoints'] = distro_params[6]
ok = store_or_update(cursor, 'rnaseq_distro_description', fixed_fields, update_fields)
if not ok:
print 'store failure:'
print fixed_fields
print update_fields
exit(1)
#########################################
def store_fitted (cursor, symbol, distro, description, opt_result_lognorm):
if distro=="lognorm":
interval = stats.lognorm.interval
elif distro=="gamma":
interval = stats.gamma.interval
else:
print "unrecognized distro:", distro
exit(1)
[left_cut, right_cut] = opt_result_lognorm[1:3]
[shape, location, scale] = opt_result_lognorm[3:]
intervals_blob = ""
for alpha in [99, 95, 90]:
if intervals_blob: intervals_blob += ";"
intervals_blob += "%.2f;%.2f" % tuple(interval(alpha/100.0, shape, location, scale))
distro_params = opt_result_lognorm + [intervals_blob]
store (cursor, symbol, distro, description, distro_params)
#########################################
def process_data_set (cursor, db_name, gene_list):
switch_to_db(cursor, db_name)
if use_normal_tissue:
qry = "select count(1) from rnaseq_rpkm where source_code in (10,11,12,14)"
rows = search_db(cursor, qry)
if not rows or not rows[0][0] > 0:
print "no normal tissue samples in ", db_name
return
values = {}
no_match = []
start = time()
ct = 0
# get the scaling factors
qry = "select * from rnaseq_scaling"
rows = search_db(cursor, qry)
if not rows:
print "no scaling info"
exit(1)
scaling = {}
for row in rows:
[sample_id, scaling_factor] = row
scaling[sample_id] = scaling_factor
#toyset = sample(gene_list, 100)
toyset = ['TP53', 'RPL5', 'RPL11', "RPL22", 'WNT11', 'WLS', "PORCN", 'MDM2',
'CDKN2A', 'ACTN', 'ACTB', 'LEP', 'GPR161', 'CDK2', 'HIF1A',
'ERBB2', 'PTEN','CDKN2A', 'LTN1','NEMF', 'NMD3','EIF6'] # melanoma
for symbol in toyset:
ct += 1
if not ct%1000: print "%d done in %5.2fs" % (ct, time() - start)
if use_normal_tissue:
qry = "select rpkm, experiment_id from rnaseq_rpkm where symbol='%s' and source_code in (10,11,12,14)" % symbol
else:
qry = "select rpkm, experiment_id from rnaseq_rpkm where symbol='%s' and source_code in (1,3,8,9)" % symbol
rows = search_db(cursor, qry)
if not rows:
#print "no match for ", symbol
no_match.append(symbol)
continue
values[symbol] = []
for row in rows:
[rpkm, sample_id] = row
values[symbol].append(rpkm*scaling[sample_id])
#print "search done in %5.2fs" % (time() - start)
        # TODO: looks like some name resolution is in order here in general
tot = 0
if not values.has_key(symbol): continue
val_array = values[symbol]
if bad (cursor, symbol): continue
if not val_array: continue
switch_to_db(cursor, db_name)
tot += 1
description = stats.describe(val_array)
[nobs, [min,max], mean, variance, skewness, kurtosis] = description
stdev = sqrt(variance)
if mean < 2:
#print symbol, "weak"
store (cursor, symbol, "weak", description, [])
continue
if nobs < 20:
#print "too few samples for a fit - bailing out"
store (cursor, symbol, "small", description, [])
continue
val_array = sorted(val_array)
distro = 'lognorm'
opt_result_lognorm = optimization_loop (symbol, val_array, mean, stdev, distro, verbose=False)
lognorm_ok = (len(opt_result_lognorm)==6)
# if we are happy with the lognorm fit, we won't do gamma fitting at all
if lognorm_ok and opt_result_lognorm[0] > 0.9:
#print symbol, "lognorm p: %5.2f" % opt_result_lognorm[0]
store_fitted (cursor, symbol, "lognorm", description, opt_result_lognorm)
continue
distro = 'gamma'
opt_result_gamma = optimization_loop (symbol, val_array, mean, stdev, distro, verbose=False)
gamma_ok = (len(opt_result_gamma)==6)
# use gamma if it worked and lognorm failed
if gamma_ok and not lognorm_ok:
store_fitted (cursor, symbol, "gamma", description, opt_result_gamma)
#print "use gamma bcs lognorm failed"
continue
# one more chance for gamma if it is better fit, and covers larger interval than lognorm
use_gamma = False
if gamma_ok and lognorm_ok:
[left_cut_g, right_cut_g] = opt_result_gamma[1:3]
[left_cut_l, right_cut_l] = opt_result_lognorm[1:3]
use_gamma = (opt_result_gamma[0] - opt_result_lognorm[0]) > 0.1
use_gamma = use_gamma and left_cut_g < left_cut_l and right_cut_l < right_cut_g
if use_gamma:
store_fitted (cursor, symbol, "gamma", description, opt_result_gamma)
#print "use gamma bcs it is a batter fit"
continue
elif lognorm_ok: # lognorm did return something though we were hoping for something better
store_fitted (cursor, symbol, "lognorm", description, opt_result_lognorm)
#print "use lognorm after all %5.2f" % opt_result_lognorm[0]
continue
else:
#print "no lognorm, no gamma"
store (cursor, symbol, "fail", description, [])
#########################################
def main():
db = connect_to_mysql()
cursor = db.cursor()
db_names = ["BLCA", "BRCA", "COAD", "HNSC", "KIRC", "KIRP", "LIHC", "LUAD", "LUSC", "REA", "UCEC"]
db_names = ["BRCA", "COAD", "KIRC", "KIRP", "LIHC", "LUAD", "LUSC", "REA", "UCEC"]
# gene names
gene_list = []
if False:
switch_to_db(cursor, 'baseline')
qry = "select distinct approved_symbol from hgnc_id_translation where locus_type = 'gene with protein product' "
rows = search_db(cursor, qry)
gene_list = [row[0] for row in rows]
print "\t number of genes", len(gene_list)
for db_name in db_names:
print " ** ", db_name
process_data_set (cursor, db_name, gene_list)
cursor.close()
db.close()
#########################################
if __name__ == '__main__':
main()
|
gpl-3.0
|
alexsavio/scikit-learn
|
sklearn/manifold/locally_linear.py
|
6
|
25912
|
"""Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <[email protected]>
# Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.extmath import stable_cumsum
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[i] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg: float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = check_array(X, dtype=FLOAT_DTYPES)
Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3, n_jobs=1):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
        Amount of regularization when solving the least-squares
        problem. If None, use the default.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1, n_jobs=n_jobs).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state : numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None, n_jobs=1):
"""Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
            n_neighbors > n_components * (n_components + 3) / 2.
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg, n_jobs=n_jobs)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # add I to the diagonal to complete M = (I - W).T * (I - W)
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float64)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float64)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
# build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1] *
U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
# find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
# choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
# find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
# calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
# find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = stable_cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
# Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float64)
for i in range(N):
s_i = s_range[i]
# select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
# compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
# Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
# Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
# We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h) +
(1 - alpha_i) * w_reg[i, :, None])
# Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
# We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
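# Minimal usage sketch (toy data; illustration only, not part of the library):
#   rng = np.random.RandomState(0)
#   X = rng.rand(100, 3)
#   Y, err = locally_linear_embedding(X, n_neighbors=10, n_components=2)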
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
        ``n_neighbors > n_components * (n_components + 3) / 2``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
embedding_vectors_ : array-like, shape [n_components, n_samples]
Stores the embedding vectors
reconstruction_error_ : float
Reconstruction error associated with `embedding_vectors_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None, n_jobs=1):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
self.n_jobs = n_jobs
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm,
n_jobs=self.n_jobs)
random_state = check_random_state(self.random_state)
X = check_array(X, dtype=float)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state, reg=self.reg, n_jobs=self.n_jobs)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)
"""
check_is_fitted(self, "nbrs_")
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
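# ----------------------------------------------------------------------
# Hedged usage sketch (illustrative only, not part of the original module):
# run the estimator defined above end-to-end on a small random point cloud
# standing in for real manifold data.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = rng.rand(100, 5)  # any (n_samples, n_features) array works
    lle_demo = LocallyLinearEmbedding(n_neighbors=10, n_components=2,
                                      method='standard')
    X_embedded = lle_demo.fit_transform(X_demo)  # shape (100, 2)
    print(X_embedded.shape)
    print(lle_demo.reconstruction_error_)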
|
bsd-3-clause
|
PythonCharmers/bokeh
|
bokeh/sampledata/gapminder.py
|
41
|
2655
|
from __future__ import absolute_import
import pandas as pd
from os.path import join
import sys
from . import _data_dir
'''
This module provides pandas DataFrame instances of four
of the datasets from gapminder.org.
These are read in from CSVs that have been downloaded from Bokeh's
sample data on S3. The original code that generated the CSVs from the
raw gapminder data is available at the bottom of this file.
'''
data_dir = _data_dir()
datasets = [
'fertility',
'life_expectancy',
'population',
'regions',
]
for dataset in datasets:
filename = join(data_dir, 'gapminder_%s.csv' % dataset)
try:
setattr(
sys.modules[__name__],
dataset,
pd.read_csv(filename, index_col='Country')
)
except (IOError, OSError):
raise RuntimeError('Could not load gapminder data file "%s". Please execute bokeh.sampledata.download()' % filename)
__all__ = datasets
# ====================================================
# Original data is from Gapminder - www.gapminder.org.
# The google docs links are maintained by gapminder
# The following script was used to get the data from gapminder
# and process it into the csvs stored in bokeh's sampledata.
"""
population_url = "http://spreadsheets.google.com/pub?key=phAwcNAVuyj0XOoBL_n5tAQ&output=xls"
fertility_url = "http://spreadsheets.google.com/pub?key=phAwcNAVuyj0TAlJeCEzcGQ&output=xls"
life_expectancy_url = "http://spreadsheets.google.com/pub?key=tiAiXcrneZrUnnJ9dBU-PAw&output=xls"
regions_url = "https://docs.google.com/spreadsheets/d/1OxmGUNWeADbPJkQxVPupSOK5MbAECdqThnvyPrwG5Os/pub?gid=1&output=xls"
def _get_data(url):
    # Get the data from the url and return only 1964 - 2013
df = pd.read_excel(url, index_col=0)
df = df.unstack().unstack()
df = df[(df.index >= 1964) & (df.index <= 2013)]
df = df.unstack().unstack()
return df
fertility_df = _get_data(fertility_url)
life_expectancy_df = _get_data(life_expectancy_url)
population_df = _get_data(population_url)
regions_df = pd.read_excel(regions_url, index_col=0)
# have common countries across all data
fertility_df = fertility_df.drop(fertility_df.index.difference(life_expectancy_df.index))
population_df = population_df.drop(population_df.index.difference(life_expectancy_df.index))
regions_df = regions_df.drop(regions_df.index.difference(life_expectancy_df.index))
fertility_df.to_csv('gapminder_fertility.csv')
population_df.to_csv('gapminder_population.csv')
life_expectancy_df.to_csv('gapminder_life_expectancy.csv')
regions_df.to_csv('gapminder_regions.csv')
"""
# ======================================================
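# Hedged usage sketch (illustrative, not part of the original module): once
# bokeh.sampledata.download() has been run, the attributes defined above are
# plain pandas DataFrames indexed by country name.
if __name__ == "__main__":
    print(fertility.head())
    print(life_expectancy.loc['Germany'].head())  # 'Germany' is an illustrative index value
    print(population.shape)
    print(regions.head())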
|
bsd-3-clause
|
jplourenco/bokeh
|
examples/app/stock_applet/stock_app_simple.py
|
43
|
12408
|
"""
This file demonstrates a bokeh applet, which can either be viewed
directly on a bokeh-server, or embedded into a flask application.
See the README.md file in this directory for instructions on running.
"""
import logging
logging.basicConfig(level=logging.DEBUG)
from os import listdir
from os.path import dirname, join, splitext
import numpy as np
import pandas as pd
from bokeh.models import ColumnDataSource, Plot
from bokeh.plotting import figure, curdoc
from bokeh.properties import String, Instance
from bokeh.server.app import bokeh_app
from bokeh.server.utils.plugins import object_page
from bokeh.models.widgets import (HBox, VBox, VBoxForm, PreText,
Select, AppHBox, AppVBox, AppVBoxForm)
from bokeh.simpleapp import simpleapp
select1 = Select(name='ticker1', value='AAPL', options=['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO'])
select2 = Select(name='ticker2', value='GOOG', options=['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO'])
@simpleapp(select1, select2)
def stock(ticker1, ticker2):
pretext = PreText(text="", width=500)
df = get_data(ticker1, ticker2)
source = ColumnDataSource(data=df)
source.tags = ['main_source']
p = figure(
title="%s vs %s" % (ticker1, ticker2),
plot_width=400, plot_height=400,
tools="pan,wheel_zoom,box_select,reset",
title_text_font_size="10pt",
)
p.circle(ticker1 + "_returns", ticker2 + "_returns",
size=2,
nonselection_alpha=0.02,
source=source
)
stats = df.describe()
pretext.text = str(stats)
row1 = HBox(children=[p, pretext])
hist1 = hist_plot(df, ticker1)
hist2 = hist_plot(df, ticker2)
row2 = HBox(children=[hist1, hist2])
line1 = line_plot(ticker1, source)
line2 = line_plot(ticker2, source, line1.x_range)
output = VBox(children=[row1, row2, line1, line2])
return output
stock.route("/bokeh/stocks/")
@simpleapp(select1, select2)
def stock2(ticker1, ticker2):
pretext = PreText(text="", width=500)
df = get_data(ticker1, ticker2)
source = ColumnDataSource(data=df)
source.tags = ['main_source']
p = figure(
title="%s vs %s" % (ticker1, ticker2),
plot_width=400, plot_height=400,
tools="pan,wheel_zoom,box_select,reset",
title_text_font_size="10pt",
)
p.circle(ticker1 + "_returns", ticker2 + "_returns",
size=2,
nonselection_alpha=0.02,
source=source
)
stats = df.describe()
pretext.text = str(stats)
hist1 = hist_plot(df, ticker1)
hist2 = hist_plot(df, ticker2)
line1 = line_plot(ticker1, source)
line2 = line_plot(ticker2, source, line1.x_range)
return dict(scatterplot=p,
statstext=pretext,
hist1=hist1,
hist2=hist2,
line1=line1,
line2=line2)
@stock2.layout
def stock2_layout(app):
widgets = AppVBoxForm(app=app, children=['ticker1', 'ticker2'])
row1 = AppHBox(app=app, children=['scatterplot', 'statstext'])
row2 = AppHBox(app=app, children=['hist1', 'hist2'])
all_plots = AppVBox(app=app, children=[row1, row2, 'line1', 'line2'])
app = AppHBox(app=app, children=[widgets, all_plots])
return app
@stock2.update(['ticker1', 'ticker2'])
def stock2_update_input(ticker1, ticker2, app):
return stock2(ticker1, ticker2)
@stock2.update([({'tags' : 'main_source'}, ['selected'])])
def stock2_update_selection(ticker1, ticker2, app):
source = app.select_one({'tags' : 'main_source'})
df = get_data(ticker1, ticker2)
if source.selected:
selected_df = df.iloc[source.selected['1d']['indices'], :]
else:
selected_df = df
stats_text = app.objects['statstext']
stats_text.text = str(selected_df.describe())
return {
'hist1': hist_plot(df, ticker1, selected_df=selected_df),
'hist2': hist_plot(df, ticker2, selected_df=selected_df),
'statstext': stats_text,
}
stock2.route("/bokeh/stocks2/")
def hist_plot(df, ticker, selected_df=None):
if selected_df is None:
selected_df = df
global_hist, global_bins = np.histogram(df[ticker + "_returns"], bins=50)
hist, bins = np.histogram(selected_df[ticker + "_returns"], bins=50)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
start = global_bins.min()
end = global_bins.max()
top = hist.max()
p = figure(
title="%s hist" % ticker,
plot_width=500, plot_height=200,
tools="",
title_text_font_size="10pt",
x_range=[start, end],
y_range=[0, top],
)
p.rect(center, hist / 2.0, width, hist)
return p
def line_plot(ticker, source, x_range=None):
p = figure(
title=ticker,
x_range=x_range,
x_axis_type='datetime',
plot_width=1000, plot_height=200,
title_text_font_size="10pt",
tools="pan,wheel_zoom,box_select,reset"
)
p.circle(
'date', ticker,
size=2,
source=source,
nonselection_alpha=0.02
)
return p
# build up list of stock data in the daily folder
data_dir = join(dirname(__file__), "daily")
try:
tickers = listdir(data_dir)
except OSError as e:
print('Stock data not available, see README for download instructions.')
raise e
tickers = [splitext(x)[0].split("table_")[-1] for x in tickers]
# cache stock data as dict of pandas DataFrames
pd_cache = {}
def get_ticker_data(ticker):
fname = join(data_dir, "table_%s.csv" % ticker.lower())
data = pd.read_csv(
fname,
names=['date', 'foo', 'o', 'h', 'l', 'c', 'v'],
header=False,
parse_dates=['date']
)
data = data.set_index('date')
data = pd.DataFrame({ticker: data.c, ticker + "_returns": data.c.diff()})
return data
def get_data(ticker1, ticker2):
if pd_cache.get((ticker1, ticker2)) is not None:
return pd_cache.get((ticker1, ticker2))
    # only concatenate the two tickers' columns when they differ
if ticker1 != ticker2:
data1 = get_ticker_data(ticker1)
data2 = get_ticker_data(ticker2)
data = pd.concat([data1, data2], axis=1)
else:
data = get_ticker_data(ticker1)
data = data.dropna()
pd_cache[(ticker1, ticker2)] = data
return data
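# Hedged usage sketch (illustrative helper, not part of the original applet):
# pull the cached return series for two tickers, assuming the CSVs in the
# "daily" folder have been downloaded as described in the README.
def _example_get_data():
    df = get_data('AAPL', 'GOOG')
    return df[['AAPL_returns', 'GOOG_returns']].describe()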
# class StockApp(VBox):
# extra_generated_classes = [["StockApp", "StockApp", "VBox"]]
# jsmodel = "VBox"
# # text statistics
# pretext = Instance(PreText)
# # plots
# plot = Instance(Plot)
# line_plot1 = Instance(Plot)
# line_plot2 = Instance(Plot)
# hist1 = Instance(Plot)
# hist2 = Instance(Plot)
# # data source
# source = Instance(ColumnDataSource)
# # layout boxes
# mainrow = Instance(HBox)
# histrow = Instance(HBox)
# statsbox = Instance(VBox)
# # inputs
# ticker1 = String(default="AAPL")
# ticker2 = String(default="GOOG")
# ticker1_select = Instance(Select)
# ticker2_select = Instance(Select)
# input_box = Instance(VBoxForm)
# def __init__(self, *args, **kwargs):
# super(StockApp, self).__init__(*args, **kwargs)
# self._dfs = {}
# @classmethod
# def create(cls):
# """
# This function is called once, and is responsible for
# creating all objects (plots, datasources, etc)
# """
# # create layout widgets
# obj = cls()
# # create input widgets
# obj.make_inputs()
# # outputs
# obj.pretext = PreText(text="", width=500)
# obj.make_source()
# obj.make_plots()
# obj.make_stats()
# # layout
# obj.set_children()
# return obj
# def make_inputs(self):
# self.ticker1_select = Select(
# name='ticker1',
# value='AAPL',
# options=['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO']
# )
# self.ticker2_select = Select(
# name='ticker2',
# value='GOOG',
# options=['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO']
# )
# @property
# def selected_df(self):
# pandas_df = self.df
# selected = self.source.selected
# if selected:
# pandas_df = pandas_df.iloc[selected, :]
# return pandas_df
# def make_source(self):
# self.source = ColumnDataSource(data=self.df)
# def line_plot(self, ticker, x_range=None):
# p = figure(
# title=ticker,
# x_range=x_range,
# x_axis_type='datetime',
# plot_width=1000, plot_height=200,
# title_text_font_size="10pt",
# tools="pan,wheel_zoom,box_select,reset"
# )
# p.circle(
# 'date', ticker,
# size=2,
# source=self.source,
# nonselection_alpha=0.02
# )
# return p
# def hist_plot(self, ticker):
# global_hist, global_bins = np.histogram(self.df[ticker + "_returns"], bins=50)
# hist, bins = np.histogram(self.selected_df[ticker + "_returns"], bins=50)
# width = 0.7 * (bins[1] - bins[0])
# center = (bins[:-1] + bins[1:]) / 2
# start = global_bins.min()
# end = global_bins.max()
# top = hist.max()
# p = figure(
# title="%s hist" % ticker,
# plot_width=500, plot_height=200,
# tools="",
# title_text_font_size="10pt",
# x_range=[start, end],
# y_range=[0, top],
# )
# p.rect(center, hist / 2.0, width, hist)
# return p
# def make_plots(self):
# ticker1 = self.ticker1
# ticker2 = self.ticker2
# p = figure(
# title="%s vs %s" % (ticker1, ticker2),
# plot_width=400, plot_height=400,
# tools="pan,wheel_zoom,box_select,reset",
# title_text_font_size="10pt",
# )
# p.circle(ticker1 + "_returns", ticker2 + "_returns",
# size=2,
# nonselection_alpha=0.02,
# source=self.source
# )
# self.plot = p
# self.line_plot1 = self.line_plot(ticker1)
# self.line_plot2 = self.line_plot(ticker2, self.line_plot1.x_range)
# self.hist_plots()
# def hist_plots(self):
# ticker1 = self.ticker1
# ticker2 = self.ticker2
# self.hist1 = self.hist_plot(ticker1)
# self.hist2 = self.hist_plot(ticker2)
# def set_children(self):
# self.children = [self.mainrow, self.histrow, self.line_plot1, self.line_plot2]
# self.mainrow.children = [self.input_box, self.plot, self.statsbox]
# self.input_box.children = [self.ticker1_select, self.ticker2_select]
# self.histrow.children = [self.hist1, self.hist2]
# self.statsbox.children = [self.pretext]
# def input_change(self, obj, attrname, old, new):
# if obj == self.ticker2_select:
# self.ticker2 = new
# if obj == self.ticker1_select:
# self.ticker1 = new
# self.make_source()
# self.make_plots()
# self.set_children()
# curdoc().add(self)
# def setup_events(self):
# super(StockApp, self).setup_events()
# if self.source:
# self.source.on_change('selected', self, 'selection_change')
# if self.ticker1_select:
# self.ticker1_select.on_change('value', self, 'input_change')
# if self.ticker2_select:
# self.ticker2_select.on_change('value', self, 'input_change')
# def make_stats(self):
# stats = self.selected_df.describe()
# self.pretext.text = str(stats)
# def selection_change(self, obj, attrname, old, new):
# self.make_stats()
# self.hist_plots()
# self.set_children()
# curdoc().add(self)
# @property
# def df(self):
# return get_data(self.ticker1, self.ticker2)
# # The following code adds a "/bokeh/stocks/" url to the bokeh-server. This URL
# # will render this StockApp. If you don't want serve this applet from a Bokeh
# # server (for instance if you are embedding in a separate Flask application),
# # then just remove this block of code.
# @bokeh_app.route("/bokeh/stocks/")
# @object_page("stocks")
# def make_object():
# app = StockApp.create()
# return app
|
bsd-3-clause
|
buchbend/astrolyze
|
build/lib/astrolyze/functions/astro_functions.py
|
1
|
46095
|
# Copyright (C) 2012, Christof Buchbender
# BSD Licencse
import math
import sys
import scipy
from scipy.optimize import leastsq as least
from numpy import asarray, mean, std, where, exp, log, sqrt, arange, float64, floor
from copy import deepcopy as copy
import random as rnd
import astrolyze.functions.constants as const
import astrolyze.functions.units as units
def black_body(x, T, nu_or_lambda='nu'):
r"""
Calculation of the flux density of a black body at a temperature T and a
    wavelength/frequency x.
Parameters
----------
x : float or numpy array
        frequency [GHz] or wavelength [micron]; specify type in nu_or_lambda
T : float [Kelvin]
Temperature of the black_body
nu_or_lambda : string
        Specify whether x is a frequency :math:`\nu` ``'nu'`` or a wavelength
:math:`\lambda` ``'lambda'``; default is ``'nu'``.
Returns
-------
Flux density in Jansky : float [Jy]
Notes
-----
    This function implements the following formula for input in frequency:
    .. math::
        B_{\nu} = \frac{2 h \nu^3}{c^2} (e^{\frac{h \nu}{k T}} - 1)^{-1}
    and for input in wavelength:
    .. math::
        B_{\lambda} = \frac{2 h c^2}{\lambda^5} (e^{\frac{h c}{\lambda k T}} -
        1)^{-1}
Both formulas are scaled by 1e26, thus returning the flux in Jansky.
Examples
--------
The function works with linear numpy arrays. Thus the black_body can be
evaluated at many points at the same time. Using matplotlib it can
also be plotted:
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
import astrolyze.functions.astro_functions as astFunc
frequency_range = np.arange(1e4, 1e7, 1e4)
temperature_1 = 6000
temperature_2 = 12000 # Kelvin
blackbody_1 = astFunc.black_body(frequency_range, temperature_1)
blackbody_2 = astFunc.black_body(frequency_range, temperature_2)
figure = plt.figure()
axis = figure.add_subplot(111)
pl = axis.loglog(frequency_range, blackbody_1, label='T = 6000 K')
pl = axis.loglog(frequency_range, blackbody_2, label='T = 12000 K')
pl = axis.legend()
plt.savefig('black_body.eps')
"""
x = float64(x)
if nu_or_lambda == 'nu':
return float64((2 * const.h * ((x * 1e9) ** 3) / (const.c ** 2)) *
(1 / (math.e ** ((const.h * x * 1e9) / (const.k *
float(T))) - 1)) / 1e-26)
if nu_or_lambda == 'lambda':
return ((2 * const.h * const.c ** 2 / ((x * 1e-6) ** 5)) *
(1 / (math.e ** ((const.h * const.c) /
(x * 1e-6 * const.k * float(T))) - 1)) /
1e-26)
def grey_body(p, x, nu_or_lambda='nu', kappa='Kruegel', distance=840e3):
r""" Calculation of the flux density in Jansky of a grey_body under
    the assumption of optically thin emission. Please see the Notes below for
    a detailed description of the assumptions and equations used.
Parameters
----------
p : list
List of the parameters defining a grey_body, being Temperature [K],
column density or mass (dependent on the kappa used) and the grey_body
slope index beta, respectively (refer to notes for more information):
p = [T, N, beta]
x : float or numpy array
        Frequency [GHz] or wavelength [micron];
specify type in nu_or_lambda
kappa : string
Chooses the dust extinction coefficient to use:
* ``"easy"`` -> kappa = nu^beta; tau = N * kappa
* ``"Kruegel"`` -> kappa = 0.04*(nu/250Ghz)^beta;
tau = M/D^2 * kappa
Please refer to Notes below, for further explanation.
distance : float
The distance to the source that is to be modeled if kappa
``"Kruegel"`` is used.
Other Parameters
----------------
nu_or_lambda : string
Specify whether x is a frequency :math:`\nu` ``'nu'`` or a wavelength
        :math:`\lambda` ``'lambda'``; default is ``'nu'``. If ``'lambda'`` the
        input is converted to a frequency in [GHz].
Notes
-----
The general equation for a grey_body is:
.. math::
        S(x, \tau) = (black_body(x, T)) * [1 - e^{-\tau}] \Omega
describing the flux coming from an solid angle
:math:`\Omega` while :math:`\tau` is:
.. math::
\tau_{\nu} = \frac{ \kappa_d(\nu) * M_{dust}}{D^2 \Omega} .
Here we assume optically thin emission and a source filling factor of
unity. This simplifies the equation of the grey_body to:
.. math::
S(x, \tau) = \tau * (black_body(x, T))
    This script supports two versions of the dust extinction coefficient:
        A simple version without much physics behind it, kappa = ``'easy'``,
        which leads to the following grey_body equation:
.. math::
S(x, \tau) = N * x^{\beta} * black_body(x,T) ,
with N being a column density scaling factor.
The second version, kappa = ``'Kruegel'`` uses the dust extinction
coefficient reported in [KS] which renders the used equation to:
.. math::
\kappa = 0.04 * (\frac{x\,[GHz]}{250\,GHz})^{\beta}
S_{\nu} = M[kg] / D^2[m^2] * \kappa * black_body(x,T) .
Examples
--------
The same examples as for :func:`black_body` apply.
References
----------
.. [KS] Kruegel, E. & Siebenmorgen, R. 1994, A&A, 288, 929
"""
x = float64(x)
T, N, beta = p
# Switch to choose kappa
if kappa == 'easy':
# Here N is an arbitrary fitParameter with no direct physical meaning.
kappa = (x * 1e9) ** beta
tau = kappa * N
if kappa == 'Kruegel':
# Be careful, for kappa='Kruegel' the start Parameter N is actually
# the Mass of the specific component in sun masses.
kappa = 0.04 * ((x * 1e9 / 250e9) ** beta) # [KS]
distance_in_m = distance * const.parsec_in_m # M33.distance is 840e3 in
# parsec. pcInM from
# constants
D2 = distance_in_m ** 2 # calculate the Distance squared [m^2]
M = N * const.m_sun # Convert the Mass which is given as a start
# Parameter in Msun to kg
tau = kappa * (M / D2)
if nu_or_lambda == 'lambda':
        # Conversion of x in wavelength [micron] to frequency [GHz].
        x = const.c / (x * 1e-6) / 1e9
        nu_or_lambda = 'nu'
if nu_or_lambda == 'nu':
return tau * black_body(x, T, nu_or_lambda)
# if nu_or_lambda == 'lambda':
# # Not used anymore
# print 'Warning: This part of the script is not up to date'
# return N * ((c/(x * 1e-6))**beta) * black_body(x,T,nu_or_lambda)
def multi_component_grey_body(pMulti, x, nu_or_lambda='nu', kappa='Kruegel'):
r"""
Combines multiple grey_body functions and returns the flux density in
    Jansky for the input frequency/wavelength.
Parameters
----------
pMulti : nested lists
Similar to p from
:py:func:`grey_body` but the three entries are
lists, i.e.::
pMulti = [[T1, T2, T3, ...Tn], [N1, N2, N3,...Nn], [beta]]
x : float or numpy array
        frequency [GHz] (or wavelength [micron], **not maintained**; specify
        type in nu_or_lambda)
Returns
-------
sum(snu) : float
All dust components summed.
snu :
A list with the fluxes of the individual components.
See Also
--------
black_body, grey_body
Notes
-----
Only one common beta for all components can be used. May be expanded to
    multiple betas if needed.
Examples
--------
Same as for black_body, but all returned grey_bodies may be plotted.
"""
T, N, beta = pMulti
if type(beta) == list:
beta = beta[0]
snu = []
for i in range(len(T)):
pOne = [T[i], N[i], beta]
snu += [grey_body(pOne, x, nu_or_lambda, kappa=kappa)]
snu = asarray(snu)
return snu.sum(axis=0), snu
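# Hedged usage sketch (illustrative helper, not part of the original module):
# evaluate a two-component grey body on a frequency grid with the simple
# kappa='easy' scaling, where N is just a dimensionless scaling factor.
# All parameter values below are made up for illustration.
def _example_two_component_sed():
    frequencies = arange(100., 3000., 100.)       # GHz
    p_multi = [[20., 50.], [1e-24, 1e-26], 2.0]   # [T1, T2] in K, [N1, N2], beta
    total_flux, components = multi_component_grey_body(p_multi, frequencies,
                                                       'nu', kappa='easy')
    return total_flux, components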
def grey_body_fit(data, start_parameter, nu_or_lambda='nu', fit_beta=False,
fix_temperature=False, rawChiSq=None, kappa='Kruegel',
residuals=False, iterations=1e9):
r"""
This function fits a multi component grey body model to an observed SED for
    the optically thin case.
Parameters
----------
data : array
        The observed data. Array of shape (3, x); the first row has to be the X values
(Frequency in [GHz]) of the measurements, second row the Y values
(Flux [Jy]), and the third row the Z values the errors on the
fluxes i.e.:
data = array([[X1, X2, X3, ...], [Y1, Y2, Y3,...], [Z1, Z2,
Z3, ...]])
start_parameter : array
Array of a first guess of the parameters of the grey_body components.
The number of components is arbitrary.
start_parameter = [[T1, T2, T3,...], [N1, N2, N3, ...], beta]
fit_beta : True or False
If True Beta is allowed to vary. Default is False.
fix_temperature : True or False
        If True the temperature is fixed and not allowed to vary.
rawChiSq :
if None the function gives the reduced chisq Value. If True the
function gives chisq without dividing it by the dof
Returns
-------
p2 : list
The final grey_body parameters that reduce the least squares for the
given dataset.
chisq/rawChiSq :
chisq is reduced chisq with degrees of freedom:
dof= #dataPoints-#freeFitParameters-1
Other Parameters
----------------
nu_or_lambda : string
Specify whether x is a frequency :math:`\nu` ``'nu'`` or
        a wavelength :math:`\lambda` ``'lambda'``; default is ``'nu'``.::
**Don't** use ``'lambda'`` as this part of the
:py:func:`grey_body` is not up-to-date.
See Also
--------
scipy.optimize.leastsq: This function is used to perform the least squares
fit.
multi_component_grey_body, grey_body, black_body: Defining the function to
        be fitted to the data.
Notes
-----
A one component fit has four free parameters if beta is allowed to vary or
three if beta is fixed (one more than parameters to fit). Each additional
    component adds two more free parameters to fit.
Assure that:
number of data points > number of free parameters.
"""
# Distinguish between the different options and build the parameter list
# needed by optimize.leastsq()
if not fit_beta:
if not fix_temperature:
p = start_parameter[0] + start_parameter[1]
beta = start_parameter[2]
if fix_temperature:
p = start_parameter[1]
Temps = start_parameter[0]
beta = start_parameter[2]
if fit_beta:
if not fix_temperature:
p = start_parameter[0] + start_parameter[1] + start_parameter[2]
if fix_temperature:
p = start_parameter[1] + start_parameter[2]
Temps = start_parameter[0]
def _err(p, x, y, y_error, nu_or_lambda):
"""
The function to be minimized by scipy.optimize.leastsq. It returns the
difference between the measured fluxes, y, and the calculated fluxes
for the parameters p of the SED at the given frequency x.
Parameters
----------
p : list
same start start_parameter as in grey_body_fit
x and y :
The two rows, data[0] and data[1] of the data variable from
grey_body_fit.
y_error : The absolute error on the flux measurement.
Other Parameters
----------------
nu_or_lambda : string
Specify whether x is a frequency :math:`\nu` ``'nu'`` or
            a wavelength :math:`\lambda` ``'lambda'``; default is ``'nu'``.::
**Don't** use ``'lambda'`` as this part of the
:py:func:`grey_body` is not up-to-date.
Notes
-----
        This function calculates the residuals that are minimized by
:func:`leastsq`, thus it calculates:
.. math::
(model-measurement)/\sigma
Note the :math:`\chi^2` is defined as:
.. math::
\chi^2 = \frac{1}{N} \sum{(model-measurement)/\sigma}
:warning:`Formula has to be checked.`
"""
if not fit_beta:
if not fix_temperature:
numberOfComponents = (len(p)) / 2
pMulti = [list(p[0:numberOfComponents]),
list(p[numberOfComponents:numberOfComponents * 2])]
pMulti += [beta]
if fix_temperature:
pMulti = [Temps, p, beta]
if fit_beta:
if not fix_temperature:
numberOfComponents = (len(p) - 1) / 2
pMulti = [list(p[0:numberOfComponents]),
list(p[numberOfComponents:numberOfComponents * 2]),
p[len(p) - 1]]
if fix_temperature:
numberOfComponents = len(p) - 1
pMulti = [Temps, list(p[0:numberOfComponents]), p[len(p) - 1]]
return (((multi_component_grey_body(pMulti, x, nu_or_lambda,
kappa=kappa)[0]) - y) / y_error)
# The actual fit
# maxfev : Number of integrations
# full_output: Additional informations
# args: X and Y data
p2, cov, info, mesg, success = least(_err, p, args=(data[0], data[1],
data[2], nu_or_lambda),
maxfev=int(iterations), full_output=1)
# return of the optimized parameters
dof = len(data[0]) - len(p) - 1 # degrees of Freedom
chisq = sum(info['fvec'] * info['fvec']) / dof # see above
rawchisq = sum(info['fvec'] * info['fvec'])
if not fit_beta:
if not fix_temperature:
numberOfComponents = (len(p)) / 2
p2 = [list(p2[0:numberOfComponents]),
list(p2[numberOfComponents:numberOfComponents * 2]), beta]
if fix_temperature:
numberOfComponents = len(p)
p2 = [Temps, p2, beta]
if fit_beta:
if not fix_temperature:
numberOfComponents = (len(p) - 1) / 2
p2 = [list(p2[0:numberOfComponents]),
list(p2[numberOfComponents:numberOfComponents * 2]),
[p2[len(p) - 1]]]
if fix_temperature:
numberOfComponents = (len(p) - 1)
p2 = [Temps, list(p2[0:numberOfComponents]), [p2[len(p) - 1]]]
if residuals:
return p2, chisq, info['fvec']
if rawChiSq == None:
return p2, chisq
if rawChiSq:
return p2, rawchisq
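# Hedged usage sketch (illustrative helper, not part of the original module):
# fit a one-component grey body to a synthetic SED generated from known
# parameters, assuming 10 per cent flux errors and the kappa='easy' scaling.
def _example_grey_body_fit():
    frequencies = asarray([250., 350., 500., 857., 1200.])  # GHz
    p_true = [[25.], [1e-24], [2.0]]                         # [T], [N], [beta]
    fluxes = multi_component_grey_body(p_true, frequencies, kappa='easy')[0]
    data = asarray([frequencies, fluxes, 0.1 * fluxes])
    p_fit, chisq = grey_body_fit(data, p_true, kappa='easy')
    return p_fit, chisq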
def _grid_fit(data, beta, nu_or_lambda='nu', fit_beta=False, rawChiSq=False,
kappa='Kruegel'):
r"""
Different approach to find the best fitting SED using certain ranges of
temperatures and masses to avoid unreasonably high values.
Not sure about the functionality and the code is badly written with the
    temperature and mass ranges hard coded. That's why it's not public.
Once approved it may be made public again.
Parameters
----------
data : array
Same format as in grey_body_fit.
beta :
The beta value. If fit_beta=True this is varied in the fits.
kappa :
As in :func:`greybody`.
Other Parameters
----------------
fit_beta: True or False
        Controls whether beta is fitted or not.
rawChisq: True or False
Returns chi square without normalisation, or not.
"""
def _err(p, x, y, y_error, nu_or_lambda):
"""
The function to be minimized by scipy.optimize.leastsq. It returns the
difference between the measured fluxes, y, and the calculated fluxes
for the parameters p of the SED at the given frequency x.
Parameters
----------
p : list
same start start_parameter as in grey_body_fit
x and y :
The two rows, data[0] and data[1] of the data variable from
grey_body_fit.
y_error : The absolute error on the flux measurement.
Other Parameters
----------------
nu_or_lambda : string
Specify whether x is a frequency :math:`\nu` ``'nu'`` or
            a wavelength :math:`\lambda` ``'lambda'``; default is ``'nu'``.::
**Don't** use ``'lambda'`` as this part of the
:py:func:`grey_body` is not up-to-date.
Notes
-----
        This function calculates the residuals that are minimized by
:func:`leastsq`, thus it calculates:
.. math::
(model-measurement)/\sigma
Note the :math:`\chi^2` is defined as:
.. math::
\chi^2 = \frac{1}{N} \sum{(model-measurement)/\sigma}
:warning:`Formula has to be checked.`
        This function is different from the _err function in grey_body_fit
        because the start parameters are handled differently.
"""
if not fit_beta:
pMulti = [T, p, beta]
print pMulti
if fit_beta:
pMulti = [Temps, p]
print pMulti
return (((multi_component_grey_body(pMulti, x, nu_or_lambda, kappa)[0])
- y) / y_error)
beta = [beta]
T1 = arange(5, 70, 1)
T2 = arange(20, 100, 1)
chisqList = {}
chisqList1 = []
for i in T1:
for j in T2:
T = [i, j]
N = [1e2, 1]
if not fit_beta:
p = N
if fit_beta:
p = N + beta
p2, cov, info, mesg, success = least(_err, p, args=(data[0],
data[1], data[2],
nu_or_lambda),
maxfev=int(1e9),
full_output=1)
dof = len(data[0]) - len(p) - 1 # Degrees of Freedom
chisq = sum(info['fvec'] * info['fvec']) / dof # see above
rawchisq = sum(info['fvec'] * info['fvec'])
chisqList[chisq] = [[i, j], p2]
chisqList1 += [chisq]
chisq = sorted(chisqList)[0]
T, p2 = chisqList[sorted(chisqList)[0]]
if not fit_beta:
numberOfComponents = len(p)
p2 = [T, p2, beta]
if fit_beta:
numberOfComponents = (len(p) - 1)
p2 = [Temps, list(p2[0:numberOfComponents]), [p2[len(p) - 1]]]
if not rawChiSq:
return p2, chisq
if rawChiSq:
return p2, rawchisq
return sorted(chisq)[0]
def LTIR(p2, kappa='Kruegel', xmin=3., xmax=1100.,
distance=False, unit='JyB'):
r"""
Integration of a multi-component greybody model.
Parameters
----------
p2 : list
The parameters defining the multi-component greybody model. Same format
as p in
:py:func:`astrolyze.functions.astroFunctions.multi_component_grey_body`
kappa : string
The dust extinction coefficient used to describe the greybodies. See:
py:func:`grey_body`
xmin, xmax : float
The integration range in units of micron. Defaults to 3 -- 1100 micron.
The definition of LTIR from [DA]
unit : string
        If ``'Lsun'`` the returned integrated flux is in units of 1e6 solar
        luminosities. For this a distance is needed. If ``'JyB'``
the units are Jy/beam; distance is not used. If distance is not given
JyB is used.
Notes
-----
Needs some work to be generally usable. For units in Jy/beam the code seems
to be safe.
References
----------
.. [DA] Dale et al. 2001; ApJ; 549:215-227
"""
#Convert the input xmin/xmax from micron to m and to frequency in
#GHz
xmin = const.c / (xmin * 1e-6) / 1e9 # GHz
xmax = const.c / (xmax * 1e-6) / 1e9
step = 0.1
x = arange(floor(xmax), floor(xmin), step)
# multi_component_grey_body needs input (x) in GHz
model, grey = multi_component_grey_body(p2, x, 'nu', kappa)
# integrate the SED
LTIR1 = sum(model) * (step * 1e9) # Jy/Beam*Hz
if unit == 'Lsun' and distance:
# convert to erg s-1 /beam in terms of 1e6*Lsun
conv = 1 / units.ErgsToJansky_m * units.Int2Lum(distance, cm_or_m='m')
LTIR1 = LTIR1 * conv / const.Lsunergs / 1e6
if unit == 'JyB' or not distance:
pass
return LTIR1
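# Hedged usage sketch (illustrative helper, not part of the original module):
# integrate a made-up two-component model between 3 and 1100 micron; without a
# distance the result stays in Jy/Beam * Hz (unit='JyB').
def _example_ltir():
    p_multi = [[20., 50.], [1e-24, 1e-26], 2.0]  # illustrative parameters
    return LTIR(p_multi, kappa='easy', xmin=3., xmax=1100.)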
def generate_monte_carlo_data_sed(data):
"""
MonteCarlo Simulation of a set of flux measurements, assuming that the
measurement data follows a gauss distribution.
This function makes use of the :func:`random.gauss` function to generate a
data point from a gauss distribution, that has a mean equal to the Flux
measurement and a standard deviation correponding to the error of the
measurement.
Parameters
----------
data : array
Same format as in grey_body_fit function:
data= [[x1, x2, x3, ...][y1, y2, y3, ...][z1, z2, z3, ...]]
        with x = wavelength/frequency, y = flux, z = error on flux.
Returns
-------
newData : array in same format as data.
The Monte-Carlo simulated measurement.
See Also
--------
random.gauss
"""
newData = copy(data)
for i in range(len(data[1])):
newData[1][i] = rnd.gauss(data[1][i], data[2][i])
return newData
def grey_body_monte_carlo(p, data, iterations):
"""
Function to evaluate the errors in the parameters fitted with the
grey_body_fit function.
It uses Monte Carlo Simulated data (from
:func:`generate_monte_carlo_data_sed`) and performs a fit to this new data
giving back the results of the fit parameters.
Parameters
----------
p : list
The parameters defining the multi component grey_body model to be
fitted. Same format as p in :py:func:`multi_component_grey_body`
data : array
The actual measured data of the SED, same format as for
:py:func:`grey_body_fitFunction`
iterations : int
Number of times new data is generated and fitted.
Returns
-------
string :
Containing the mean, standard deviation of the fit parameters, ready
to print out.
betaTlist : List of all fit results. Name misleading since it may not
include the beta.
"""
    # Define the variables that store the Monte Carlo T and N values
TList = []
NList = []
chisqList = []
betaList = []
for i in range(len(p[0])):
TList += [[]]
NList += [[]]
if len(p[0]) == 1:
betaTList = [[], [], [], [], []]
if len(p[0]) == 2:
betaTList = [[], [], [], [], [], []]
string = ''
for i in range(0, iterations):
print i + 1, '\\', iterations
sys.stdout.flush()
MCData = generate_monte_carlo_data_sed(data)
if len(p[0]) == 1:
p2, chisq = grey_body_fit(MCData, p, fit_beta=True,
fix_temperature=False)
else:
p2, chisq = grey_body_fit(MCData, p, fit_beta=False,
fix_temperature=False)
chisqList += [chisq]
x = 0
if len(p[0]) == 1:
betaTList[3] += [MCData]
betaTList[4] += [p2]
if len(p[0]) == 2:
betaTList[4] += [MCData]
betaTList[5] += [p2]
for i in range(len(p[0])):
if len(p[0]) == 1:
betaTList[0] += [p2[0][i]]
betaTList[1] += [p2[2][i]]
betaTList[2] += [p2[1][i]]
if len(p[0]) == 2:
betaTList[i] += [p2[0][i]]
betaTList[i + 2] += [p2[1][i]]
#if float(p2[0][0]) < 100:
#if float(p2[0][1]) < 100:
TList[i] += [p2[0][i]]
NList[i] += [p2[1][i]]
if x == 0:
betaList += p2[2]
else:
#if float(p2[0][i]) < 100:
TList[i] += [p2[0][i]]
NList[i] += [p2[1][i]]
if x == 0:
betaList += p2[2]
x = x + 1
betaList = asarray(betaList)
for i in range(len(p[0])):
string += ('T' + str(i + 1) + ': ' +
str("%1.2f" % mean(asarray(TList[i]))) +
' +/- ' + str("%1.2f" % std(asarray(TList[i]))) + '\n')
string += ('N' + str(i + 1) + ': ' +
str("%1.2e" % mean(asarray(NList[i]))) +
' +/- ' + str("%1.2e" % std(asarray(NList[i]))) + '\n')
string += ('Beta: ' + str("%1.2f" % mean(betaList)) + ' +/- ' +
str("%1.2f" % std(betaList)) + '\n')
string += 'Number of Fits ' + str(len(TList[0])) + '\n'
if len(p[0]) == 1:
return string, betaTList
else:
return string, betaTList
def line(p, x):
r"""
Line `y = m*x + b` equation. Returns y value at point x.
Parameters
----------
p : list
Contains the slope and the y-axis intersection of the line [m, b].
Returns
-------
y : value of y corresponding to x.
"""
return p[0] * x + p[1]
def anti_line(p, y):
r"""
Inverse of a line returning the x value corresponding to a y value, i.e.
`x = y/m - b`.
Parameters
----------
p : list
Contains the slope and the y-axis intersection of the line [m, b].
Returns
-------
    x : value of x corresponding to y.
"""
return y / p[0] - p[1]
def linear_error_function(p, x, y, y_error, x_error):
"""
Error function, i.e. residual from the measured value, which has to be
minimised in the least square fit taking X and Y Error into account.
Parameters
----------
p : list
Same as in :func:`line` and :func:`anti_line`.
x : float or list
x measurements. Data.
y : float or list
y measurements. Data.
x_error : float or list
        The x measurement errors.
    y_error : float or list
        The y measurement errors.
"""
if x_error.all():
return sqrt(((line(p, x) - y) / y_error) ** 2 + ((anti_line(p, y) - x)
/ x_error) ** 2)
if not x_error.all():
return sqrt(((line(p, x) - y) / y_error) ** 2)
def line_fit(p, x, y, y_error, x_error=False, iterations=10000):
"""
Linear Fit to data, taking either errors in y or both in x and y into
account.
Parameters
----------
p : list
Containg slope (m) and y-axis intersection (b) p=[m, b]. Same as in
:func:`line` and :func:`antiline`.
x : float or list
x measurements. Data.
y : float or list
y measurements. Data.
y_error : float or list
        The y measurement errors.
    x_error : float or list
        The x measurement errors. If unset only errors in y are taken into
account.
"""
p1, cov, info, mesg, success = least(linear_error_function, p,
args=(x, y, y_error, x_error),
maxfev=int(iterations), full_output=1)
dof = len(x) - len(p) - 1 # degrees of Freedom
chisq = sum(info['fvec'] * info['fvec']) / dof
Error = std(info['fvec'])
return p1, chisq, Error
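# Hedged usage sketch (illustrative helper, not part of the original module):
# fit a straight line to noisy synthetic data with errors in both coordinates.
def _example_line_fit():
    x = arange(0., 10., 1.)
    y = 2.5 * x + 1.0 + asarray([rnd.gauss(0, 0.3) for _ in x])
    x_error = asarray([0.1] * len(x))
    y_error = asarray([0.3] * len(x))
    p_start = [1.0, 0.0]  # initial guess for slope and intercept
    p_fit, chisq, scatter = line_fit(p_start, x, y, y_error, x_error)
    return p_fit, chisq, scatter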
def analytic_linear_fit(x, y, x_error, y_error):
r"""
    This function implements the analytical solution following chapter 8 of
[TA].
Parameters
----------
x : float or list
x measurements. Data.
y : float or list
y measurements. Data.
y_error : float or list
        The y measurement errors.
    x_error : float or list
        The x measurement errors. If unset only errors in y are taken into
account.
Notes
-----
Without errors the following holds:
.. math::
y = A + B x
A = \frac{\Sigma(x^2) \cdot \Sigma(y) - \Sigma(x) \cdot
\Sigma(x \cdot y)}{\Delta}
B = N \frac{\Sigma(x \cdot y) - \Sigma (x) \cdot \Sigma(y)}{\Delta}
\Delta = N \cdot \Sigma(x^2) - (\Sigma(x))^2
.. warning:: This has to be checked.
References
----------
.. [TA] "An introduction to the study of uncertainties in physical
measurement" by John R. Taylor.
"""
# first calculate a least squares fit ignoring the errors since B is
# needed for the more complex issue including errors
sumX = sum(x)
sumY = sum(y)
sumXY = sum(x * y)
sumXSq = sum(x ** 2)
N = len(x)
Delta = N * sumXSq - (sumX) ** 2
A = (sumXSq * sumY - sumX * sumXY) / Delta
B = (N * sumXY - sumX * sumY) / Delta
print 'm = ' + '%1.2f' % A + ' b = ' + '%1.2f' % B
# now make use of the idea of a equivalent error only in y defined by
# equivalentError = sqrt(y_error**2+(B*x_error)**2)
equivalentError = y_error # sqrt(y_error**2+(B*x_error)**2)
# and use wheighted least square fit see definition for A and B and
# their errors below
weight = 1. / (equivalentError) ** 2
sumWeightX = sum(weight * x)
sumWeightY = sum(weight * y)
sumWeightXY = sum(weight * x * y)
sumWeightXSq = sum(weight * x ** 2)
sumWeight = sum(weight)
WeightDelta = (sumWeight * sumWeightXSq) - ((sumWeightX) ** 2)
A = (((sumWeightXSq * sumWeightY) - (sumWeightX * sumWeightXY)) /
WeightDelta)
B = ((sumWeight * sumWeightXY) - (sumWeightX * sumWeightY)) / WeightDelta
SigmaA = sqrt(sumWeightXSq / WeightDelta)
SigmaB = sqrt(sumWeight / WeightDelta)
Chisq = sum((y - A - B * x) ** 2 / equivalentError ** 2) / (N - 2)
print ('m = ' + '%1.2f' % A + '+/-' + '%1.2f' % SigmaA + ' b = ' + '%1.2f'
% B + '+/-' + '%1.2f' % SigmaB + ' chisq:' + '%1.2f' % Chisq)
return A, SigmaA, B, SigmaB, Chisq
def generate_monte_carlo_data_line(data, errors):
"""
    This function makes a Monte Carlo simulation of a set of measurements.
    It uses the random.gauss() function to generate a data point
    from a gauss distribution that has a mean equal to the measurement
    and a standard deviation corresponding to the error of the measurement.
Parameters
----------
data : list
A list of original measurements.
errors : list
A list of the corresponding errors.
Returns
-------
newData : array in same format as data.
The monte carlo simulated measurement.
See Also
--------
random.gauss
"""
newData = copy(data)
for i in range(len(data)):
newData[i] = rnd.gauss(data[i], errors[i])
return newData
def line_monte_carlo(p, x, y, x_error, y_error, iterations,
fitIterations=1e9):
"""
    Generate an estimate of the errors of the fitted parameters determined by
the :py:func:`line_fit` function.
Parameters
----------
p : list
Containg slope (m) and y-axis intersection (b) p=[m, b]. Same as in
:func:`line` and :func:`antiline`.
x : float or list
x measurements. Data.
y : float or list
y measurements. Data.
y_error : float or list
The y measurment errors.
x_error : float or list
The x measurment errors. If unset only errors in y are taken into
account.
Returns
-------
string : A string containing the results.
    BList : A list containing the fitted y-axis intersections.
MList : A list containing the fitted slopes.
chisqList : A list with the chisq values.
resultArray : Array with the mean and the standard deviations of
slopes and y-axis intersections, i.e. [mean(M), std(M), mean(B),
std(B)]
See Also
--------
grey_body_fit, generate_monte_carlo_data_line
"""
    # Define the variables that store the Monte Carlo B and M values, y = m*x + b
BList = []
MList = []
chisqList = []
FitList = []
string = ''
for i in range(0, iterations):
sys.stdout.write(str(i + 1) + '\\' + str(iterations) + '\r')
sys.stdout.flush()
xMCData = generate_monte_carlo_data_line(x, x_error)
yMCData = generate_monte_carlo_data_line(y, y_error)
        p2, chisq, Error = line_fit(p, xMCData, yMCData, y_error, x_error,
                                    iterations=10000)
chisqList += [chisq]
BList += [p2[1]]
MList += [p2[0]]
string += ('B: ' + str("%1.2f" % mean(asarray(BList))) + ' +/- ' +
str("%1.2f" % std(asarray(BList))) + '\n')
string += ('M: ' + str("%1.2f" % mean(asarray(MList))) + ' +/- ' +
str("%1.2f" % std(asarray(MList))) + '\n')
string += ('Chi: ' + str("%1.2f" % mean(asarray(chisqList))) + ' +/- ' +
str("%1.2f" % std(asarray(chisqList))) + '\n')
string += 'Number of Fits ' + str(len(BList)) + '\n'
resultArray = [mean(asarray(MList)), std(asarray(MList)),
mean(asarray(BList)), std(asarray(BList))]
return string, BList, MList, chisqList, resultArray
def gauss1D(x, fwhm, area, offset=0):
r"""
    Calculates a 1D Gaussian.
Parameters
----------
x : float or numpy.ndarray
        the x-axis value/values where the Gaussian is to be calculated.
    fwhm : float
        The full width at half maximum of the Gaussian.
    area : float
        The integrated area of the Gaussian.
    offset :
        The offset in x direction from 0. Default is 0.
Returns
-------
gauss : float or np.ndarray
The y value for the specified Gaussian distribution evaluated at x.
Notes
-----
    The function used to describe the Gaussian (area-normalised, with the
    width given as FWHM) is:
    .. math::
        f(x) = \frac{area}{fwhm \sqrt{\pi / (4 \ln 2)}}
               e^{-4 \ln 2 \left(\frac{x - offset}{fwhm}\right)^2}
"""
gauss_factor_1 = 1.665109
gauss_factor_2 = 1.064467
gauss = ((x - offset) / fwhm * gauss_factor_1) ** 2
gauss = exp(-1 * gauss)
height = area / (fwhm * gauss_factor_2)
gauss = height * gauss
return gauss
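# Hedged usage sketch (illustrative helper, not part of the original module):
# sample an area-normalised Gaussian line profile on a velocity axis and check
# that the numerical integral roughly recovers the requested area.
def _example_gauss1d():
    velocity = arange(-50., 50., 0.1)  # km/s, illustrative axis
    profile = gauss1D(velocity, fwhm=10., area=5., offset=2.)
    return profile.sum() * 0.1         # should be close to the area of 5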
def fwzi(noise, fwhm, area, offset=0):
r"""
    Calculates the full width at zero intensity (FWZI) of a Gaussian line,
    i.e. the width of the interval over which the line lies above a given
    noise level.
    Parameters
    ----------
    noise : float
        The noise level the line is compared against.
    fwhm : float
        The full width at half maximum of the Gaussian.
    area : float
        The integrated area of the Gaussian; together with fwhm it sets the
        peak height.
    offset :
        The offset in x direction from 0. Default is 0 (not used in the
        calculation).
    Returns
    -------
    fwzi : float
        The full width at zero intensity; 0 if the peak is below the noise.
    Notes
    -----
    With height = area / (fwhm * 1.064467) and ratio = height / noise:
    .. math::
        fwzi = 2 \sqrt{2 \ln(ratio)} \; \frac{fwhm}{2 \sqrt{2 \ln 2}}
"""
gauss_factor_1 = 1.665109
gauss_factor_2 = 1.064467
height = area / (fwhm * gauss_factor_2)
ratio = height / noise
print ratio
if ratio < 1:
fwzi = 0
return 0
# see wikipedia
c = (fwhm) / math.sqrt(2) / gauss_factor_1
fwzi = 2. * math.sqrt(2. * math.log(ratio)) * c
return fwzi
def gauss2D(x, y, major, minor, pa=0, xOffset=0, yOffset=0, amplitude=1):
r"""
Calculates a 2D Gaussian at position x y.
Parameters
----------
x : float or numpy.ndarray
        the x-axis value/values where the Gaussian is to be calculated.
y : float or numpy.ndarray
the y-axis value/values where the Gaussian is to be calculated.
major, minor : float
The fwhm of the Gaussian in x and y direction.
pa : float
The position angle of the Gaussian in degrees. Default is 0.
xOffset, yOffset:
The offset in x and y direction from 0. Default is 0.
amplitude :
The height of the Gaussian. Default is 1.
Returns
-------
gauss : float or np.ndarray
The y value for the specified Gaussian distribution evaluated at x.
Notes
-----
The function used to describe the Gaussian is :
.. math::
f = (amplitude * exp (-1 (a*(x-xOffset)^2 + 2*b*(x-xOffset)*(y-yOffset)
+ c*(y-yOffset)^2)))
where:
.. math::
a = cos(pa)**2/(2*major**2) + sin(pa)**2/(2*minor**2) \\
b = (-1*sin(2*pa)/(4*major**2))+(sin(2*pa)/(4*minor**2)) \\
c = sin(pa)**2/(2*major**2) + cos(pa)**2/(2*minor**2) \\
"""
pa = pa * math.pi / 180
    # math.cos/math.sin are used because only the scalar position angle enters
    # the trigonometric terms; x and y may still be numpy arrays.
    a = (math.cos(pa) ** 2 / (2 * major ** 2) +
         math.sin(pa) ** 2 / (2 * minor ** 2))
    b = ((-1 * math.sin(2 * pa) / (4 * major ** 2)) +
         (math.sin(2 * pa) / (4 * minor ** 2)))
    c = (math.sin(pa) ** 2 / (2 * major ** 2) +
         math.cos(pa) ** 2 / (2 * minor ** 2))
    gauss = a * (x - xOffset) ** 2
    gauss += 2 * b * (x - xOffset) * (y - yOffset)
    gauss += c * (y - yOffset) ** 2
    gauss = exp(-1 * gauss)
    gauss = amplitude * gauss
    return gauss
def degrees_to_equatorial(degrees):
r"""
Converts RA, DEC coordinates in degrees to equatorial notation.
Parameters
----------
degrees : list
The coordinates in degrees in the format of: [23.4825, 30.717222]
Returns
-------
equatorial : list
The coordinates in equatorial notation, e.g.
corresponding ['1:33:55.80', '+30:43:2.00'].
"""
coordinate = []
coordinate += [str(int(degrees[0] / 15)) + ':' + str(int(((degrees[0] / 15)
- int(degrees[0] / 15)) * 60)) + ':' + "%1.2f" %
(float(str((((degrees[0] / 15 - int(degrees[0] / 15)) * 60) -
int((degrees[0] / 15 - int(degrees[0] / 15)) * 60)) * 60)))]
coordinate += [(str(int(degrees[1])) + ':' +
str(int(math.fabs(int((float(degrees[1]) - int(degrees[1])) *
60)))) + ':' + "%1.2f" %
(math.fabs(float(str(float(((float(degrees[1]) - int(degrees[1]))
* 60) - int((float(degrees[1]) - int(degrees[1])) * 60)) *
60)))))]
return coordinate
def equatorial_to_degrees(equatorial):
r"""
Converts RA, DEC coordinates in equatorial notation to degrees.
Parameters
----------
equatorial : list
        The coordinates in equatorial notation, e.g.
['1:33:55.80', '+30:43:2.00']
Returns
-------
degrees : list
        The coordinates in degrees, e.g. [23.4825, 30.717222].
Raises
------
SystemExit
If ``equatorial`` is not a list of strings in the above format.
"""
try:
CoordsplitRA = equatorial[0].split(':')
CoordsplitDec = equatorial[1].split(':')
except AttributeError, e:
print e
raise SystemExit('Input has to be equatorial coordinates.')
if float(CoordsplitDec[0]) > 0:
degrees = [(float(CoordsplitRA[0]) * (360. / 24) +
float(CoordsplitRA[1]) * (360. / 24 / 60) +
float(CoordsplitRA[2]) * (360. / 24 / 60 / 60)),
(float(CoordsplitDec[0]) + float(CoordsplitDec[1]) * (1. /
60) + float(CoordsplitDec[2]) * 1. / 60 / 60)]
if float(CoordsplitDec[0]) < 0:
degrees = [(float(CoordsplitRA[0]) * (360. / 24) +
float(CoordsplitRA[1]) * (360. / 24 / 60) +
float(CoordsplitRA[2]) * (360. / 24 / 60 / 60)),
(float(CoordsplitDec[0]) - float(CoordsplitDec[1]) * (1. /
60) - float(CoordsplitDec[2]) * 1. / 60 / 60)]
return degrees
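# Hedged usage sketch (illustrative helper, not part of the original module):
# round-trip the coordinate from the docstrings above between equatorial and
# degree notation.
def _example_coordinate_roundtrip():
    equatorial = ['1:33:55.80', '+30:43:2.00']
    degrees = equatorial_to_degrees(equatorial)  # -> approx. [23.4825, 30.7172]
    return degrees_to_equatorial(degrees)        # back to sexagesimal strings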
def calc_offset(central_coordinate, offset_coordinate, angle = 0,
output_unit='arcsec'):
r"""
Calculates the offset between two coordinates.
Parameters
----------
central_coordinate : list
The reference coordinate in degrees or equatorial.
offset_coordinate : list
        The second coordinate; the offset will be with respect to
central_coordinate.
angle : float
The angle in degrees, allowing rotated systems.
Returns
-------
rotated_offset : list
The offsets, rotated only if angle given.
Notes
-----
    This function includes a correction of the RA offset with declination:
    .. math::
        ra_{corrected} = ra \cos(dec)
"""
possible_units = ['DEGREE', 'DEGREES', 'ARCMINUTE', 'ARCMINUTES', 'ARCSEC',
'ARCSECS']
if output_unit.upper() not in possible_units:
raise ValueError('Unit has to be one of the following. "' +
'" "'.join(possible_units).lower() + '"')
angle = math.radians(angle)
central_in_degrees = equatorial_to_degrees(central_coordinate)
offset_in_degrees = equatorial_to_degrees(offset_coordinate)
offset = [offset_in_degrees[0] - central_in_degrees[0] ,
offset_in_degrees[1] - central_in_degrees[1]]
# correction for declination
offset = [offset[0] * math.cos(math.radians(offset_in_degrees[1])),
offset[1]]
# Rotate the offsets.
rotated_offset = rotation_2d(offset, angle)
rotated_offset = asarray(rotated_offset)
if output_unit.upper() in ['DEGREE', 'DEGREES']:
pass
if output_unit.upper() in ['ARCMINUTE', 'ARCMINUTES']:
rotated_offset = rotated_offset * 60
if output_unit.upper() in ['ARCSEC', 'ARCSECS']:
rotated_offset = rotated_offset * 60 * 60
print rotated_offset
return rotated_offset
def rotation_2d(coordinate, angle):
r"""
Implementation of the rotation matrix in two dimensions.
Parameters
----------
coordinates : list of floats
Coordinates in the unrotated system [x, y].
angle : float
The rotation angle
Returns
-------
[x_rotated, y_rotated]: list of floats
Coordinates in the rotated system.
"""
x, y = coordinate
x_rotated = math.cos(angle) * x - math.sin(angle) * y
y_rotated = math.sin(angle) * x + math.cos(angle) * y
return [x_rotated, y_rotated]
def vel_to_freq_resolution(center_frequency, velocity_resolution):
r""" Converts a velocity resolution to frequency resolution for a given
center frequency.
Parameters
----------
center_frequency : float
Center frequency in GHz.
velocity_resolution :
Velocity resolution in km/s.
Returns
-------
frequency_resolution : float
        The corresponding frequency resolution in MHz
Notes
-----
Approved!
"""
# Conversion from km/s to m/s
velocity_resolution = velocity_resolution * 1e3
# Conversion from GHz to Hz
center_frequency = center_frequency * 1e9
# Calculation of the frequency_resolution in Hz
frequency_resolution = (-1 * velocity_resolution *
center_frequency / const.c)
# Conversion to MHz
frequency_resolution = frequency_resolution / 1e6
return frequency_resolution
def freq_to_vel_resolution(center_frequency, frequency_resolution):
r""" Function to convert a frequency resolution to a velocity resolution
for a given center frequency.
Parameters
----------
center_frequency : float
Center frequency in GHz.
frequency_resolution : float
The frequency resolution in MHz.
Returns
-------
velocity_resolution in km/s.
Notes
-----
    Uses the formula :math:`v_{LSR} = c (\nu_0 - \nu_{obs}) / \nu_0`.
Approved!
"""
center_frequency = center_frequency * 1e9
frequency_resolution = frequency_resolution * 1e6
observation_frequency = center_frequency + frequency_resolution
velocity_resolution = v_lsr(center_frequency,
observation_frequency)
# Difference between nu0 and nuObs is the velocity resolution
return velocity_resolution
def v_lsr(center_frequency, observation_frequency):
r""" Calculates the velocity that corresponds to a certain frequency shift
between two frequencies.
Parameters
----------
center_frequency : float
center_frequency in GHz
observation_frequency : float
The observation frequency in GHz.
Returns
-------
v_lsr : float
The velocity corresponding to the frequency shift in km/s
Notes
-----
Approved!
"""
center_frequency = center_frequency * 1e9
observation_frequency = observation_frequency * 1e9
v_lsr = (const.c * ((center_frequency - observation_frequency) /
center_frequency) / 1e3)
return v_lsr
def redshifted_frequency(rest_frequency, v_lsr):
r""" Calculates the sky frequency corresponding to a rest frequency for a
source with a velocity v_lsr.
Parameters
----------
rest_frequency : float
        The frequency of the line at rest in GHz (More often state the obvious
:)).
v_lsr : float
The velocity of the source in km/s.
Returns
-------
redshifted_frequency : float
The sky frequency in GHz.
Notes
-----
The formula used is:
.. math::
        \nu_{sky} = \nu_{rest} \left(1 - \frac{v_{lsr}}{c}\right)
Approved!
"""
# Convert frequency to Hz,
rest_frequency = rest_frequency * 1e9
# Convert velocity to m/s,
v_lsr = v_lsr * 1e3
# Calculate the sky frequency,
redshifted_frequency = (-1. * v_lsr / const.c + 1) * rest_frequency
# Convert to GHz.
redshifted_frequency = redshifted_frequency / 1e9
return redshifted_frequency
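# Hedged usage sketch (illustrative helper, not part of the original module):
# shift the CO(2-1) rest frequency (~230.538 GHz) to the sky frame for a source
# at -180 km/s and convert the resulting shift back to a velocity.
def _example_sky_frequency():
    rest = 230.538                     # GHz, CO(2-1) rest frequency
    sky = redshifted_frequency(rest, v_lsr=-180.)
    return sky, v_lsr(rest, sky)       # second value should be close to -180 km/s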
def frequency_to_wavelength(frequency):
r"""
Converting frequency to wavelength.
Parameters
----------
frequency : float [GHZ]
Returns
-------
wavelength : float [micron]
"""
# Conversion from GHz to Hz
frequency = frequency * 1e9
wavelength = const.c / frequency
# Conversion from m to micron (mum).
wavelength = wavelength / 1e-6
return wavelength
def _equatorial2DegFile(inputFile):
'''
    Old function; if needed it can be made public...
    Converts equatorial coordinates to degrees.
    The format of the input file must be:
    sourceName Ra Dec
    with space/tab between the entries.
    '''
    filein = open(inputFile).readlines()
    coords = []
    for i in filein:
        i = i.split()
        coords += [[i[1], i[2]]]
    print coords
    x = 0
    for i in coords:
        print (filein[x].split()[0], equatorial_to_degrees(i)[0], ',',
               equatorial_to_degrees(i)[1])
        x += 1
if __name__ == "__main__":
import doctest
doctest.testmod()
|
bsd-3-clause
|
untom/scikit-learn
|
sklearn/cross_decomposition/tests/test_pls.py
|
215
|
11427
|
import numpy as np
from sklearn.utils.testing import (assert_array_almost_equal,
assert_array_equal, assert_true, assert_raise_message)
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_
from nose.tools import assert_equal
def test_pls():
d = load_linnerud()
X = d.data
Y = d.target
# 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
# ===========================================================
# Compare 2 algo.: nipals vs. svd
# ------------------------------
pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
pls_bynipals.fit(X, Y)
pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
pls_bysvd.fit(X, Y)
# check equalities of loading (up to the sign of the second column)
assert_array_almost_equal(
pls_bynipals.x_loadings_,
np.multiply(pls_bysvd.x_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different x loadings")
assert_array_almost_equal(
pls_bynipals.y_loadings_,
np.multiply(pls_bysvd.y_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different y loadings")
# Check PLS properties (with n_components=X.shape[1])
# ---------------------------------------------------
plsca = pls_.PLSCanonical(n_components=X.shape[1])
plsca.fit(X, Y)
T = plsca.x_scores_
P = plsca.x_loadings_
Wx = plsca.x_weights_
U = plsca.y_scores_
Q = plsca.y_loadings_
Wy = plsca.y_weights_
def check_ortho(M, err_msg):
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(Wx, "x weights are not orthogonal")
check_ortho(Wy, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(T, "x scores are not orthogonal")
check_ortho(U, "y scores are not orthogonal")
# Check X = TP' and Y = UQ' (with (p == q) components)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# center scale X, Y
Xc, Yc, x_mean, y_mean, x_std, y_std =\
pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
# Check that rotations on training data lead to scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Xr = plsca.transform(X)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
Xr, Yr = plsca.transform(X, Y)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
assert_array_almost_equal(Yr, plsca.y_scores_,
err_msg="rotation on Y failed")
# "Non regression test" on canonical PLS
# --------------------------------------
# The results were checked against the R-package plspm
pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
pls_ca.fit(X, Y)
x_weights = np.array(
[[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_rotations = np.array(
[[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788]])
assert_array_almost_equal(pls_ca.x_rotations_, x_rotations)
y_weights = np.array(
[[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_rotations = np.array(
[[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826]])
assert_array_almost_equal(pls_ca.y_rotations_, y_rotations)
# 2) Regression PLS (PLS2): "Non regression test"
# ===============================================
# The results were checked against the R-packages plspm, mixOmics and pls
pls_2 = pls_.PLSRegression(n_components=X.shape[1])
pls_2.fit(X, Y)
x_weights = np.array(
[[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983]])
assert_array_almost_equal(pls_2.x_weights_, x_weights)
x_loadings = np.array(
[[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983]])
assert_array_almost_equal(pls_2.x_loadings_, x_loadings)
y_weights = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_weights_, y_weights)
y_loadings = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_loadings_, y_loadings)
# 3) Another non-regression test of Canonical PLS on random dataset
# =================================================================
# The results were checked against the R-package plspm
n = 500
p_noise = 10
q_noise = 5
# 2 latents vars:
np.random.seed(11)
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X = np.concatenate(
(X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
Y = np.concatenate(
(Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
np.random.seed(None)
pls_ca = pls_.PLSCanonical(n_components=3)
pls_ca.fit(X, Y)
x_weights = np.array(
[[0.65803719, 0.19197924, 0.21769083],
[0.7009113, 0.13303969, -0.15376699],
[0.13528197, -0.68636408, 0.13856546],
[0.16854574, -0.66788088, -0.12485304],
[-0.03232333, -0.04189855, 0.40690153],
[0.1148816, -0.09643158, 0.1613305],
[0.04792138, -0.02384992, 0.17175319],
[-0.06781, -0.01666137, -0.18556747],
[-0.00266945, -0.00160224, 0.11893098],
[-0.00849528, -0.07706095, 0.1570547],
[-0.00949471, -0.02964127, 0.34657036],
[-0.03572177, 0.0945091, 0.3414855],
[0.05584937, -0.02028961, -0.57682568],
[0.05744254, -0.01482333, -0.17431274]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_loadings = np.array(
[[0.65649254, 0.1847647, 0.15270699],
[0.67554234, 0.15237508, -0.09182247],
[0.19219925, -0.67750975, 0.08673128],
[0.2133631, -0.67034809, -0.08835483],
[-0.03178912, -0.06668336, 0.43395268],
[0.15684588, -0.13350241, 0.20578984],
[0.03337736, -0.03807306, 0.09871553],
[-0.06199844, 0.01559854, -0.1881785],
[0.00406146, -0.00587025, 0.16413253],
[-0.00374239, -0.05848466, 0.19140336],
[0.00139214, -0.01033161, 0.32239136],
[-0.05292828, 0.0953533, 0.31916881],
[0.04031924, -0.01961045, -0.65174036],
[0.06172484, -0.06597366, -0.1244497]])
assert_array_almost_equal(pls_ca.x_loadings_, x_loadings)
y_weights = np.array(
[[0.66101097, 0.18672553, 0.22826092],
[0.69347861, 0.18463471, -0.23995597],
[0.14462724, -0.66504085, 0.17082434],
[0.22247955, -0.6932605, -0.09832993],
[0.07035859, 0.00714283, 0.67810124],
[0.07765351, -0.0105204, -0.44108074],
[-0.00917056, 0.04322147, 0.10062478],
[-0.01909512, 0.06182718, 0.28830475],
[0.01756709, 0.04797666, 0.32225745]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_loadings = np.array(
[[0.68568625, 0.1674376, 0.0969508],
[0.68782064, 0.20375837, -0.1164448],
[0.11712173, -0.68046903, 0.12001505],
[0.17860457, -0.6798319, -0.05089681],
[0.06265739, -0.0277703, 0.74729584],
[0.0914178, 0.00403751, -0.5135078],
[-0.02196918, -0.01377169, 0.09564505],
[-0.03288952, 0.09039729, 0.31858973],
[0.04287624, 0.05254676, 0.27836841]])
assert_array_almost_equal(pls_ca.y_loadings_, y_loadings)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
def test_PLSSVD():
# Let's check that PLSSVD doesn't return all possible components but just
# the specified number
d = load_linnerud()
X = d.data
Y = d.target
n_components = 2
for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]:
pls = clf(n_components=n_components)
pls.fit(X, Y)
assert_equal(n_components, pls.y_scores_.shape[1])
def test_univariate_pls_regression():
# Ensure 1d Y is correctly interpreted
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSRegression()
# Compare 1d to column vector
model1 = clf.fit(X, Y[:, 0]).coef_
model2 = clf.fit(X, Y[:, :1]).coef_
assert_array_almost_equal(model1, model2)
def test_predict_transform_copy():
# check that the "copy" keyword works
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSCanonical()
X_copy = X.copy()
Y_copy = Y.copy()
clf.fit(X, Y)
# check that results are identical with copy
assert_array_almost_equal(clf.predict(X), clf.predict(X.copy(), copy=False))
assert_array_almost_equal(clf.transform(X), clf.transform(X.copy(), copy=False))
# check also if passing Y
assert_array_almost_equal(clf.transform(X, Y),
clf.transform(X.copy(), Y.copy(), copy=False))
# check that copy doesn't destroy
# we do want to check exact equality here
assert_array_equal(X_copy, X)
assert_array_equal(Y_copy, Y)
# also check that mean wasn't zero before (to make sure we didn't touch it)
assert_true(np.all(X.mean(axis=0) != 0))
def test_scale():
d = load_linnerud()
X = d.data
Y = d.target
# causes X[:, -1].std() to be zero
X[:, -1] = 1.0
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.set_params(scale=True)
clf.fit(X, Y)
def test_pls_errors():
d = load_linnerud()
X = d.data
Y = d.target
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.n_components = 4
assert_raise_message(ValueError, "Invalid number of components", clf.fit, X, Y)
|
bsd-3-clause
|
enricopal/snowball_decision
|
decision_algorithm.py
|
1
|
25944
|
import numpy as np
import random
import networkx as nx
from operator import itemgetter
import pandas as pd
import sys
import json
import optparse
###############################################################
#### CONCORDANCE, DISCORDANCE AND CREDIBILITY FUNCTIONS ######
###############################################################
def conc_func(i,j,k): #computes the concordance given a pair of alternatives i and j and a given criterion k
x = float(alternatives[i,k] - alternatives[j,k])
q = float(indiff_thresh[k])
p = float(pref_thresh[k])
if (p != q): #check that the angular coeff. exists
if (x < q):
return 1
elif (x < p):
return (-x)/(p-q) + (p)/(p-q)
elif (x >= p):
return 0
else: #otherwise it is a step function
if (x <= p):
return 1
else:
return 0
def disc_func(i,j,k): #computes the discordance given a pair of alternatives i and j and a given criterion k
x = float(alternatives[i,k] - alternatives[j,k])
v = float(vetos[k])
p = float(pref_thresh[k])
if (p!=v):#check that the angular coeff. exists
if (x <= p):
return 0
elif (x <= v):
return (x)/(v-p) - (p)/(v-p)
elif (x > v):
return 1
else: #otherwise it is a step function
if (x <= p):
return 0
else:
return 1
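# Worked example (hypothetical thresholds, not taken from any input data): with an
# indifference threshold q = 1 and a preference threshold p = 3, a difference x = 2
# yields a partial concordance of (p - x) / (p - q) = 0.5; with p = 3 and a veto
# threshold v = 5, a difference x = 4 yields a partial discordance of (x - p) / (v - p) = 0.5.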
#define the concordance and discordance functions
def conc_func_tri(i,j,k): #computes the concordance given an alternative i, a reference profile j and a given criterion k
x = float(alternatives[i,k] - profiles[j,k])
q = float(indiff_thresh[k])
p = float(pref_thresh[k])
if (p != q): #check that the angular coeff. exists
if (x < q):
return 1
elif (x < p):
return (-x)/(p-q) + (p)/(p-q)
elif (x >= p):
return 0
else: #otherwise it is a step function
if (x <= p):
return 1
else:
return 0
def disc_func_tri(i,j,k): #computes the discordance given an alternative i, a reference profile j and a given criterion k
x = float(alternatives[i,k] - profiles[j,k])
v = float(vetos[k])
p = float(pref_thresh[k])
if (p!=v):#check that the angular coeff. exists
if (x <= p):
return 0
elif (x <= v):
return (x)/(v-p) - (p)/(v-p)
elif (x > v):
return 1
else: #otherwise it is a step function
if (x <= p):
return 0
else:
return 1
def concordance_tri(i,j):
c = []
for k in range(m): #for each criterion
c.append(weights[k]*conc_func_tri(i,j,k))
return sum(c)
#define the credibility of the outranking as a function of concordance and discordance
def credibility_tri(i,j):
c = concordance_tri(i,j)
fact = c
for k in range(m):#for each criterion
d = disc_func_tri(i,j,k) #just for simplicity of notation
if (d > c): #if the discordance of the criterion is greater than the overall concordance
fact = fact * (1-d) / (1-c)
return fact
#define the concordance and discordance for a pair of alternatives
def concordance(i,j):
c = []
for k in range(m): #for each criterion
c.append(weights[k]*conc_func(i,j,k))
return sum(c)
#define the credibility of the outranking as a function of concordance and discordance
def credibility(i,j):
c = concordance(i,j)
fact = c
for k in range(m):#for each criterion
d = disc_func(i,j,k) #just for simplicity of notation
if (d > c): #if the discordance of the criterion is greater than the overall concordance
fact = fact * (1-d) / (1-c)
return fact
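# Numerical sketch (hypothetical values): with an overall concordance c = 0.6 and
# per-criterion discordances (0.8, 0.2), only 0.8 exceeds c, so the credibility is
# 0.6 * (1 - 0.8) / (1 - 0.6) = 0.3; discordances below c leave the credibility unchanged.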
def discrimination_thresh(x):#non constant threshold
return a - b*x
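# Example (using the module values a = 0.3, b = 0.15 set in decision_ranking):
# a maximal credibility l = 0.8 gives a discrimination threshold alpha = 0.3 - 0.15 * 0.8 = 0.18.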
#########################################
############ ALGORITHMS #################
#########################################
#distillation algorithm
def compute_scores_2(cred_matrix,altern_list):
n = len(altern_list)
scores = {} #vector holding the score of each alternative
keys = altern_list
for i in keys: #initialize to 0 the scores
scores[i] = 0
#compute the max credibility
l = max(cred_matrix.values())
alpha = discrimination_thresh(l) #compute the discrimination threshold
for i in altern_list: #for each alternative
for j in altern_list:
if i!=j: #excluding the diagonal elements
if(cred_matrix[(i,j)] >= l - alpha):
scores[i] += 1
if(cred_matrix[(j,i)] >= l - alpha):
scores[i] -= 1
return scores
#what happens when there are more than two alternatives
def runoff(cred_matrix,maxima_matrix, maxima):
scores = {}
scores = compute_scores_2(maxima_matrix,maxima) #first step of the algorithm
#check if there is a unique max
maxima_run = []
maximum = max(scores.values())
for i in scores.keys():#create a list with the alternatives that have maximum score
if scores[i] == maximum:
maxima_run.append(i)
if len(maxima_run) == 1: #if there is a unique max
ranking.append(maxima_run[0]) #select the winner of the competition
#eliminate the winning alternative from the matrix
for i,j in cred_matrix.keys():
if i == maxima_run[0] or j == maxima_run[0]:
del cred_matrix[(i,j)]
altern_list.remove(maxima_run[0])
distillation_2(cred_matrix)
elif len(maxima_run) > 1:#otherwise put them all together with the same ranking
ranking.append(maxima_run)
#eliminate the winning alternatives from the matrix
if len(cred_matrix) > len(maxima_run): # if there are more alternatives left to rank, remove the ones just ranked
#print cred_matrix
for j in maxima_run:
altern_list.remove(j)
for i,k in cred_matrix.keys():
if i == j or k == j:
del cred_matrix[(i,k)]
#print cred_matrix.values(), maxima_run
distillation_2(cred_matrix)
else: # otherwise the algorithm stops
return ranking
#initializing the variables
def distillation_2(cred_matrix):
#print cred_matrix
if len(cred_matrix) == 1: #there is just one alternative left, the algorithm has to stop
ranking.append(altern_list[0]) #add the last element
if len(cred_matrix) > 1: #are there any more alternatives to rank?
scores = {}
scores = compute_scores_2(cred_matrix,altern_list) #first step of the algorithm
#check if there is a unique max
maxima = []
#index_maxima = []
nonmaxima = []
#nonmaxima_all = []
#index_nonmaxima = []
maxima_matrix = []
maximum = max(scores.values())
for i in scores.keys():#create a list with the alternatives that have maximum score
if scores[i] == maximum:
maxima.append(i)
else:
nonmaxima.append(i)
if len(maxima) == 1: #if there is a unique max
ranking.append(maxima[0]) #select the winner of the competition
#eliminate the winning alternative from the matrix
for i,j in cred_matrix.keys():
if i == maxima[0] or j == maxima[0]:
del cred_matrix[(i,j)]
altern_list.remove(maxima[0])
distillation_2(cred_matrix)
if len(maxima) > 1:
#build the submatrix of the maxima
#remove the non-maxima from the credibility matrix
maxima_matrix = {}
for i in cred_matrix.keys():
maxima_matrix[i] = cred_matrix[i]
for k in nonmaxima: # remove all the non-maxima
for i,j in maxima_matrix.keys():
if i == k or j == k:
del maxima_matrix[(i,j)]
#print cred_matrix
#then I apply the runoff to the submatrix of maxima
runoff(cred_matrix,maxima_matrix, maxima)
return ranking
#what happens when there are more than two alternatives
def runoff_asc(cred_matrix,minima_matrix, minima):
scores = {}
scores = compute_scores_2(minima_matrix,minima) #first step of the algorithm
#find the minima
minima_run = []
minimum = min(scores.values())
for i in scores.keys():#create a list with the alternatives that have minimum score
if scores[i] == minimum:
minima_run.append(i)
#check if there is a unique min
if len(minima_run) == 1: #if there is a unique max
ranking.append(minima_run[0]) #select the winner of the competition
#eliminate the winning alternative from the matrix
for i,j in cred_matrix.keys():
if i == minima_run[0] or j == minima_run[0]:
del cred_matrix[(i,j)]
altern_list.remove(minima_run[0])
distillation_2_asc(cred_matrix)
elif len(minima_run) > 1:#otherwise put them all together with the same ranking
ranking.append(minima_run)
#eliminate the winning alternatives from the matrix
if len(cred_matrix) > len(minima_run): # if there are more alternatives left to rank, remove the ones just ranked
for j in minima_run:
altern_list.remove(j)
for i,k in cred_matrix.keys():
if i == j or k == j:
del cred_matrix[(i,k)]
distillation_2_asc(cred_matrix)
else: # otherwise the algorithm stops
return ranking
def distillation_2_asc(cred_matrix):
#there is just one alternative left, the algorithm has to stop
if len(cred_matrix) == 1:
#print cred_matrix
ranking.append(altern_list[0]) #add the last element
#are there any more alternatives to rank?
if len(cred_matrix) > 1:
scores = {}
scores = compute_scores_2(cred_matrix,altern_list) #first step of the algorithm
#find the minima
minima = []
nonminima = []
minima_matrix = []
minimum = min(scores.values())
for i in scores.keys():#create a list with the alternatives that have minimum score
if scores[i] == minimum:
minima.append(i)
else:
nonminima.append(i)
if len(minima) == 1: #if there is a unique max
ranking.append(minima[0]) #select the winner of the competition
#eliminate the winning alternative from the matrix
for i,j in cred_matrix.keys():
if i == minima[0] or j == minima[0]:
del cred_matrix[(i,j)]
altern_list.remove(minima[0])
distillation_2_asc(cred_matrix)
#if there's more than a minimum
if len(minima) > 1:
#build the submatrix of the minima
#remove the non-minima from the credibility matrix
minima_matrix = {}
for i in cred_matrix.keys():
minima_matrix[i] = cred_matrix[i]
for k in nonminima: # remove all the non-minima
for i,j in minima_matrix.keys():
if i == k or j == k:
del minima_matrix[(i,j)]
#then I apply the runoff to the submatrix of maxima
runoff_asc(cred_matrix,minima_matrix, minima)
return ranking
def ELECTREIII(x):
global alternatives
alternatives = x
#################################
### credibility matrix ##########
#################################
cred_matrix = {} #described by a dictionary taking a tuple (i,j) as key
for i in range(n): #assigning the values to the cred_matrix
for j in range(n):
cred_matrix[(i,j)] = credibility(i,j)
################################
## computing the threshold #####
################################
#compute the max element l of the cred_matrix
l = max(cred_matrix.values())
#compute alpha
alpha = a - b*l
#############################
####### distillation ########
#############################
#calculating descending ranking
global ranking
ranking = []
global altern_list
altern_list = range(n)
disc_order = distillation_2(cred_matrix)
#calculating ascending ranking
ranking = []
altern_list = range(n)
#reinitializing the credibility matrix
cred_matrix = {} #described by a dictionary taking a tuple (i,j) as key
for i in range(n): #assigning the values to the cred_matrix
for j in range(n):
cred_matrix[(i,j)] = credibility(i,j)
'''
asc_order = distillation_2_asc(cred_matrix)
#the asc_order must be reversed
asc_order = asc_order[::-1]
#print disc_order, asc_order
#turning lists into dictionaries
rank_asc = {}
'''
rank_disc = {}
'''
for i in range(len(asc_order)):
if type(asc_order[i]) == list:#means I can iter through it
for j in asc_order[i]:
rank_asc[j] = i
else: #if it is a single number I can make directly the association
rank_asc[asc_order[i]] = i
'''
for i in range(len(disc_order)):
if type(disc_order[i]) == list:
for j in disc_order[i]:
rank_disc[j] = i
else:
rank_disc[disc_order[i]] = i
#######################################
##### combining the rankings ##########
#######################################
adjacency = np.zeros((n,n))
'''
#compare all pair of alternatives
#if i outranks j in one of the two orders and j does not outrank i in the other, i outranks j in the final order
#otherwise, they are incomparable
#N.B. the lower the ranking, the better
for i in range(n):
for j in range(n):
if i != j:
if rank_asc[i] < rank_asc[j] and rank_disc[i] <= rank_disc[j]:
adjacency[i,j] = 1
if rank_disc[i] < rank_disc[j] and rank_asc[i] <= rank_asc[j]:
adjacency[i,j] = 1
#creating the outranking graph
G = nx.DiGraph()
G.add_nodes_from(range(n))
for i in range(n):
for j in range(n):
if adjacency[i,j] == 1:
G.add_edge(i,j)
indegree = nx.in_degree_centrality(G)
rank = {}
for i in G.nodes():
rank[i] = (n-1)*indegree[i]
#print asc_order
#rescaling to an ordinal sequence
#let us count the number of distinct elements in the indegree
count = 1
for i in range(len(rank.values())-1):
if rank.values()[i] != rank.values()[i+1]:
count += 1
'''
#sorted_rank = sorted(rank.iteritems(), key=itemgetter(1)) #list representing the pair of values
sorted_rank = sorted(rank_disc.iteritems(), key=itemgetter(1)) #list representing the pair of values
#transformation to the data
sorted_rank = np.array(sorted_rank)
for i in range(len(sorted_rank) - 1):
if sorted_rank[i + 1][1] - sorted_rank[i][1] > 1:
sorted_rank[i + 1][1] = sorted_rank[i][1] + 1
final_rank = {}
for i,j in sorted_rank:
final_rank[i] = j
return sorted_rank
####################################
##### RUN THE ALGORITHM ############
####################################
def decision_ranking(inputs, crit_weights, mitigation_strategies, indiff, pref, veto):
dati = pd.read_json(inputs)
global m
m = len(dati) #number of criteria
#normalizing the weights
global weights
weights = np.array(crit_weights)
total_weight = sum(weights)
if total_weight == 0:
weights = [1./m for i in range(m)]
else:
weights = weights/total_weight
#parameters of the model (vectors)
#vetos threshold
#concordance threshold
#discordance threshold
global vetos, pref_thresh, indiff_thresh,a,b
vetos = veto
pref_thresh = pref
indiff_thresh = indiff
#threshold parameters
a = 0.3
b = 0.15
length = len(dati.keys()) -1
alternatives = np.array([dati[mitigation_strategies[i]] for i in range(length)])
global n
n = len(alternatives) #number of strategies
N = 101 #number of runs
results = [] #saving the ranking for each run
for i in range(N): # repeat N times
#original matrix
alternatives = np.array([dati[mitigation_strategies[i]] for i in range(length)])
#random sampled
alternat = np.zeros((n,m))
#alternat[i,j] is a random sample from a Poisson distribution with mean alternatives[i,j]
for i in range(n):
for j in range(m):
alternat[i,j] = np.random.poisson(alternatives[i,j])
results.append(ELECTREIII(alternat))
#dictionary assigning to each alternative a list of its rankings
ranking_montecarlo = {}
#initializing
for i in range(n):
ranking_montecarlo[i] = []
for i in results:
for j in i: # (alternative, rank) pair
k = int(j[0])
l = int(j[1])
ranking_montecarlo[k].append(l)
#now we can compute the median
final_ranking_montecarlo = {}
for i in ranking_montecarlo.keys():
final_ranking_montecarlo[i] = np.median(ranking_montecarlo[i])
#compute the ranking distribution
#occurrences tells us the frequency of ranking r for alternative i
occurrences = np.zeros((n,n))
for i in results:
for j in i: # (alternative, rank) pair
k = int(j[0]) #alternative
l = int(j[1]) #rank
occurrences[k,l] += 1 #everytime I encounter the couple, I increment the frequency
#assign their names to the alternatives
named_final_ranking = {}
for i in final_ranking_montecarlo.keys():
named_final_ranking[dati.keys()[i+1]] = final_ranking_montecarlo[i] + 1 # assign the names and make the ranking start from 1
#assign the names to the ranking distributions
ranking_distributions = {}
var = 1
for i in occurrences:
ranking_distributions[dati.keys()[var]] = i
var += 1
####################
### OUTPUTS DATA ###
####################
#print "The medians of the ranking distributions are\n"
#print named_final_ranking
#print "\n"
#print "The ranking distributions are: \n"
#print ranking_distributions
return (named_final_ranking, ranking_distributions)
def ELECTRETri(x):
global alternatives
alternatives = x
#################################
###### credibility matrix #######
#################################
cred_matrix = np.zeros((n,M)) #initializing the credibility matrix
for i in range(n): #assigning the values to the cred_matrix
for j in range(M):
cred_matrix[i,j] = credibility_tri(i,j)
#################################
### turn the fuzzy into crisp ###
#################################
for i in range(n):
for j in range(M):
if cred_matrix[i,j] > lambd: #if cred is greater than a threshold
cred_matrix[i,j] = 1
else:
cred_matrix[i,j] = 0
###################################
########## exploration ############
###################################
pessimistic = {}
#for each alternative, compute which reference profiles it outranks
for i in range(n):
pessimistic[i] = []
for j in range(M):
if cred_matrix[i,j] == 1:
pessimistic[i].append(j)
#then pick the best among these
for i in pessimistic.keys():
pessimistic[i] = min(pessimistic.values()[i])
#turn the dictionary into a sorted list
pessimistic = sorted(pessimistic.iteritems(), key = itemgetter(1))
return pessimistic
def decision_sorting(inputs, crit_weights,mitigation_strategies, indiff, pref, veto, prof):
dati = pd.read_json(inputs)
global m
m = len(dati) #number of criteria
#normalizing the weights
global weights
weights = np.array(crit_weights)
total_weight = sum(weights)
if total_weight == 0:
weights = [1./m for i in range(m)]
else:
weights = weights/total_weight
#parameters of the model (vectors)
#vetos threshold
#concordance threshold
#discordance threshold
global vetos, pref_thresh, indiff_thresh,lambd
vetos = veto
pref_thresh = pref
indiff_thresh = indiff
length = len(dati.keys())-1
alternatives = np.array([dati[mitigation_strategies[i]] for i in range(length)])
global n
n = len(alternatives) #number of strategies
lambd = 0.75
#alternatives = np.array((dati['Basic building retrofitting'], dati['Enhanced building retrofitting'],dati['Evacuation'],dati['No mitigation']))
#n = len(alternatives)
global profiles
profiles = prof
#profiles = np.array(([5, 5,0,2,1,3,6], [25, 3500000,2500000,7000,180000,80,200],[1000, 2000000000,180000000,2000008,15020000,3000,6000]))
global M
M = len(profiles) #number of classes
N = 101 #number of runs
results = [] #saving the ranking for each run
for i in range(N): # repeat N times
#original matrix
alternatives = np.array([dati[mitigation_strategies[i]] for i in range(length)])
#random sampled
alternat = np.zeros((n,m))
#alternat[i,j] is a random sample from a Poisson distribution with mean alternatives[i,j]
for i in range(n):
for j in range(m):
alternat[i,j] = np.random.poisson(alternatives[i,j])
results.append(ELECTRETri(alternat))
#dictionary assigning to each alternative a list of its categories
sorting_montecarlo = {}
#initializing
for i in range(n):
sorting_montecarlo[i] = []
for i in results:
for j in i: # (alternative, category) pair
k = int(j[0])
l = int(j[1])
sorting_montecarlo[k].append(l)
#now we can compute the median
final_sorting_montecarlo = {}
for i in sorting_montecarlo.keys():
final_sorting_montecarlo[i] = np.median(sorting_montecarlo[i])
#we can assign letters instead of numbers
for i in final_sorting_montecarlo.keys():
if final_sorting_montecarlo[i] == 0:
final_sorting_montecarlo[i] = 'A'
elif final_sorting_montecarlo[i] == 1:
final_sorting_montecarlo[i] = 'B'
elif final_sorting_montecarlo[i] == 2:
final_sorting_montecarlo[i] = 'C'
#building the probability distribution
#occurrences tells us the frequency of ranking r for alternative i
occurrences = np.zeros((n,M))
for i in results:
for j in i: # (alternative, category) pair
k = int(j[0]) #alternative
l = int(j[1]) #rank
occurrences[k,l] += 1 #everytime I encounter the couple, I increment the frequency
#assign their names to the alternatives
named_final_sorting = {}
for i in final_sorting_montecarlo.keys():
named_final_sorting[dati.keys()[i+1]] = final_sorting_montecarlo[i] # assign the names to the alternatives
#assign the names to the ranking distributions
sorting_distributions = {}
var = 1
for i in occurrences:
sorting_distributions[dati.keys()[var]] = i
var += 1
####################
### OUTPUTS DATA ###
####################
return (named_final_sorting, sorting_distributions)
#a = decision_sorting('santorini/scenario1_input.json',[0.2,0.1,0.3,0.0,0.2,0.1,0.1],['EVC_anteEQ1','EVC_anteEQ1_anteEQ2','No Mitigation'],
#print a[0],a[1]
b = decision_ranking('santorini/scenario1_input.json',[5,3,2,1,2,0,0],['EVC_anteEQ1','EVC_anteEQ1_anteEQ2','No Mitigation'],
np.array([0, 50, 50, 2, 50, 2, 20]), np.array([2, 100, 100, 20, 100, 20, 200]), np.array([5, 5000, 5000, 100, 5000, 100, 2000]))
print b[0],b[1]
#final_sorting, sorting_distribution = decision_sorting('santorini/fhg.json',[0.2 for i in range(8)],['UPS (uninterrupted power supply)','Redundancy within grids','Reinforcement of vulnerable nodes','No Mitigation'],
# np.array([5, 5, 5, 5, 5, 5, 5,5]), np.array([50, 50, 50, 50, 50, 50, 50, 50]), np.array([500, 500, 500, 500, 500, 500, 500, 500]),
# np.array(([30, 25,20,34,30,20,30,20],[50,50,50,50,50,50,50,50],[1000, 20000,18000,2000,5000,5000,6000,5000])))
#print final_sorting
#final_ranking, ranking_distribution = decision_ranking('santorini/fhg.json',[0.2 for i in range(8)],['UPS (uninterrupted power supply)','Redundancy within grids','Reinforcement of vulnerable nodes','No Mitigation'],
# np.array([5, 5, 5, 5, 5, 5, 5,5]), np.array([50, 50, 50, 50, 50, 50, 50, 50]), np.array([500, 500, 500, 500, 500, 500, 500, 500]))
#print final_ranking
|
apache-2.0
|
huongttlan/statsmodels
|
statsmodels/graphics/tsaplots.py
|
16
|
10392
|
"""Correlation plot functions."""
import numpy as np
from statsmodels.graphics import utils
from statsmodels.tsa.stattools import acf, pacf
def plot_acf(x, ax=None, lags=None, alpha=.05, use_vlines=True, unbiased=False,
fft=False, **kwargs):
"""Plot the autocorrelation function
Plots lags on the horizontal and the correlations on vertical axis.
Parameters
----------
x : array_like
Array of time-series values
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
lags : array_like, optional
Array of lag values, used on horizontal axis.
If not given, ``lags=np.arange(len(corr))`` is used.
alpha : scalar, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
Bartlett's formula. If None, no confidence intervals are plotted.
use_vlines : bool, optional
If True, vertical lines and markers are plotted.
If False, only markers are plotted. The default marker is 'o'; it can
be overridden with a ``marker`` kwarg.
unbiased : bool
If True, then denominators for autocovariance are n-k, otherwise n
fft : bool, optional
If True, computes the ACF via FFT.
**kwargs : kwargs, optional
Optional keyword arguments that are directly passed on to the
Matplotlib ``plot`` and ``axhline`` functions.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
matplotlib.pyplot.xcorr
matplotlib.pyplot.acorr
mpl_examples/pylab_examples/xcorr_demo.py
Notes
-----
Adapted from matplotlib's `xcorr`.
Data are plotted as ``plot(lags, corr, **kwargs)``
"""
fig, ax = utils.create_mpl_ax(ax)
if lags is None:
lags = np.arange(len(x))
nlags = len(lags) - 1
else:
nlags = lags
lags = np.arange(lags + 1) # +1 for zero lag
confint = None
# acf has different return type based on alpha
if alpha is None:
acf_x = acf(x, nlags=nlags, alpha=alpha, fft=fft,
unbiased=unbiased)
else:
acf_x, confint = acf(x, nlags=nlags, alpha=alpha, fft=fft,
unbiased=unbiased)
if use_vlines:
ax.vlines(lags, [0], acf_x, **kwargs)
ax.axhline(**kwargs)
kwargs.setdefault('marker', 'o')
kwargs.setdefault('markersize', 5)
kwargs.setdefault('linestyle', 'None')
ax.margins(.05)
ax.plot(lags, acf_x, **kwargs)
ax.set_title("Autocorrelation")
if confint is not None:
# center the confidence interval TODO: do in acf?
ax.fill_between(lags, confint[:,0] - acf_x, confint[:,1] - acf_x, alpha=.25)
return fig
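# Usage sketch (illustrative, not part of the original module):
#
# >>> import numpy as np
# >>> from statsmodels.graphics.tsaplots import plot_acf
# >>> x = np.random.randn(200)
# >>> fig = plot_acf(x, lags=30)  # ACF up to lag 30 with a 95% confidence band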
def plot_pacf(x, ax=None, lags=None, alpha=.05, method='ywm',
use_vlines=True, **kwargs):
"""Plot the partial autocorrelation function
Plots lags on the horizontal and the correlations on vertical axis.
Parameters
----------
x : array_like
Array of time-series values
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
lags : array_like, optional
Array of lag values, used on horizontal axis.
If not given, ``lags=np.arange(len(corr))`` is used.
alpha : scalar, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
1/sqrt(len(x))
method : 'ywunbiased' (default) or 'ywmle' or 'ols'
specifies which method for the calculations to use:
- yw or ywunbiased : yule walker with bias correction in denominator
for acovf
- ywm or ywmle : yule walker without bias correction
- ols - regression of time series on lags of it and on constant
- ld or ldunbiased : Levinson-Durbin recursion with bias correction
- ldb or ldbiased : Levinson-Durbin recursion without bias correction
use_vlines : bool, optional
If True, vertical lines and markers are plotted.
If False, only markers are plotted. The default marker is 'o'; it can
be overridden with a ``marker`` kwarg.
**kwargs : kwargs, optional
Optional keyword arguments that are directly passed on to the
Matplotlib ``plot`` and ``axhline`` functions.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
matplotlib.pyplot.xcorr
matplotlib.pyplot.acorr
mpl_examples/pylab_examples/xcorr_demo.py
Notes
-----
Adapted from matplotlib's `xcorr`.
Data are plotted as ``plot(lags, corr, **kwargs)``
"""
fig, ax = utils.create_mpl_ax(ax)
if lags is None:
lags = np.arange(len(x))
nlags = len(lags) - 1
else:
nlags = lags
lags = np.arange(lags + 1) # +1 for zero lag
confint = None
if alpha is None:
acf_x = pacf(x, nlags=nlags, alpha=alpha, method=method)
else:
acf_x, confint = pacf(x, nlags=nlags, alpha=alpha, method=method)
if use_vlines:
ax.vlines(lags, [0], acf_x, **kwargs)
ax.axhline(**kwargs)
# center the confidence interval TODO: do in acf?
kwargs.setdefault('marker', 'o')
kwargs.setdefault('markersize', 5)
kwargs.setdefault('linestyle', 'None')
ax.margins(.05)
ax.plot(lags, acf_x, **kwargs)
ax.set_title("Partial Autocorrelation")
if confint is not None:
# center the confidence interval TODO: do in acf?
ax.fill_between(lags, confint[:,0] - acf_x, confint[:,1] - acf_x, alpha=.25)
return fig
def seasonal_plot(grouped_x, xticklabels, ylabel=None, ax=None):
"""
Consider using one of month_plot or quarter_plot unless you need
irregular plotting.
Parameters
----------
grouped_x : iterable of DataFrames
Should be a GroupBy object (or similar pair of group_names and groups
as DataFrames) with a DatetimeIndex or PeriodIndex
"""
fig, ax = utils.create_mpl_ax(ax)
start = 0
ticks = []
for season, df in grouped_x:
df = df.copy() # otherwise sort balks for Series; there may be a better way
df.sort()
nobs = len(df)
x_plot = np.arange(start, start + nobs)
ticks.append(x_plot.mean())
ax.plot(x_plot, df.values, 'k')
ax.hlines(df.values.mean(), x_plot[0], x_plot[-1], colors='k')
start += nobs
ax.set_xticks(ticks)
ax.set_xticklabels(xticklabels)
ax.set_ylabel(ylabel)
ax.margins(.1, .05)
return fig
def month_plot(x, dates=None, ylabel=None, ax=None):
"""
Seasonal plot of monthly data
Parameters
----------
x : array-like
Seasonal data to plot. If dates is None, x must be a pandas object
with a PeriodIndex or DatetimeIndex with a monthly frequency.
dates : array-like, optional
If `x` is not a pandas object, then dates must be supplied.
ylabel : str, optional
The label for the y-axis. Will attempt to use the `name` attribute
of the Series.
ax : matplotlib.axes, optional
Existing axes instance.
Returns
-------
matplotlib.Figure
Examples
--------
>>> import statsmodels.api as sm
>>> import pandas as pd
>>> dta = sm.datasets.elnino.load_pandas().data
>>> dta['YEAR'] = dta.YEAR.astype(int).astype(str)
>>> dta = dta.set_index('YEAR').T.unstack()
>>> dates = map(lambda x : pd.datetools.parse('1 '+' '.join(x)),
... dta.index.values)
>>> dta.index = pd.DatetimeIndex(dates, freq='M')
>>> fig = sm.graphics.tsa.month_plot(dta)
.. plot:: plots/graphics_month_plot.py
"""
from pandas import DataFrame
if dates is None:
from statsmodels.tools.data import _check_period_index
_check_period_index(x, freq="M")
else:
from pandas import Series, PeriodIndex
x = Series(x, index=PeriodIndex(dates, freq="M"))
xticklabels = ['j','f','m','a','m','j','j','a','s','o','n','d']
return seasonal_plot(x.groupby(lambda y : y.month), xticklabels,
ylabel=ylabel, ax=ax)
def quarter_plot(x, dates=None, ylabel=None, ax=None):
"""
Seasonal plot of quarterly data
Parameters
----------
x : array-like
Seasonal data to plot. If dates is None, x must be a pandas object
with a PeriodIndex or DatetimeIndex with a monthly frequency.
dates : array-like, optional
If `x` is not a pandas object, then dates must be supplied.
ylabel : str, optional
The label for the y-axis. Will attempt to use the `name` attribute
of the Series.
ax : matplotlib.axes, optional
Existing axes instance.
Returns
-------
matplotlib.Figure
"""
from pandas import DataFrame
if dates is None:
from statsmodels.tools.data import _check_period_index
_check_period_index(x, freq="Q")
else:
from pandas import Series, PeriodIndex
x = Series(x, index=PeriodIndex(dates, freq="Q"))
xticklabels = ['q1', 'q2', 'q3', 'q4']
return seasonal_plot(x.groupby(lambda y : y.quarter), xticklabels,
ylabel=ylabel, ax=ax)
if __name__ == "__main__":
import pandas as pd
#R code to run to load that dataset in this directory
#data(co2)
#library(zoo)
#write.csv(as.data.frame(list(date=as.Date(co2), co2=coredata(co2))), "co2.csv", row.names=FALSE)
co2 = pd.read_csv("co2.csv", index_col=0, parse_dates=True)
month_plot(co2.co2)
#will work when dates are sorted
#co2 = sm.datasets.get_rdataset("co2", cache=True)
x = pd.Series(np.arange(20),
index=pd.PeriodIndex(start='1/1/1990', periods=20, freq='Q'))
quarter_plot(x)
|
bsd-3-clause
|
gallir/influxdb-python
|
examples/tutorial_pandas.py
|
10
|
1381
|
import argparse
import pandas as pd
from influxdb import DataFrameClient
def main(host='localhost', port=8086):
user = 'root'
password = 'root'
dbname = 'example'
client = DataFrameClient(host, port, user, password, dbname)
print("Create pandas DataFrame")
df = pd.DataFrame(data=list(range(30)),
index=pd.date_range(start='2014-11-16',
periods=30, freq='H'))
print("Create database: " + dbname)
client.create_database(dbname)
print("Write DataFrame")
client.write_points(df, 'demo')
print("Write DataFrame with Tags")
client.write_points(df, 'demo', {'k1': 'v1', 'k2': 'v2'})
print("Read DataFrame")
client.query("select * from demo")
print("Delete database: " + dbname)
client.delete_database(dbname)
def parse_args():
parser = argparse.ArgumentParser(
description='example code to play with InfluxDB')
parser.add_argument('--host', type=str, required=False,
default='localhost',
help='hostname of InfluxDB http API')
parser.add_argument('--port', type=int, required=False, default=8086,
help='port of InfluxDB http API')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
main(host=args.host, port=args.port)
|
mit
|
idbedead/RNA-sequence-tools
|
Count_Parsing/count_matrix_stats.py
|
2
|
3657
|
import fnmatch
import os
import pandas as pd
import cPickle as pickle
import csv
from collections import OrderedDict
#list of file paths with mapped hits
pats = ['/netapp/home/idriver/count-picard_combined_ips17_BU3']
#output path
path = '/netapp/home/idriver/count-picard_combined_ips17_BU3'
#base name for final output count matrix and picard metrics
base_name = 'combined_spc'
#initialize dictionaries for collected output
fpkm_matrix_dict_g = OrderedDict()
count_dict = OrderedDict()
norm_read_dict = OrderedDict()
picard_stats_dict = OrderedDict()
#collect gene_list once since it is the same across all samples
st = 1
gene_list = []
for p in pats:
for root, dirnames, filenames in os.walk(os.path.join(path,p)):
for filename in fnmatch.filter(filenames, '*_sorted.bam'):
#sorted file path
cname = root.split('/')[-1]
out = path
sort_out = os.path.join(out, cname, cname+'_sorted')
#fixmate file path
picard_fixmate_out = sort_out.strip('.bam')+'_FM.bam'
#format htseq-count command to generate raw counts from sorted accepted hits
hts_out = os.path.join(out,cname,cname+'_htseqcount.txt')
#run picard CollectRnaSeqMetrics (http://broadinstitute.github.io/picard/command-line-overview.html) and generate matrix of 3' to 5' bias (norm_read_dict)
picard_rnaseqmetric_out = sort_out.strip('sorted.bam')+'RNA_metric.txt'
picard_rnaseqchart_out = sort_out.strip('sorted.bam')+'RNA_metric.pdf'
g_counts = []
with open(hts_out, mode='r') as infile:
hts_tab = csv.reader(infile, delimiter = '\t')
print st
for l in hts_tab:
if st == 1:
gene_list.append(l[0])
g_counts.append(l[1])
st = 2
print len(g_counts)
print len(gene_list)
count_dict[cname] = g_counts
norm_read_dict[cname] = []
index3 = []
with open(picard_rnaseqmetric_out, mode='r') as infile:
pic_tab = csv.reader(infile, delimiter = '\t')
for i, l in enumerate(pic_tab):
if i == 6:
index1 = l
if i == 7:
num_stats = []
for n in l:
if n == '' or n == '?':
num_stats.append(0.0)
else:
num_stats.append(float(n))
picard_stats_dict[cname] = num_stats
if i == 10:
index2 = l
if i > 10 and i <= 111:
index3.append(int(l[0]))
norm_read_dict[cname].append(float(l[1]))
for k, v in norm_read_dict.items():
if len(v) == 0:
norm_read_dict[k] = [0 for x in range(101)]
print norm_read_dict[k], len(norm_read_dict[k])
#form pandas dataframe of each and save as tab delimited file
count_df = pd.DataFrame(count_dict, index = gene_list)
count_df.to_csv(os.path.join(path,base_name+'_count_table.txt'), sep = '\t')
with open(os.path.join(path,'htseq_count_'+base_name+'.p'), 'wb') as fp1:
pickle.dump(count_df, fp1)
pic_stats_df = pd.DataFrame(picard_stats_dict, index = index1)
pic_stats_df.to_csv(os.path.join(path,base_name+'_picard_stats.txt'), sep = '\t')
norm_read_df = pd.DataFrame(norm_read_dict, index = index3)
norm_read_df.to_csv(os.path.join(path,base_name+'_read_bias.txt'), sep = '\t')
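# Reloading the pickled count matrix later (sketch, not part of the original script):
# with open(os.path.join(path, 'htseq_count_' + base_name + '.p'), 'rb') as fp:
#     count_df = pickle.load(fp)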
|
mit
|
willo12/spacegrids
|
setup.py
|
1
|
1211
|
import os
#from distutils.core import setup
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname )).read()
#with open('README.rst') as file:
# long_description = file.read()
setup(name='spacegrids',
version='1.9',
author='Willem Sijp',
author_email='[email protected]',
description='numpy array with grids and associated operations',
download_url="https://github.com/willo12/spacegrids/tarball/1.8",
keywords=('climate data','grid data','data on grids','spatial grids', 'Netcdf data analysis', 'climate analysis scripts',"interpreting meta data Netcdf","geophysics tools"),
packages = find_packages(exclude="tests"),
classifiers = [],
# package_data = {
# "spacegrids": ['README.rst']
# },
long_description=read('README.rst'),
url='https://github.com/willo12/spacegrids',
license = "BSD",
# install_requires = ["numpy>=1.6","scipy>=0.10","matplotlib>=1.1"]
install_requires = []
# extras_require = {
# "ncio": ["netCDF4>=1.0.6"],
# "pandas": ["pandas>=0.8.0"],
# "plotting": ["pandas>=0.8.0","matplotlib>=1.2.1"],
# "interp2d": ["basemap>=1.06"],
# }
)
|
bsd-3-clause
|
daortizh/incubator-spot
|
spot-oa/oa/proxy/proxy_oa.py
|
4
|
16504
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import json
import shutil
import sys
import datetime
import csv, math
from collections import OrderedDict
from utils import Util
from components.data.data import Data
from components.iana.iana_transform import IanaTransform
from components.nc.network_context import NetworkContext
from multiprocessing import Process
import pandas as pd
import time
import md5
class OA(object):
def __init__(self,date,limit=500,logger=None):
self._initialize_members(date,limit,logger)
def _initialize_members(self,date,limit,logger):
# get logger if exists. if not, create new instance.
self._logger = logging.getLogger('OA.PROXY') if logger else Util.get_logger('OA.PROXY',create_file=False)
# initialize required parameters.
self._scrtip_path = os.path.dirname(os.path.abspath(__file__))
self._date = date
self._table_name = "proxy"
self._proxy_results = []
self._limit = limit
self._data_path = None
self._ipynb_path = None
self._ingest_summary_path = None
self._proxy_scores = []
self._proxy_scores_headers = []
self._proxy_extra_columns = []
self._results_delimiter = '\t'
# get app configuration.
self._spot_conf = Util.get_spot_conf()
# get scores fields conf
conf_file = "{0}/proxy_conf.json".format(self._scrtip_path)
self._conf = json.loads(open (conf_file).read(),object_pairs_hook=OrderedDict)
# initialize data engine
self._db = self._spot_conf.get('conf', 'DBNAME').replace("'", "").replace('"', '')
self._engine = Data(self._db, self._table_name,self._logger)
def start(self):
####################
start = time.time()
####################
self._create_folder_structure()
self._add_ipynb()
self._get_proxy_results()
self._add_reputation()
self._add_severity()
self._add_iana()
self._add_network_context()
self._add_hash()
self._create_proxy_scores_csv()
self._get_oa_details()
self._ingest_summary()
##################
end = time.time()
print(end - start)
##################
def _create_folder_structure(self):
# create date folder structure if it does not exist.
self._logger.info("Creating folder structure for OA (data and ipynb)")
self._data_path,self._ingest_summary_path,self._ipynb_path = Util.create_oa_folders("proxy",self._date)
def _add_ipynb(self):
if os.path.isdir(self._ipynb_path):
self._logger.info("Adding edge investigation IPython Notebook")
shutil.copy("{0}/ipynb_templates/Edge_Investigation_master.ipynb".format(self._scrtip_path),"{0}/Edge_Investigation.ipynb".format(self._ipynb_path))
self._logger.info("Adding threat investigation IPython Notebook")
shutil.copy("{0}/ipynb_templates/Threat_Investigation_master.ipynb".format(self._scrtip_path),"{0}/Threat_Investigation.ipynb".format(self._ipynb_path))
else:
self._logger.error("There was a problem adding the IPython Notebooks, please check the directory exists.")
def _get_proxy_results(self):
self._logger.info("Getting {0} Machine Learning Results from HDFS".format(self._date))
proxy_results = "{0}/proxy_results.csv".format(self._data_path)
# get hdfs path from conf file.
HUSER = self._spot_conf.get('conf', 'HUSER').replace("'", "").replace('"', '')
hdfs_path = "{0}/proxy/scored_results/{1}/scores/proxy_results.csv".format(HUSER,self._date)
# get results file from hdfs.
get_command = Util.get_ml_results_form_hdfs(hdfs_path,self._data_path)
self._logger.info("{0}".format(get_command))
# validate the results file exists
if os.path.isfile(proxy_results):
# read number of results based in the limit specified.
self._logger.info("Reading {0} proxy results file: {1}".format(self._date,proxy_results))
self._proxy_results = Util.read_results(proxy_results,self._limit,self._results_delimiter)[:]
if len(self._proxy_results) == 0: self._logger.error("There are no proxy results.");sys.exit(1)
else:
self._logger.error("There was an error getting ML results from HDFS")
sys.exit(1)
# add headers.
self._logger.info("Adding headers")
self._proxy_scores_headers = [ str(key) for (key,value) in self._conf['proxy_score_fields'].items() ]
self._proxy_scores = self._proxy_results[:]
def _create_proxy_scores_csv(self):
proxy_scores_csv = "{0}/proxy_scores.tsv".format(self._data_path)
proxy_scores_final = self._proxy_scores[:];
proxy_scores_final.insert(0,self._proxy_scores_headers)
Util.create_csv_file(proxy_scores_csv,proxy_scores_final, self._results_delimiter)
# create backup file
proxy_scores_bu_csv = "{0}/proxy_scores_bu.tsv".format(self._data_path)
Util.create_csv_file(proxy_scores_bu_csv,proxy_scores_final, self._results_delimiter)
def _add_reputation(self):
# read configuration.
reputation_conf_file = "{0}/components/reputation/reputation_config.json".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
self._logger.info("Reading reputation configuration file: {0}".format(reputation_conf_file))
rep_conf = json.loads(open(reputation_conf_file).read())
# initialize reputation services.
self._rep_services = []
self._logger.info("Initializing reputation services.")
for service in rep_conf:
config = rep_conf[service]
module = __import__("components.reputation.{0}.{0}".format(service), fromlist=['Reputation'])
self._rep_services.append(module.Reputation(config,self._logger))
# get columns for reputation.
rep_cols = {}
indexes = [ int(value) for key, value in self._conf["add_reputation"].items()]
self._logger.info("Getting columns to add reputation based on config file: proxy_conf.json".format())
for index in indexes:
col_list = []
for conn in self._proxy_scores:
col_list.append(conn[index])
rep_cols[index] = list(set(col_list))
# get reputation per column.
self._logger.info("Getting reputation for each service in config")
rep_services_results = []
if self._rep_services :
for key,value in rep_cols.items():
rep_services_results = [ rep_service.check(None,value,True) for rep_service in self._rep_services]
rep_results = {}
for result in rep_services_results:
rep_results = {k: "{0}::{1}".format(rep_results.get(k, ""), result.get(k, "")).strip('::') for k in set(rep_results) | set(result)}
self._proxy_scores = [ conn + [ rep_results[conn[key]] ] for conn in self._proxy_scores ]
else:
self._proxy_scores = [ conn + [""] for conn in self._proxy_scores ]
def _add_severity(self):
# Add severity column
self._proxy_scores = [conn + [0] for conn in self._proxy_scores]
def _add_iana(self):
iana_conf_file = "{0}/components/iana/iana_config.json".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
if os.path.isfile(iana_conf_file):
iana_config = json.loads(open(iana_conf_file).read())
proxy_iana = IanaTransform(iana_config["IANA"])
proxy_rcode_index = self._conf["proxy_score_fields"]["respcode"]
self._proxy_scores = [ conn + [ proxy_iana.get_name(conn[proxy_rcode_index],"proxy_http_rcode")] for conn in self._proxy_scores ]
else:
self._proxy_scores = [ conn + [""] for conn in self._proxy_scores ]
def _add_network_context(self):
nc_conf_file = "{0}/components/nc/nc_config.json".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
if os.path.isfile(nc_conf_file):
nc_conf = json.loads(open(nc_conf_file).read())["NC"]
proxy_nc = NetworkContext(nc_conf,self._logger)
ip_dst_index = self._conf["proxy_score_fields"]["clientip"]
self._proxy_scores = [ conn + [proxy_nc.get_nc(conn[ip_dst_index])] for conn in self._proxy_scores ]
else:
self._proxy_scores = [ conn + [""] for conn in self._proxy_scores ]
def _add_hash(self):
#A hash string is generated to be used as the file name for the edge files.
#These fields are used for the hash creation, so this combination of values is treated as
#a 'unique' connection
cip_index = self._conf["proxy_score_fields"]["clientip"]
uri_index = self._conf["proxy_score_fields"]["fulluri"]
tme_index = self._conf["proxy_score_fields"]["p_time"]
self._proxy_scores = [conn + [str( md5.new(str(conn[cip_index]) + str(conn[uri_index])).hexdigest() + str((conn[tme_index].split(":"))[0]) )] for conn in self._proxy_scores]
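# Illustration (hypothetical record): for clientip '10.0.0.1', fulluri
# 'http://example.com/a' and p_time '13:45:02', the stored value is
# md5('10.0.0.1http://example.com/a').hexdigest() followed by the hour string '13'.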
def _get_oa_details(self):
self._logger.info("Getting OA Proxy suspicious details")
# start suspicious connects details process.
p_sp = Process(target=self._get_suspicious_details)
p_sp.start()
# p_sp.join()
def _get_suspicious_details(self):
hash_list = []
iana_conf_file = "{0}/components/iana/iana_config.json".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
if os.path.isfile(iana_conf_file):
iana_config = json.loads(open(iana_conf_file).read())
proxy_iana = IanaTransform(iana_config["IANA"])
for conn in self._proxy_scores:
conn_hash = conn[self._conf["proxy_score_fields"]["hash"]]
if conn_hash not in hash_list:
hash_list.append(conn_hash)
clientip = conn[self._conf["proxy_score_fields"]["clientip"]]
fulluri = conn[self._conf["proxy_score_fields"]["fulluri"]]
date=conn[self._conf["proxy_score_fields"]["p_date"]].split('-')
if len(date) == 3:
year=date[0]
month=date[1].zfill(2)
day=date[2].zfill(2)
hh=(conn[self._conf["proxy_score_fields"]["p_time"]].split(":"))[0]
self._get_proxy_details(fulluri,clientip,conn_hash,year,month,day,hh,proxy_iana)
def _get_proxy_details(self,fulluri,clientip,conn_hash,year,month,day,hh,proxy_iana):
limit = 250
output_delimiter = '\t'
edge_file ="{0}/edge-{1}-{2}.tsv".format(self._data_path,clientip,conn_hash)
edge_tmp ="{0}/edge-{1}-{2}.tmp".format(self._data_path,clientip,conn_hash)
if not os.path.isfile(edge_file):
proxy_qry = ("SELECT p_date, p_time, clientip, host, webcat, respcode, reqmethod, useragent, resconttype, \
referer, uriport, serverip, scbytes, csbytes, fulluri FROM {0}.{1} WHERE y=\'{2}\' AND m=\'{3}\' AND d=\'{4}\' AND \
h=\'{5}\' AND fulluri =\'{6}\' AND clientip = \'{7}\' LIMIT {8};").format(self._db,self._table_name, year,month,day,hh,fulluri,clientip,limit)
# execute query
self._engine.query(proxy_qry,edge_tmp,output_delimiter)
# add IANA to results.
self._logger.info("Adding IANA translation to details results")
with open(edge_tmp) as proxy_details_csv:
rows = csv.reader(proxy_details_csv, delimiter=output_delimiter,quotechar='"')
next(proxy_details_csv)
update_rows = [[conn[0]] + [conn[1]] + [conn[2]] + [conn[3]] + [conn[4]] + [proxy_iana.get_name(conn[5],"proxy_http_rcode") if proxy_iana else conn[5]] + [conn[6]] + [conn[7]] + [conn[8]] + [conn[9]] + [conn[10]] + [conn[11]] + [conn[12]] + [conn[13]] + [conn[14]] if len(conn) > 0 else [] for conn in rows]
update_rows = filter(None, update_rows)
header = ["p_date","p_time","clientip","host","webcat","respcode","reqmethod","useragent","resconttype","referer","uriport","serverip","scbytes","csbytes","fulluri"]
update_rows.insert(0,header)
# strip stray quotes due to an issue with the output of the query.
update_rows = [ [ w.replace('"','') for w in l ] for l in update_rows ]
# create edge file.
self._logger.info("Creating edge file:{0}".format(edge_file))
with open(edge_file,'wb') as proxy_details_edge:
writer = csv.writer(proxy_details_edge, quoting=csv.QUOTE_NONE, delimiter=output_delimiter)
if update_rows:
writer.writerows(update_rows)
else:
shutil.copy(edge_tmp,edge_file)
try:
os.remove(edge_tmp)
except OSError:
pass
def _ingest_summary(self):
# get date parameters.
yr = self._date[:4]
mn = self._date[4:6]
dy = self._date[6:]
self._logger.info("Getting ingest summary data for the day")
ingest_summary_cols = ["date","total"]
result_rows = []
df_filtered = pd.DataFrame()
ingest_summary_file = "{0}/is_{1}{2}.csv".format(self._ingest_summary_path,yr,mn)
ingest_summary_tmp = "{0}.tmp".format(ingest_summary_file)
if os.path.isfile(ingest_summary_file):
df = pd.read_csv(ingest_summary_file, delimiter=',')
#discards previous rows from the same date
df_filtered = df[df['date'].str.contains("{0}-{1}-{2}".format(yr, mn, dy)) == False]
else:
df = pd.DataFrame()
# get ingest summary.
ingest_summary_qry = ("SELECT p_date, p_time, COUNT(*) as total "
" FROM {0}.{1}"
" WHERE y='{2}' AND m='{3}' AND d='{4}' "
" AND p_date IS NOT NULL AND p_time IS NOT NULL "
" AND clientip IS NOT NULL AND p_time != '' "
" AND host IS NOT NULL AND fulluri IS NOT NULL "
" GROUP BY p_date, p_time;")
ingest_summary_qry = ingest_summary_qry.format(self._db,self._table_name, yr, mn, dy)
results_file = "{0}/results_{1}.csv".format(self._ingest_summary_path,self._date)
self._engine.query(ingest_summary_qry,output_file=results_file,delimiter=",")
if os.path.isfile(results_file):
df_results = pd.read_csv(results_file, delimiter=',')
#Forms a new dataframe splitting the minutes from the time column
df_new = pd.DataFrame([["{0} {1}:{2}".format(val['p_date'], val['p_time'].split(":")[0].zfill(2), val['p_time'].split(":")[1].zfill(2)), int(val['total']) if not math.isnan(val['total']) else 0 ] for key,val in df_results.iterrows()],columns = ingest_summary_cols)
#Groups the data by minute
sf = df_new.groupby(by=['date'])['total'].sum()
df_per_min = pd.DataFrame({'date':sf.index, 'total':sf.values})
df_final = df_filtered.append(df_per_min, ignore_index=True)
df_final.to_csv(ingest_summary_tmp,sep=',', index=False)
os.remove(results_file)
os.rename(ingest_summary_tmp,ingest_summary_file)
else:
self._logger.info("No data found for the ingest summary")
|
apache-2.0
|
paultopia/auto-sklearn
|
autosklearn/models/nested_cv_evaluator.py
|
5
|
8570
|
from collections import defaultdict
import numpy as np
import sklearn.utils
from autosklearn.data.split_data import get_CV_fold
from autosklearn.models.evaluator import Evaluator, calculate_score
class NestedCVEvaluator(Evaluator):
def __init__(self, Datamanager, configuration, with_predictions=False,
all_scoring_functions=False, seed=1, output_dir=None,
output_y_test=False, inner_cv_folds=5, outer_cv_folds=5,
num_run=None):
super(NestedCVEvaluator, self).__init__(
Datamanager, configuration, with_predictions=with_predictions,
all_scoring_functions=all_scoring_functions, seed=seed,
output_dir=output_dir, output_y_test=output_y_test, num_run=num_run)
self.inner_cv_folds = inner_cv_folds
self.outer_cv_folds = outer_cv_folds
self.Y_optimization = None
self.outer_models = [None] * outer_cv_folds
self.inner_models = [None] * outer_cv_folds
self.X_train = Datamanager.data["X_train"]
self.Y_train = Datamanager.data["Y_train"]
for i in range(outer_cv_folds):
self.inner_models[i] = [None] * inner_cv_folds
self.outer_indices = [None] * outer_cv_folds
self.inner_indices = [None] * outer_cv_folds
for i in range(outer_cv_folds):
self.inner_indices[i] = [None] * inner_cv_folds
self.random_state = sklearn.utils.check_random_state(seed)
def fit(self):
seed = self.random_state.randint(1000000)
for outer_fold in range(self.outer_cv_folds):
# First perform the fit for the outer cross validation
outer_train_indices, outer_test_indices = \
get_CV_fold(self.X_train, self.Y_train, fold=outer_fold,
folds=self.outer_cv_folds, shuffle=True,
random_state=seed)
self.outer_indices[outer_fold] = ((outer_train_indices,
outer_test_indices))
model = self.model_class(self.configuration, self.random_state)
self.outer_models[outer_fold] = model
self.outer_models[outer_fold].fit(self.X_train[outer_train_indices],
self.Y_train[outer_train_indices])
# Then perform the fit for the inner cross validation
for inner_fold in range(self.inner_cv_folds):
X_train = self.X_train[outer_train_indices]
Y_train = self.Y_train[outer_train_indices]
inner_train_indices, inner_test_indices = \
get_CV_fold(X_train, Y_train,
fold=inner_fold, folds=self.inner_cv_folds,
shuffle=True, random_state=seed)
inner_train_indices = outer_train_indices[inner_train_indices]
inner_test_indices = outer_train_indices[inner_test_indices]
X_train = self.X_train[inner_train_indices]
Y_train = self.Y_train[inner_train_indices]
self.inner_indices[outer_fold][inner_fold] = \
((inner_train_indices, inner_test_indices))
model = self.model_class(self.configuration, self.random_state)
model = model.fit(X_train, Y_train)
self.inner_models[outer_fold][inner_fold] = model
def predict(self):
# First, obtain the predictions for the ensembles, the validation and
# the test set!
outer_scores = defaultdict(list)
inner_scores = defaultdict(list)
Y_optimization_pred = [None] * self.outer_cv_folds
Y_targets = [None] * self.outer_cv_folds
Y_valid_pred = [None] * self.outer_cv_folds
Y_test_pred = [None] * self.outer_cv_folds
for i in range(self.outer_cv_folds):
train_indices, test_indices = self.outer_indices[i]
opt_pred = self.predict_function(self.X_train[test_indices],
self.outer_models[i],
self.task_type,
Y_train=self.Y_train[train_indices])
Y_optimization_pred[i] = opt_pred
Y_targets[i] = self.Y_train[test_indices]
if self.X_valid is not None:
X_valid = self.X_valid.copy()
valid_pred = self.predict_function(X_valid,
self.outer_models[i],
self.task_type,
Y_train=self.Y_train[train_indices])
Y_valid_pred[i] = valid_pred
if self.X_test is not None:
X_test = self.X_test.copy()
test_pred = self.predict_function(X_test, self.outer_models[i],
self.task_type,
Y_train=self.Y_train[train_indices])
Y_test_pred[i] = test_pred
# Calculate the outer scores
for i in range(self.outer_cv_folds):
scores = calculate_score(Y_targets[i], Y_optimization_pred[i],
self.task_type, self.metric,
self.D.info['target_num'],
all_scoring_functions=self.all_scoring_functions)
if self.all_scoring_functions:
for score_name in scores:
outer_scores[score_name].append(scores[score_name])
else:
outer_scores[self.metric].append(scores)
Y_optimization_pred = np.concatenate([Y_optimization_pred[i] for i in
range(self.outer_cv_folds) if
Y_optimization_pred[
i] is not None])
Y_targets = np.concatenate([Y_targets[i] for i in range(self.outer_cv_folds)
if Y_targets[i] is not None])
if self.X_valid is not None:
Y_valid_pred = np.array([Y_valid_pred[i] for i in range(
self.outer_cv_folds) if Y_valid_pred[i] is not None])
# Average the predictions of several models
if len(Y_valid_pred.shape) == 3:
Y_valid_pred = np.nanmean(Y_valid_pred, axis=0)
if self.X_test is not None:
Y_test_pred = np.array([Y_test_pred[i] for i in range(
self.outer_cv_folds) if Y_test_pred[i] is not None])
# Average the predictions of several models
if len(Y_test_pred.shape) == 3:
Y_test_pred = np.nanmean(Y_test_pred, axis=0)
self.Y_optimization = Y_targets
# Second, calculate the inner score
for outer_fold in range(self.outer_cv_folds):
for inner_fold in range(self.inner_cv_folds):
inner_train_indices, inner_test_indices = self.inner_indices[
outer_fold][inner_fold]
Y_test = self.Y_train[inner_test_indices]
X_test = self.X_train[inner_test_indices]
model = self.inner_models[outer_fold][inner_fold]
Y_hat = self.predict_function(X_test, model, self.task_type,
Y_train=self.Y_train[inner_train_indices])
scores = calculate_score(Y_test, Y_hat, self.task_type, self.metric,
self.D.info['target_num'],
all_scoring_functions=self.all_scoring_functions)
if self.all_scoring_functions:
for score_name in scores:
inner_scores[score_name].append(scores[score_name])
else:
inner_scores[self.metric].append(scores)
# Average the scores!
if self.all_scoring_functions:
inner_err = {key: 1 - np.mean(inner_scores[key])
for key in inner_scores}
outer_err = {"outer:%s" % key: 1 - np.mean(outer_scores[key])
for key in outer_scores}
inner_err.update(outer_err)
else:
inner_err = 1 - np.mean(inner_scores[self.metric])
if self.with_predictions:
return inner_err, Y_optimization_pred, Y_valid_pred, Y_test_pred
return inner_err
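# --- Illustrative sketch (not part of the original module) ---
# Demonstrates the index bookkeeping used in fit() above: inner-fold indices
# are produced relative to the outer training set and must be mapped back to
# positions in the full data. scikit-learn's KFold stands in for get_CV_fold
# here purely for illustration (assumes a scikit-learn with model_selection).
def _example_nested_fold_indices():
    import numpy as np
    from sklearn.model_selection import KFold

    n_samples = 20
    X = np.arange(n_samples).reshape(-1, 1)
    outer = KFold(n_splits=4, shuffle=True, random_state=0)
    inner = KFold(n_splits=5, shuffle=True, random_state=0)

    nested_indices = []
    for outer_train, outer_test in outer.split(X):
        for inner_train_rel, inner_test_rel in inner.split(X[outer_train]):
            # Remap the relative inner indices into the original sample space,
            # mirroring `inner_train_indices = outer_train_indices[inner_train_indices]`.
            inner_train = outer_train[inner_train_rel]
            inner_test = outer_train[inner_test_rel]
            nested_indices.append((inner_train, inner_test))
    return nested_indices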
|
bsd-3-clause
|
hrjn/scikit-learn
|
sklearn/model_selection/_validation.py
|
6
|
38471
|
"""
The :mod:`sklearn.model_selection._validation` module includes classes and
functions to validate the model.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
import numbers
import time
import numpy as np
import scipy.sparse as sp
from ..base import is_classifier, clone
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.fixes import astype
from ..utils.validation import _is_arraylike, _num_samples
from ..utils.metaestimators import _safe_split
from ..externals.joblib import Parallel, delayed, logger
from ..metrics.scorer import check_scoring
from ..exceptions import FitFailedWarning
from ._split import check_cv
from ..preprocessing import LabelEncoder
__all__ = ['cross_val_score', 'cross_val_predict', 'permutation_test_score',
'learning_curve', 'validation_curve']
def cross_val_score(estimator, X, y=None, groups=None, scoring=None, cv=None,
n_jobs=1, verbose=0, fit_params=None,
pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_score
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> print(cross_val_score(lasso, X, y)) # doctest: +ELLIPSIS
[ 0.33150734 0.08022311 0.03531764]
See Also
---------
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv.split(X, y, groups))
return np.array(scores)[:, 0]
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, return_n_test_samples=False,
return_times=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
Return parameters that has been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
fit_time : float
Time spent for fitting in seconds.
score_time : float
Time spent for scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = ''
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
# Note fit time as time until error
fit_time = time.time() - start_time
score_time = 0.0
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)")
else:
fit_time = time.time() - start_time
test_score = _score(estimator, X_test, y_test, scorer)
score_time = time.time() - start_time - fit_time
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
total_time = score_time + fit_time
end_msg = "%s, total=%s" % (msg, logger.short_format_time(total_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score, test_score] if return_train_score else [test_score]
if return_n_test_samples:
ret.append(_num_samples(X_test))
if return_times:
ret.extend([fit_time, score_time])
if return_parameters:
ret.append(parameters)
return ret
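# --- Illustrative sketch (not part of the original module) ---
# Shows the flat list layout documented for _fit_and_score when the optional
# return_* flags are enabled. The estimator and split below are arbitrary toy
# choices; only the ordering of the returned values is the point.
def _example_fit_and_score_layout():
    from sklearn.datasets import load_diabetes
    from sklearn.linear_model import Ridge

    diabetes = load_diabetes()
    X, y = diabetes.data, diabetes.target
    estimator = Ridge()
    scorer = check_scoring(estimator)          # default scorer for the estimator
    train, test = np.arange(300), np.arange(300, len(y))

    ret = _fit_and_score(estimator, X, y, scorer, train, test, verbose=0,
                         parameters=None, fit_params=None,
                         return_train_score=True, return_n_test_samples=True,
                         return_times=True)
    train_score, test_score, n_test_samples, fit_time, score_time = ret
    return train_score, test_score, n_test_samples, fit_time, score_time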
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if hasattr(score, 'item'):
try:
# e.g. unwrap memmapped scalars
score = score.item()
except ValueError:
# non-scalar?
pass
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def cross_val_predict(estimator, X, y=None, groups=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs',
method='predict'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
method : string, optional, default: 'predict'
Invokes the passed method name of the passed estimator. For
method='predict_proba', the columns correspond to the classes
in sorted order.
Returns
-------
predictions : ndarray
This is the result of calling ``method``
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_predict
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> y_pred = cross_val_predict(lasso, X, y)
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
# Ensure the estimator has implemented the passed decision function
if not callable(getattr(estimator, method)):
raise AttributeError('{} not implemented in estimator'
.format(method))
if method in ['decision_function', 'predict_proba', 'predict_log_proba']:
le = LabelEncoder()
y = le.fit_transform(y)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
prediction_blocks = parallel(delayed(_fit_and_predict)(
clone(estimator), X, y, train, test, verbose, fit_params, method)
for train, test in cv.split(X, y, groups))
# Concatenate the predictions
predictions = [pred_block_i for pred_block_i, _ in prediction_blocks]
test_indices = np.concatenate([indices_i
for _, indices_i in prediction_blocks])
if not _check_is_permutation(test_indices, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
inv_test_indices = np.empty(len(test_indices), dtype=int)
inv_test_indices[test_indices] = np.arange(len(test_indices))
# Check for sparse predictions
if sp.issparse(predictions[0]):
predictions = sp.vstack(predictions, format=predictions[0].format)
else:
predictions = np.concatenate(predictions)
return predictions[inv_test_indices]
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params,
method):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
method : string
Invokes the passed method name of the passed estimator.
Returns
-------
predictions : sequence
Result of calling 'estimator.method'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
func = getattr(estimator, method)
predictions = func(X_test)
if method in ['decision_function', 'predict_proba', 'predict_log_proba']:
n_classes = len(set(y))
predictions_ = np.zeros((X_test.shape[0], n_classes))
if method == 'decision_function' and len(estimator.classes_) == 2:
predictions_[:, estimator.classes_[-1]] = predictions
else:
predictions_[:, estimator.classes_] = predictions
predictions = predictions_
return predictions, test
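# --- Illustrative sketch (not part of the original module) ---
# Why the class re-indexing at the end of _fit_and_predict matters: a training
# fold that only saw classes [0, 2] yields predict_proba columns for those two
# classes, which must be scattered into a full (n_samples, n_classes) array so
# that predictions from different folds line up column-wise. Toy numbers only.
def _example_class_reindexing():
    import numpy as np

    proba_fold = np.array([[0.7, 0.3],      # columns correspond to classes_ == [0, 2]
                           [0.2, 0.8]])
    classes_seen = np.array([0, 2])
    n_classes = 3
    full = np.zeros((proba_fold.shape[0], n_classes))
    full[:, classes_seen] = proba_fold      # mirrors predictions_[:, estimator.classes_]
    return full                             # columns now correspond to classes 0, 1, 2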
def _check_is_permutation(indices, n_samples):
"""Check whether indices is a reordering of the array np.arange(n_samples)
Parameters
----------
indices : ndarray
integer array to test
n_samples : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(indices) is np.arange(n)
"""
if len(indices) != n_samples:
return False
hit = np.zeros(n_samples, dtype=bool)
hit[indices] = True
if not np.all(hit):
return False
return True
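# --- Illustrative sketch (not part of the original module) ---
# _check_is_permutation accepts any reordering of np.arange(n_samples) and
# rejects index sets with repeats or the wrong length.
def _example_check_is_permutation():
    import numpy as np

    assert _check_is_permutation(np.array([2, 0, 1]), 3)       # a reordering
    assert not _check_is_permutation(np.array([0, 0, 1]), 3)   # repeated index
    assert not _check_is_permutation(np.array([0, 1]), 3)      # wrong length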
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def permutation_test_score(estimator, X, y, groups=None, cv=None,
n_permutations=100, n_jobs=1, random_state=0,
verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Labels to constrain permutation within groups, i.e. ``y`` values
are permuted among samples with the same group identifier.
When not specified, ``y`` values are permuted among all samples.
When a grouped cross-validator is used, the group labels are
also passed on to the ``split`` method of the cross-validator. The
cross-validator uses them for grouping the samples while splitting
the dataset into train/test set.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
The scores obtained for each permutations.
pvalue : float
The p-value, which approximates the probability that the score would
be obtained by chance. This is calculated as:
`(C + 1) / (n_permutations + 1)`
Where C is the number of permutations whose score >= the true score.
The best possible p-value is 1/(n_permutations + 1), the worst is 1.0.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, groups, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, groups, random_state),
groups, cv, scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False  # to avoid a problem with nosetests
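# --- Illustrative sketch (not part of the original module) ---
# A worked instance of the p-value formula documented above: with 100
# permutations and, say, 4 permuted scores at least as good as the true score,
# the reported p-value is (4 + 1) / (100 + 1), roughly 0.0495. The permutation
# scores below are synthetic.
def _example_permutation_pvalue():
    import numpy as np

    true_score = 0.90
    rng = np.random.RandomState(0)
    permutation_scores = rng.uniform(0.3, 0.95, size=100)
    n_better = np.sum(permutation_scores >= true_score)
    pvalue = (n_better + 1.0) / (100 + 1)
    return pvalue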
def _permutation_test_score(estimator, X, y, groups, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv.split(X, y, groups):
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
estimator.fit(X_train, y_train)
avg_score.append(scorer(estimator, X_test, y_test))
return np.mean(avg_score)
def _shuffle(y, groups, random_state):
"""Return a shuffled copy of y eventually shuffle among same groups."""
if groups is None:
indices = random_state.permutation(len(y))
else:
indices = np.arange(len(groups))
for group in np.unique(groups):
this_mask = (groups == group)
indices[this_mask] = random_state.permutation(indices[this_mask])
return safe_indexing(y, indices)
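# --- Illustrative sketch (not part of the original module) ---
# Group-constrained shuffling as implemented by _shuffle above: labels are only
# permuted among samples that share a group id, so each group keeps the same
# multiset of targets. The arrays are toy data.
def _example_group_constrained_shuffle():
    import numpy as np

    rng = check_random_state(0)
    y = np.array([0, 1, 2, 3, 4, 5])
    groups = np.array(['a', 'a', 'a', 'b', 'b', 'b'])
    y_shuffled = _shuffle(y, groups, rng)
    # Values {0, 1, 2} stay in the first three positions, {3, 4, 5} in the last three.
    return y_shuffled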
def learning_curve(estimator, X, y, groups=None,
train_sizes=np.linspace(0.1, 1.0, 5), cv=None, scoring=None,
exploit_incremental_learning=False, n_jobs=1,
pre_dispatch="all", verbose=0, shuffle=False,
random_state=None):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
        be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
shuffle : boolean, optional
Whether to shuffle training data before taking prefixes of it
        based on ``train_sizes``.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
    Returns
    -------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
Numbers of training examples that has been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<sphx_glr_auto_examples_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
# Store it as list as we will be iterating over the list multiple times
cv_iter = list(cv.split(X, y, groups))
scorer = check_scoring(estimator, scoring=scoring)
n_max_training_samples = len(cv_iter[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if shuffle:
rng = check_random_state(random_state)
cv_iter = ((rng.permutation(train), test) for train, test in cv_iter)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv_iter)
else:
train_test_proportions = []
for train, test in cv_iter:
for n_train_samples in train_sizes_abs:
train_test_proportions.append((train[:n_train_samples], test))
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train, test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in train_test_proportions)
out = np.array(out)
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
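# --- Illustrative sketch (not part of the original module) ---
# The partitioning used by _incremental_fit_estimator: each step pairs a
# cumulative training-set size with only the chunk of indices added at that
# step, which is what gets handed to partial_fit. Toy indices only.
def _example_incremental_partitions():
    import numpy as np

    train = np.arange(10, 16)             # six training indices
    train_sizes = np.array([2, 4, 6])     # cumulative subset sizes
    partitions = list(zip(train_sizes, np.split(train, train_sizes)[:-1]))
    # -> [(2, [10, 11]), (4, [12, 13]), (6, [14, 15])]
    return partitions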
def validation_curve(estimator, X, y, param_name, param_range, groups=None,
cv=None, scoring=None, n_jobs=1, pre_dispatch="all",
verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`sphx_glr_auto_examples_model_selection_plot_validation_curve.py`
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
        # NOTE: do not change the order of iteration, to allow one-time cv splitters
for train, test in cv.split(X, y, groups) for v in param_range)
out = np.asarray(out)
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
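# --- Illustrative sketch (not part of the original module) ---
# The reshape/transpose at the end of validation_curve: the flat per-(fold,
# parameter) [train_score, test_score] pairs come back fold-major and are
# rearranged into two (n_params, n_cv_folds) arrays. Numbers are placeholders.
def _example_validation_curve_reshape():
    import numpy as np

    n_cv_folds, n_params = 3, 2
    out = np.arange(n_cv_folds * n_params * 2, dtype=float).reshape(-1, 2)
    out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
    train_scores, test_scores = out[0], out[1]
    assert train_scores.shape == (n_params, n_cv_folds)
    return train_scores, test_scores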
|
bsd-3-clause
|
tomlof/scikit-learn
|
sklearn/metrics/setup.py
|
69
|
1061
|
import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.pyx"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
config.add_subpackage('tests')
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
|
bsd-3-clause
|
morrislab/rnascan
|
setup.py
|
1
|
2337
|
import os.path
import sys
from setuptools import find_packages
from distutils.core import setup, Extension
if sys.version_info < (2, 7):
sys.stderr.write("rnascan requires Python 2.7, or Python 3.5 or later. "
"Python %d.%d detected.\n" % sys.version_info[:2])
sys.exit(1)
elif sys.version_info[0] == 3 and sys.version_info[:2] < (3, 5):
sys.stderr.write("rnascan requires Python 2.7, or Python 3.5 or later. "
"Python %d.%d detected.\n" % sys.version_info[:2])
sys.exit(1)
# Borrowing setup.py code from Biopython
def is_pypy():
import platform
try:
if platform.python_implementation() == 'PyPy':
return True
except AttributeError:
# New in Python 2.6, not in Jython yet either
pass
return False
def can_import(module_name):
"""can_import(module_name) -> module or None"""
try:
return __import__(module_name)
except ImportError:
return None
def is_Numpy_installed():
if is_pypy():
return False
return bool(can_import("numpy"))
EXTENSIONS = []
if is_Numpy_installed():
import numpy
numpy_include_dir = numpy.get_include()
EXTENSIONS.append(
Extension('rnascan.BioAddons.motifs._pwm',
["rnascan/BioAddons/motifs/_pwm.c"],
include_dirs=[numpy_include_dir],
))
here = os.path.abspath(os.path.dirname(__file__))
exec(open(os.path.join(here, 'rnascan/version.py')).read())
setup(name='rnascan',
version=__version__,
description='Scan RBP motifs and secondary structure from SSMs',
url='http://github.com/morrislab/rnascan',
author='Kevin Ha, Kate Cook, Kaitlin Laverty',
author_email='[email protected], [email protected], [email protected]',
license='AGPLv3',
packages=find_packages(),
scripts=['scripts/run_folding'],
install_requires=['setuptools',
'pandas >= 0.24',
'numpy >= 1.10.0',
'biopython >= 1.66'],
entry_points={
'console_scripts': [
'rnascan = rnascan.rnascan:main'
]
},
ext_modules=EXTENSIONS,
zip_safe=False,
test_suite='nose.collector',
tests_require=['nose']
)
|
agpl-3.0
|
wgmueller1/pyhsmm
|
pyhsmm/internals/hmm_states.py
|
1
|
23209
|
from __future__ import division
import numpy as np
from numpy import newaxis as na
import abc
import copy
from scipy.misc import logsumexp
from pyhsmm.util.stats import sample_discrete
try:
from pyhsmm.util.cstats import sample_markov, count_transitions
except ImportError:
from pyhsmm.util.stats import sample_markov, count_transitions
from pyhsmm.util.general import rle
######################
# Mixins and bases #
######################
class _StatesBase(object):
__metaclass__ = abc.ABCMeta
def __init__(self,model,T=None,data=None,stateseq=None,
generate=True,initialize_from_prior=True):
self.model = model
self.T = T if T is not None else data.shape[0]
self.data = data
self.clear_caches()
if stateseq is not None:
self.stateseq = np.array(stateseq,dtype=np.int32)
elif generate:
if data is not None and not initialize_from_prior:
self.resample()
else:
self.generate_states()
def copy_sample(self,newmodel):
new = copy.copy(self)
new.clear_caches() # saves space, though may recompute later for likelihoods
new.model = newmodel
new.stateseq = self.stateseq.copy()
return new
_kwargs = {} # used in subclasses for joblib stuff
### model properties
@property
def obs_distns(self):
return self.model.obs_distns
@property
def trans_matrix(self):
return self.model.trans_distn.trans_matrix
@property
def pi_0(self):
return self.model.init_state_distn.pi_0
@property
def num_states(self):
return self.model.num_states
### convenience properties
@property
def stateseq_norep(self):
return rle(self.stateseq)[0]
@property
def durations(self):
return rle(self.stateseq)[1]
### generation
@abc.abstractmethod
def generate_states(self):
pass
### messages and likelihoods
    # some cached things depend on model parameters, so caches should be
# cleared when the model changes (e.g. when parameters are updated)
def clear_caches(self):
self._aBl = self._mf_aBl = None
self._normalizer = None
@property
def aBl(self):
if self._aBl is None:
data = self.data
aBl = self._aBl = np.empty((data.shape[0],self.num_states))
for idx, obs_distn in enumerate(self.obs_distns):
aBl[:,idx] = obs_distn.log_likelihood(data).ravel()
aBl[np.isnan(aBl).any(1)] = 0.
return self._aBl
@abc.abstractmethod
def log_likelihood(self):
pass
class _SeparateTransMixin(object):
def __init__(self,group_id,**kwargs):
assert not isinstance(group_id,np.ndarray)
self.group_id = group_id
self._kwargs = dict(self._kwargs,group_id=group_id)
super(_SeparateTransMixin,self).__init__(**kwargs)
# access these to be sure they're instantiated
self.trans_matrix
self.pi_0
@property
def trans_matrix(self):
return self.model.trans_distns[self.group_id].trans_matrix
@property
def pi_0(self):
return self.model.init_state_distns[self.group_id].pi_0
@property
def mf_trans_matrix(self):
return np.maximum(
self.model.trans_distns[self.group_id].exp_expected_log_trans_matrix,
1e-3)
@property
def mf_pi_0(self):
return self.model.init_state_distns[self.group_id].exp_expected_log_init_state_distn
class _PossibleChangepointsMixin(object):
def __init__(self,model,data,changepoints=None,**kwargs):
changepoints = changepoints if changepoints is not None \
else [(t,t+1) for t in xrange(data.shape[0])]
self.changepoints = changepoints
self.segmentstarts = np.array([start for start,stop in changepoints],dtype=np.int32)
self.segmentlens = np.array([stop-start for start,stop in changepoints],dtype=np.int32)
assert all(l > 0 for l in self.segmentlens)
assert sum(self.segmentlens) == data.shape[0]
assert self.changepoints[0][0] == 0 and self.changepoints[-1][-1] == data.shape[0]
self._kwargs = dict(self._kwargs,changepoints=changepoints)
super(_PossibleChangepointsMixin,self).__init__(
model,T=len(changepoints),data=data,**kwargs)
def clear_caches(self):
self._aBBl = self._mf_aBBl = None
self._stateseq = None
super(_PossibleChangepointsMixin,self).clear_caches()
@property
def Tblock(self):
return len(self.changepoints)
@property
def Tfull(self):
return self.data.shape[0]
@property
def stateseq(self):
if self._stateseq is None:
self._stateseq = self.blockstateseq.repeat(self.segmentlens)
return self._stateseq
@stateseq.setter
def stateseq(self,stateseq):
assert len(stateseq) == self.Tblock or len(stateseq) == self.Tfull
if len(stateseq) == self.Tblock:
self.blockstateseq = stateseq
else:
self.blockstateseq = stateseq[self.segmentstarts]
self._stateseq = None
def _expected_states(self,*args,**kwargs):
expected_states = \
super(_PossibleChangepointsMixin,self)._expected_states(*args,**kwargs)
return expected_states.repeat(self.segmentlens,axis=0)
@property
def aBl(self):
if self._aBBl is None:
aBl = super(_PossibleChangepointsMixin,self).aBl
aBBl = self._aBBl = np.empty((self.Tblock,self.num_states))
for idx, (start,stop) in enumerate(self.changepoints):
aBBl[idx] = aBl[start:stop].sum(0)
return self._aBBl
@property
def mf_aBl(self):
if self._mf_aBBl is None:
aBl = super(_PossibleChangepointsMixin,self).mf_aBl
aBBl = self._mf_aBBl = np.empty((self.Tblock,self.num_states))
for idx, (start,stop) in enumerate(self.changepoints):
aBBl[idx] = aBl[start:stop].sum(0)
return self._mf_aBBl
def plot(self,*args,**kwargs):
from matplotlib import pyplot as plt
super(_PossibleChangepointsMixin,self).plot(*args,**kwargs)
plt.xlim((0,self.Tfull))
# TODO do generate() and generate_states() actually work?
####################
# States classes #
####################
class HMMStatesPython(_StatesBase):
### generation
def generate_states(self):
T = self.T
nextstate_distn = self.pi_0
A = self.trans_matrix
stateseq = np.zeros(T,dtype=np.int32)
for idx in xrange(T):
stateseq[idx] = sample_discrete(nextstate_distn)
nextstate_distn = A[stateseq[idx]]
self.stateseq = stateseq
return stateseq
### message passing
def log_likelihood(self):
if self._normalizer is None:
self.messages_forwards_normalized() # NOTE: sets self._normalizer
return self._normalizer
def _messages_log(self,trans_matrix,init_state_distn,log_likelihoods):
alphal = self._messages_forwards_log(trans_matrix,init_state_distn,log_likelihoods)
betal = self._messages_backwards_log(trans_matrix,log_likelihoods)
return alphal, betal
def messages_log(self):
return self._messages_log(self.trans_matrix,self.pi_0,self.aBl)
@staticmethod
def _messages_backwards_log(trans_matrix,log_likelihoods):
errs = np.seterr(over='ignore')
Al = np.log(trans_matrix)
aBl = log_likelihoods
betal = np.zeros_like(aBl)
for t in xrange(betal.shape[0]-2,-1,-1):
betal[t] = logsumexp(Al + betal[t+1] + aBl[t+1],axis=1)
np.seterr(**errs)
return betal
def messages_backwards_log(self):
betal = self._messages_backwards_log(self.trans_matrix,self.aBl)
assert not np.isnan(betal).any()
self._normalizer = logsumexp(np.log(self.pi_0) + betal[0] + self.aBl[0])
return betal
@staticmethod
def _messages_forwards_log(trans_matrix,init_state_distn,log_likelihoods):
errs = np.seterr(over='ignore')
Al = np.log(trans_matrix)
aBl = log_likelihoods
alphal = np.zeros_like(aBl)
alphal[0] = np.log(init_state_distn) + aBl[0]
for t in xrange(alphal.shape[0]-1):
alphal[t+1] = logsumexp(alphal[t] + Al.T,axis=1) + aBl[t+1]
np.seterr(**errs)
return alphal
def messages_forwards_log(self):
alphal = self._messages_forwards_log(self.trans_matrix,self.pi_0,self.aBl)
assert not np.any(np.isnan(alphal))
self._normalizer = logsumexp(alphal[-1])
return alphal
@staticmethod
def _messages_backwards_normalized(trans_matrix,init_state_distn,log_likelihoods):
aBl = log_likelihoods
A = trans_matrix
T = aBl.shape[0]
betan = np.empty_like(aBl)
logtot = 0.
betan[-1] = 1.
for t in xrange(T-2,-1,-1):
cmax = aBl[t+1].max()
betan[t] = A.dot(betan[t+1] * np.exp(aBl[t+1] - cmax))
norm = betan[t].sum()
logtot += cmax + np.log(norm)
betan[t] /= norm
cmax = aBl[0].max()
logtot += cmax + np.log((np.exp(aBl[0] - cmax) * init_state_distn * betan[0]).sum())
return betan, logtot
def messages_backwards_normalized(self):
betan, self._normalizer = \
self._messages_backwards_normalized(self.trans_matrix,self.pi_0,self.aBl)
return betan
@staticmethod
def _messages_forwards_normalized(trans_matrix,init_state_distn,log_likelihoods):
aBl = log_likelihoods
A = trans_matrix
T = aBl.shape[0]
alphan = np.empty_like(aBl)
logtot = 0.
in_potential = init_state_distn
for t in xrange(T):
cmax = aBl[t].max()
alphan[t] = in_potential * np.exp(aBl[t] - cmax)
norm = alphan[t].sum()
if norm != 0:
alphan[t] /= norm
logtot += np.log(norm) + cmax
else:
alphan[t:] = 0.
return alphan, -np.inf
in_potential = alphan[t].dot(A)
return alphan, logtot
def messages_forwards_normalized(self):
alphan, self._normalizer = \
self._messages_forwards_normalized(self.trans_matrix,self.pi_0,self.aBl)
return alphan
### Gibbs sampling
def resample_log(self):
betal = self.messages_backwards_log()
self.sample_forwards_log(betal)
def resample_normalized(self):
alphan = self.messages_forwards_normalized()
self.sample_backwards_normalized(alphan)
def resample(self):
return self.resample_normalized()
@staticmethod
def _sample_forwards_log(betal,trans_matrix,init_state_distn,log_likelihoods):
A = trans_matrix
aBl = log_likelihoods
T = aBl.shape[0]
stateseq = np.empty(T,dtype=np.int32)
nextstate_unsmoothed = init_state_distn
for idx in xrange(T):
logdomain = betal[idx] + aBl[idx]
logdomain[nextstate_unsmoothed == 0] = -np.inf
if np.any(np.isfinite(logdomain)):
stateseq[idx] = sample_discrete(nextstate_unsmoothed * np.exp(logdomain - np.amax(logdomain)))
else:
stateseq[idx] = sample_discrete(nextstate_unsmoothed)
nextstate_unsmoothed = A[stateseq[idx]]
return stateseq
def sample_forwards_log(self,betal):
self.stateseq = self._sample_forwards_log(betal,self.trans_matrix,self.pi_0,self.aBl)
@staticmethod
def _sample_forwards_normalized(betan,trans_matrix,init_state_distn,log_likelihoods):
A = trans_matrix
aBl = log_likelihoods
T = aBl.shape[0]
stateseq = np.empty(T,dtype=np.int32)
nextstate_unsmoothed = init_state_distn
for idx in xrange(T):
logdomain = aBl[idx]
logdomain[nextstate_unsmoothed == 0] = -np.inf
stateseq[idx] = sample_discrete(nextstate_unsmoothed * betan * np.exp(logdomain - np.amax(logdomain)))
nextstate_unsmoothed = A[stateseq[idx]]
return stateseq
def sample_forwards_normalized(self,betan):
self.stateseq = self._sample_forwards_normalized(
betan,self.trans_matrix,self.pi_0,self.aBl)
@staticmethod
def _sample_backwards_normalized(alphan,trans_matrix_transpose):
AT = trans_matrix_transpose
T = alphan.shape[0]
stateseq = np.empty(T,dtype=np.int32)
next_potential = np.ones(AT.shape[0])
for t in xrange(T-1,-1,-1):
stateseq[t] = sample_discrete(next_potential * alphan[t])
next_potential = AT[stateseq[t]]
return stateseq
def sample_backwards_normalized(self,alphan):
self.stateseq = self._sample_backwards_normalized(alphan,self.trans_matrix.T.copy())
### Mean Field
@property
def mf_aBl(self):
if self._mf_aBl is None:
T = self.data.shape[0]
self._mf_aBl = aBl = np.empty((T,self.num_states))
for idx, o in enumerate(self.obs_distns):
aBl[:,idx] = o.expected_log_likelihood(self.data).ravel()
aBl[np.isnan(aBl).any(1)] = 0.
return self._mf_aBl
@property
def mf_trans_matrix(self):
return self.model.trans_distn.exp_expected_log_trans_matrix
@property
def mf_pi_0(self):
return self.model.init_state_distn.exp_expected_log_init_state_distn
@property
def all_expected_stats(self):
return self.expected_states, self.expected_transcounts, self._normalizer
@all_expected_stats.setter
def all_expected_stats(self,vals):
self.expected_states, self.expected_transcounts, self._normalizer = vals
self.stateseq = self.expected_states.argmax(1).astype('int32') # for plotting
def meanfieldupdate(self):
self.clear_caches()
self.all_expected_stats = self._expected_statistics(
self.mf_trans_matrix,self.mf_pi_0,self.mf_aBl)
self._mf_param_snapshot = (
np.log(self.mf_trans_matrix), np.log(self.mf_pi_0),
self.mf_aBl, self._normalizer)
def _init_mf_from_gibbs(self):
expected_states = np.eye(self.num_states)[self.stateseq]
expected_transcounts = count_transitions(self.stateseq, self.num_states)
self.all_expected_stats = \
expected_states, expected_transcounts, -np.inf
def get_vlb(self, most_recently_updated=False):
if (self._normalizer is None) or (self._mf_param_snapshot is None) \
or not hasattr(self, 'expected_states') \
or not hasattr(self, 'expected_transcounts'):
self.meanfieldupdate()
# see https://github.com/mattjj/pyhsmm/issues/45#issuecomment-102721960
if most_recently_updated:
return self._normalizer
else:
# TODO TODO something wrong in here
_, _, new_normalizer = self._expected_statistics(
self.mf_trans_matrix, self.mf_pi_0, self.mf_aBl)
new_params = np.log(self.mf_trans_matrix), np.log(self.mf_pi_0), \
self.mf_aBl
old_params, old_normalizer = self._mf_param_snapshot[:3], \
self._mf_param_snapshot[-1]
E_stats = self.expected_transcounts, \
self.expected_states[0], self.expected_states
linear_term = \
sum(np.dot(np.ravel(a-b), np.ravel(c))
for a, b, c in zip(new_params, old_params, E_stats))
return linear_term - (new_normalizer - old_normalizer)
def _expected_statistics(self,trans_potential,init_potential,likelihood_log_potential):
alphal = self._messages_forwards_log(trans_potential,init_potential,
likelihood_log_potential)
betal = self._messages_backwards_log(trans_potential,likelihood_log_potential)
expected_states, expected_transcounts, normalizer = \
self._expected_statistics_from_messages(trans_potential,likelihood_log_potential,alphal,betal)
assert not np.isinf(expected_states).any()
return expected_states, expected_transcounts, normalizer
@staticmethod
def _expected_statistics_from_messages(trans_potential,likelihood_log_potential,alphal,betal):
expected_states = alphal + betal
expected_states -= expected_states.max(1)[:,na]
np.exp(expected_states,out=expected_states)
expected_states /= expected_states.sum(1)[:,na]
Al = np.log(trans_potential)
log_joints = alphal[:-1,:,na] + (betal[1:,na,:] + likelihood_log_potential[1:,na,:]) + Al[na,...]
log_joints -= log_joints.max((1,2))[:,na,na]
joints = np.exp(log_joints)
        joints /= joints.sum((1,2))[:,na,na]  # NOTE: renormalizing each joint isn't really necessary
expected_transcounts = joints.sum(0)
normalizer = logsumexp(alphal[0] + betal[0])
return expected_states, expected_transcounts, normalizer
### EM
def E_step(self):
self.clear_caches()
self.all_expected_stats = self._expected_statistics(
self.trans_matrix,self.pi_0,self.aBl)
### Viterbi
def Viterbi(self):
scores, args = self.maxsum_messages_backwards()
self.maximize_forwards(scores,args)
def maxsum_messages_backwards(self):
return self._maxsum_messages_backwards(self.trans_matrix,self.aBl)
def maximize_forwards(self,scores,args):
self.stateseq = self._maximize_forwards(scores,args,self.pi_0,self.aBl)
def mf_Viterbi(self):
scores, args = self.mf_maxsum_messages_backwards()
self.mf_maximize_forwards(scores,args)
def mf_maxsum_messages_backwards(self):
return self._maxsum_messages_backwards(self.mf_trans_matrix,self.mf_aBl)
def mf_maximize_forwards(self,scores,args):
self.stateseq = self._maximize_forwards(scores,args,self.mf_pi_0,self.mf_aBl)
@staticmethod
def _maxsum_messages_backwards(trans_matrix, log_likelihoods):
errs = np.seterr(divide='ignore')
Al = np.log(trans_matrix)
np.seterr(**errs)
aBl = log_likelihoods
scores = np.zeros_like(aBl)
args = np.zeros(aBl.shape,dtype=np.int32)
for t in xrange(scores.shape[0]-2,-1,-1):
vals = Al + scores[t+1] + aBl[t+1]
vals.argmax(axis=1,out=args[t+1])
vals.max(axis=1,out=scores[t])
return scores, args
@staticmethod
def _maximize_forwards(scores,args,init_state_distn,log_likelihoods):
aBl = log_likelihoods
T = aBl.shape[0]
stateseq = np.empty(T,dtype=np.int32)
stateseq[0] = (scores[0] + np.log(init_state_distn) + aBl[0]).argmax()
for idx in xrange(1,T):
stateseq[idx] = args[idx,stateseq[idx-1]]
return stateseq
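# --- Illustrative sketch (not part of the original module) ---
# Runs the message-passing static methods of HMMStatesPython on a tiny,
# hand-built two-state chain and checks that the normalized forward recursion
# and the log-space recursion agree on the log-likelihood. The numbers are made
# up; note that this module targets Python 2 (xrange), so the sketch assumes
# the same interpreter.
def _example_toy_forward_messages():
    import numpy as np

    A = np.array([[0.9, 0.1],
                  [0.2, 0.8]])                 # toy transition matrix
    pi_0 = np.array([0.5, 0.5])                # toy initial state distribution
    aBl = np.log(np.array([[0.7, 0.3],         # toy per-state observation likelihoods
                           [0.4, 0.6],
                           [0.1, 0.9]]))
    alphan, logZ = HMMStatesPython._messages_forwards_normalized(A, pi_0, aBl)
    alphal = HMMStatesPython._messages_forwards_log(A, pi_0, aBl)
    assert np.isclose(logZ, np.log(np.exp(alphal[-1]).sum()))
    return alphan, logZ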
class HMMStatesEigen(HMMStatesPython):
def generate_states(self):
self.stateseq = sample_markov(
T=self.T,
trans_matrix=self.trans_matrix,
init_state_distn=self.pi_0)
### common messages (Gibbs, EM, likelihood calculation)
@staticmethod
def _messages_backwards_log(trans_matrix,log_likelihoods):
from hmm_messages_interface import messages_backwards_log
return messages_backwards_log(
trans_matrix,log_likelihoods,
np.empty_like(log_likelihoods))
@staticmethod
def _messages_forwards_log(trans_matrix,init_state_distn,log_likelihoods):
from hmm_messages_interface import messages_forwards_log
return messages_forwards_log(trans_matrix,log_likelihoods,
init_state_distn,np.empty_like(log_likelihoods))
@staticmethod
def _messages_forwards_normalized(trans_matrix,init_state_distn,log_likelihoods):
from hmm_messages_interface import messages_forwards_normalized
return messages_forwards_normalized(trans_matrix,log_likelihoods,
init_state_distn,np.empty_like(log_likelihoods))
# next three methods are just for convenient testing
def messages_backwards_log_python(self):
return super(HMMStatesEigen,self)._messages_backwards_log(
self.trans_matrix,self.aBl)
def messages_forwards_log_python(self):
return super(HMMStatesEigen,self)._messages_forwards_log(
self.trans_matrix,self.pi_0,self.aBl)
def messages_forwards_normalized_python(self):
return super(HMMStatesEigen,self)._messages_forwards_normalized(
self.trans_matrix,self.pi_0,self.aBl)
### sampling
@staticmethod
def _sample_forwards_log(betal,trans_matrix,init_state_distn,log_likelihoods):
from hmm_messages_interface import sample_forwards_log
return sample_forwards_log(trans_matrix,log_likelihoods,
init_state_distn,betal,np.empty(log_likelihoods.shape[0],dtype='int32'))
@staticmethod
def _sample_backwards_normalized(alphan,trans_matrix_transpose):
from hmm_messages_interface import sample_backwards_normalized
return sample_backwards_normalized(trans_matrix_transpose,alphan,
np.empty(alphan.shape[0],dtype='int32'))
@staticmethod
def _resample_multiple(states_list):
from hmm_messages_interface import resample_normalized_multiple
if len(states_list) > 0:
loglikes = resample_normalized_multiple(
states_list[0].trans_matrix,states_list[0].pi_0,
[s.aBl for s in states_list],[s.stateseq for s in states_list])
for s, loglike in zip(states_list,loglikes):
s._normalizer = loglike
### EM
@staticmethod
def _expected_statistics_from_messages(
trans_potential,likelihood_log_potential,alphal,betal,
expected_states=None,expected_transcounts=None):
from hmm_messages_interface import expected_statistics_log
expected_states = np.zeros_like(alphal) \
if expected_states is None else expected_states
expected_transcounts = np.zeros_like(trans_potential) \
if expected_transcounts is None else expected_transcounts
return expected_statistics_log(
np.log(trans_potential),likelihood_log_potential,alphal,betal,
expected_states,expected_transcounts)
### Viterbi
def Viterbi(self):
from hmm_messages_interface import viterbi
self.stateseq = viterbi(self.trans_matrix,self.aBl,self.pi_0,
np.empty(self.aBl.shape[0],dtype='int32'))
class HMMStatesEigenSeparateTrans(_SeparateTransMixin,HMMStatesEigen):
pass
class HMMStatesPossibleChangepoints(_PossibleChangepointsMixin,HMMStatesEigen):
pass
class HMMStatesPossibleChangepointsSeparateTrans(
_SeparateTransMixin,
HMMStatesPossibleChangepoints):
pass
|
mit
|
megbedell/wobble
|
scripts/script_HD189733.py
|
1
|
6247
|
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import wobble
from time import time
import h5py
import os
if __name__ == "__main__":
starname = 'HD189733'
K_star = 0
K_t = 0
niter = 100 # for optimization
plots = True
epochs = [0, 20] # to plot
movies = False
plot_dir = '../results/plots_{0}_Kstar{1}_Kt{2}/'.format(starname, K_star, K_t)
print("running wobble on star {0} with K_star = {1}, K_t = {2}".format(starname, K_star, K_t))
start_time = time()
orders = np.arange(72)
'''
e = [ 0, 1, 6, 7, 9, 17, 18, 19, 21, 23, 24, 26, 30, 33, 34, 35, 36,
37, 38, 40, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 53, 55, 56, 61,
66, 69, 70, 72, 73, 75] # night of August 28, 2007
data = wobble.Data(starname+'_e2ds.hdf5', filepath='../data/', orders=orders, epochs=e)
'''
data = wobble.Data(filename='../data/'+starname+'_e2ds.hdf5', orders=orders)
orders = np.copy(data.orders)
results = wobble.Results(data=data)
results_51peg = wobble.Results(filename='/Users/mbedell/python/wobble/results/results_51peg_Kstar0_Kt0.hdf5')
print("data loaded")
print("time elapsed: {0:.2f} min".format((time() - start_time)/60.0))
elapsed_time = time() - start_time
if plots:
print("plots will be saved under directory: {0}".format(plot_dir))
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
star_learning_rate = 0.1
telluric_learning_rate = 0.01
for r,o in enumerate(orders):
model = wobble.Model(data, results, r)
model.add_star('star', variable_bases=K_star,
regularization_par_file=None,
learning_rate_template=star_learning_rate)
model.add_telluric('tellurics', rvs_fixed=True, variable_bases=K_t,
learning_rate_template=telluric_learning_rate,
template_fixed=True, template_xs=results_51peg.tellurics_template_xs[o],
template_ys=results_51peg.tellurics_template_ys[o]) # assumes all orders are there for 51 Peg
print("--- ORDER {0} ---".format(o))
if plots:
wobble.optimize_order(model, niter=niter, save_history=True,
basename=plot_dir+'history', epochs_to_plot=epochs, movies=movies)
fig, ax = plt.subplots(1, 1, figsize=(8,5))
ax.plot(data.dates, results.star_rvs[r] + data.bervs - data.drifts - np.mean(results.star_rvs[r] + data.bervs),
'k.', alpha=0.8, ms=4)
ax.plot(data.dates, data.pipeline_rvs + data.bervs - np.mean(data.pipeline_rvs + data.bervs),
'r.', alpha=0.5, ms=4)
ax.set_ylabel('RV (m/s)', fontsize=14)
ax.set_xlabel('BJD', fontsize=14)
plt.savefig(plot_dir+'results_rvs_o{0}.png'.format(o))
plt.close(fig)
for e in epochs:
results.plot_spectrum(r, e, data, plot_dir+'results_synth_o{0}_e{1}.png'.format(o, e))
else:
wobble.optimize_order(model, niter=niter)
del model # not sure if this does anything
print("order {1} optimization finished. time elapsed: {0:.2f} min".format((time() - start_time)/60.0, o))
print("this order took {0:.2f} min".format((time() - start_time - elapsed_time)/60.0))
elapsed_time = time() - start_time
print("all orders optimized.")
print("time elapsed: {0:.2f} minutes".format((time() - start_time)/60.0))
results.combine_orders('star')
print("final RVs calculated.")
print("time elapsed: {0:.2f} minutes".format((time() - start_time)/60.0))
results_file = '../results/results_{0}_Kstar{1}_Kt{2}.hdf5'.format(starname, K_star, K_t)
results.write(results_file)
print("results saved as: {0}".format(results_file))
print("time elapsed: {0:.2f} minutes".format((time() - start_time)/60.0))
# do post-processing:
results.combine_orders('star')
results.apply_drifts('star')
results.apply_bervs('star')
if plots:
fig, (ax, ax2) = plt.subplots(2, 1, gridspec_kw = {'height_ratios':[3, 1]})
ax.scatter(data.dates, data.pipeline_rvs - np.mean(data.pipeline_rvs),
c='r', label='DRS', alpha=0.7, s=12)
ax.scatter(data.dates, results.star_time_rvs - np.mean(results.star_time_rvs),
c='k', label='wobble', alpha=0.7, s=12)
ax.legend()
ax.set_xticklabels([])
ax2.scatter(data.dates, results.star_time_rvs - data.pipeline_rvs, c='k', s=12)
ax2.set_ylabel('JD')
fig.tight_layout()
fig.subplots_adjust(hspace=0.05)
plt.savefig(plot_dir+'results_rvs.png')
plt.close(fig)
fig, (ax, ax2) = plt.subplots(2, 1, gridspec_kw = {'height_ratios':[3, 1]})
ax.scatter(data.dates % 2.21857312, data.pipeline_rvs - np.mean(data.pipeline_rvs),
c='r', label='DRS', alpha=0.7, s=12)
ax.scatter(data.dates % 2.21857312, results.star_time_rvs - np.mean(results.star_time_rvs),
c='k', label='wobble', alpha=0.7, s=12)
ax.legend()
ax.set_xticklabels([])
ax2.scatter(data.dates, results.star_time_rvs - data.pipeline_rvs, c='k', s=12)
ax2.set_ylabel('JD')
fig.tight_layout()
fig.subplots_adjust(hspace=0.05)
plt.savefig(plot_dir+'results_rvs_phased.png')
plt.close(fig)
print("final RVs calculated.")
print("time elapsed: {0:.2f} minutes".format((time() - start_time)/60.0))
# save output:
results_file = 'results/results_{0}_Kstar{1}_Kt{2}.hdf5'.format(starname, K_star, K_t)
results.write(results_file)
star_rvs_file = 'results/rvs_{0}_Kstar{1}_Kt{2}.hdf5'.format(starname, K_star, K_t)
results.write_rvs('star', star_rvs_file, all_orders=True)
print("results saved as: {0} & {1}".format(results_file, star_rvs_file))
print("-----------------------------")
print("total runtime:{0:.2f} minutes".format((time() - start_time)/60.0))
|
mit
|
craigcitro/pydatalab
|
tests/_util/generic_feature_statistics_generator_test.py
|
4
|
6972
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from google.datalab.utils.facets.generic_feature_statistics_generator \
import GenericFeatureStatisticsGenerator
import numpy as np
import pandas as pd
from tensorflow.python.platform import googletest
class GenericFeatureStatisticsGeneratorTest(googletest.TestCase):
def setUp(self):
self.gfsg = GenericFeatureStatisticsGenerator()
def testProtoFromDataFrames(self):
data = [[1, 'hi'], [2, 'hello'], [3, 'hi']]
df = pd.DataFrame(data, columns=['testFeatureInt', 'testFeatureString'])
dataframes = [{'table': df, 'name': 'testDataset'}]
p = self.gfsg.ProtoFromDataFrames(dataframes)
self.assertEqual(1, len(p.datasets))
test_data = p.datasets[0]
self.assertEqual('testDataset', test_data.name)
self.assertEqual(3, test_data.num_examples)
self.assertEqual(2, len(test_data.features))
if test_data.features[0].name == 'testFeatureInt':
numfeat = test_data.features[0]
stringfeat = test_data.features[1]
else:
numfeat = test_data.features[1]
stringfeat = test_data.features[0]
self.assertEqual('testFeatureInt', numfeat.name)
self.assertEqual(self.gfsg.fs_proto.INT, numfeat.type)
self.assertEqual(1, numfeat.num_stats.min)
self.assertEqual(3, numfeat.num_stats.max)
self.assertEqual('testFeatureString', stringfeat.name)
self.assertEqual(self.gfsg.fs_proto.STRING, stringfeat.type)
self.assertEqual(2, stringfeat.string_stats.unique)
def testNdarrayToEntry(self):
arr = np.array([1.0, 2.0, None, float('nan'), 3.0], dtype=float)
entry = self.gfsg.NdarrayToEntry(arr)
self.assertEqual(2, entry['missing'])
arr = np.array(['a', 'b', float('nan'), 'c'], dtype=str)
entry = self.gfsg.NdarrayToEntry(arr)
self.assertEqual(1, entry['missing'])
def testNdarrayToEntryTimeTypes(self):
arr = np.array(
[np.datetime64('2005-02-25'),
np.datetime64('2006-02-25')],
dtype=np.datetime64)
entry = self.gfsg.NdarrayToEntry(arr)
self.assertEqual([1109289600000000000, 1140825600000000000], entry['vals'])
arr = np.array(
[np.datetime64('2009-01-01') - np.datetime64('2008-01-01')],
dtype=np.timedelta64)
entry = self.gfsg.NdarrayToEntry(arr)
self.assertEqual([31622400000000000], entry['vals'])
def testDTypeToType(self):
self.assertEqual(self.gfsg.fs_proto.INT,
self.gfsg.DtypeToType(np.dtype(np.int32)))
# Boolean and time types treated as int
self.assertEqual(self.gfsg.fs_proto.INT,
self.gfsg.DtypeToType(np.dtype(np.bool)))
self.assertEqual(self.gfsg.fs_proto.INT,
self.gfsg.DtypeToType(np.dtype(np.datetime64)))
self.assertEqual(self.gfsg.fs_proto.INT,
self.gfsg.DtypeToType(np.dtype(np.timedelta64)))
self.assertEqual(self.gfsg.fs_proto.FLOAT,
self.gfsg.DtypeToType(np.dtype(np.float32)))
self.assertEqual(self.gfsg.fs_proto.STRING,
self.gfsg.DtypeToType(np.dtype(np.str)))
# Unsupported types treated as string for now
self.assertEqual(self.gfsg.fs_proto.STRING,
self.gfsg.DtypeToType(np.dtype(np.void)))
def testGetDatasetsProtoFromEntriesLists(self):
entries = {}
entries['testFeature'] = {
'vals': [1, 2, 3],
'counts': [1, 1, 1],
'missing': 0,
'type': self.gfsg.fs_proto.INT
}
datasets = [{'entries': entries, 'size': 3, 'name': 'testDataset'}]
p = self.gfsg.GetDatasetsProto(datasets)
self.assertEqual(1, len(p.datasets))
test_data = p.datasets[0]
self.assertEqual('testDataset', test_data.name)
self.assertEqual(3, test_data.num_examples)
self.assertEqual(1, len(test_data.features))
numfeat = test_data.features[0]
self.assertEqual('testFeature', numfeat.name)
self.assertEqual(self.gfsg.fs_proto.INT, numfeat.type)
self.assertEqual(1, numfeat.num_stats.min)
self.assertEqual(3, numfeat.num_stats.max)
hist = numfeat.num_stats.common_stats.num_values_histogram
buckets = hist.buckets
self.assertEqual(self.gfsg.histogram_proto.QUANTILES, hist.type)
self.assertEqual(10, len(buckets))
self.assertEqual(1, buckets[0].low_value)
self.assertEqual(1, buckets[0].high_value)
self.assertEqual(.3, buckets[0].sample_count)
self.assertEqual(1, buckets[9].low_value)
self.assertEqual(1, buckets[9].high_value)
self.assertEqual(.3, buckets[9].sample_count)
def testGetDatasetsProtoSequenceExampleHistogram(self):
entries = {}
entries['testFeature'] = {
'vals': [1, 2, 2, 3],
'counts': [1, 2, 1],
'feat_lens': [1, 2, 1],
'missing': 0,
'type': self.gfsg.fs_proto.INT
}
datasets = [{'entries': entries, 'size': 3, 'name': 'testDataset'}]
p = self.gfsg.GetDatasetsProto(datasets)
hist = p.datasets[0].features[
0].num_stats.common_stats.feature_list_length_histogram
buckets = hist.buckets
self.assertEqual(self.gfsg.histogram_proto.QUANTILES, hist.type)
self.assertEqual(10, len(buckets))
self.assertEqual(1, buckets[0].low_value)
self.assertEqual(1, buckets[0].high_value)
self.assertEqual(.3, buckets[0].sample_count)
self.assertEqual(1.8, buckets[9].low_value)
self.assertEqual(2, buckets[9].high_value)
self.assertEqual(.3, buckets[9].sample_count)
def testGetDatasetsProtoWithWhitelist(self):
entries = {}
entries['testFeature'] = {
'vals': [1, 2, 3],
'counts': [1, 1, 1],
'missing': 0,
'type': self.gfsg.fs_proto.INT
}
entries['ignoreFeature'] = {
'vals': [5, 6],
'counts': [1, 1],
'missing': 1,
'type': self.gfsg.fs_proto.INT
}
datasets = [{'entries': entries, 'size': 3, 'name': 'testDataset'}]
p = self.gfsg.GetDatasetsProto(datasets, features=['testFeature'])
self.assertEqual(1, len(p.datasets))
test_data = p.datasets[0]
self.assertEqual('testDataset', test_data.name)
self.assertEqual(3, test_data.num_examples)
self.assertEqual(1, len(test_data.features))
numfeat = test_data.features[0]
self.assertEqual('testFeature', numfeat.name)
self.assertEqual(1, numfeat.num_stats.min)
if __name__ == '__main__':
googletest.main()
|
apache-2.0
|
surligas/cs436-gnuradio
|
gr-digital/examples/example_timing.py
|
49
|
9180
|
#!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, digital, filter
from gnuradio import blocks
from gnuradio import channels
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
from scipy import fftpack
class example_timing(gr.top_block):
def __init__(self, N, sps, rolloff, ntaps, bw, noise,
foffset, toffset, poffset, mode=0):
gr.top_block.__init__(self)
rrc_taps = filter.firdes.root_raised_cosine(
sps, sps, 1.0, rolloff, ntaps)
gain = bw
nfilts = 32
rrc_taps_rx = filter.firdes.root_raised_cosine(
nfilts, sps*nfilts, 1.0, rolloff, ntaps*nfilts)
data = 2.0*scipy.random.randint(0, 2, N) - 1.0
data = scipy.exp(1j*poffset) * data
self.src = blocks.vector_source_c(data.tolist(), False)
self.rrc = filter.interp_fir_filter_ccf(sps, rrc_taps)
self.chn = channels.channel_model(noise, foffset, toffset)
self.off = filter.fractional_resampler_cc(0.20, 1.0)
if mode == 0:
self.clk = digital.pfb_clock_sync_ccf(sps, gain, rrc_taps_rx,
nfilts, nfilts//2, 1)
self.taps = self.clk.taps()
self.dtaps = self.clk.diff_taps()
self.delay = int(scipy.ceil(((len(rrc_taps)-1)/2 +
(len(self.taps[0])-1)/2)/float(sps))) + 1
self.vsnk_err = blocks.vector_sink_f()
self.vsnk_rat = blocks.vector_sink_f()
self.vsnk_phs = blocks.vector_sink_f()
self.connect((self.clk,1), self.vsnk_err)
self.connect((self.clk,2), self.vsnk_rat)
self.connect((self.clk,3), self.vsnk_phs)
else: # mode == 1
mu = 0.5
gain_mu = bw
gain_omega = 0.25*gain_mu*gain_mu
omega_rel_lim = 0.02
self.clk = digital.clock_recovery_mm_cc(sps, gain_omega,
mu, gain_mu,
omega_rel_lim)
self.vsnk_err = blocks.vector_sink_f()
self.connect((self.clk,1), self.vsnk_err)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_clk = blocks.vector_sink_c()
self.connect(self.src, self.rrc, self.chn, self.off, self.clk, self.vsnk_clk)
self.connect(self.src, self.vsnk_src)
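# Flowgraph summary (descriptive comment): random +/-1 symbols -> RRC pulse
# shaping at sps samples/symbol -> channel model (noise, frequency and timing
# offset) -> fractional resampler (fixed fractional sample offset) -> the
# timing recovery block chosen above (polyphase clock sync or M&M clock
# recovery); the recovered symbols and the raw source are captured in vector
# sinks for plotting in main().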
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=2000,
help="Set the number of samples to process [default=%default]")
parser.add_option("-S", "--sps", type="int", default=4,
help="Set the samples per symbol [default=%default]")
parser.add_option("-r", "--rolloff", type="eng_float", default=0.35,
help="Set the rolloff factor [default=%default]")
parser.add_option("-W", "--bandwidth", type="eng_float", default=2*scipy.pi/100.0,
help="Set the loop bandwidth (PFB) or gain (M&M) [default=%default]")
parser.add_option("-n", "--ntaps", type="int", default=45,
help="Set the number of taps in the filters [default=%default]")
parser.add_option("", "--noise", type="eng_float", default=0.0,
help="Set the simulation noise voltage [default=%default]")
parser.add_option("-f", "--foffset", type="eng_float", default=0.0,
help="Set the simulation's normalized frequency offset (in Hz) [default=%default]")
parser.add_option("-t", "--toffset", type="eng_float", default=1.0,
help="Set the simulation's timing offset [default=%default]")
parser.add_option("-p", "--poffset", type="eng_float", default=0.0,
help="Set the simulation's phase offset [default=%default]")
parser.add_option("-M", "--mode", type="int", default=0,
help="Set the recovery mode (0: polyphase, 1: M&M) [default=%default]")
(options, args) = parser.parse_args ()
# Adjust N for the interpolation by sps
options.nsamples = options.nsamples // options.sps
# Set up the program-under-test
put = example_timing(options.nsamples, options.sps, options.rolloff,
options.ntaps, options.bandwidth, options.noise,
options.foffset, options.toffset, options.poffset,
options.mode)
put.run()
if options.mode == 0:
data_src = scipy.array(put.vsnk_src.data()[20:])
data_clk = scipy.array(put.vsnk_clk.data()[20:])
data_err = scipy.array(put.vsnk_err.data()[20:])
data_rat = scipy.array(put.vsnk_rat.data()[20:])
data_phs = scipy.array(put.vsnk_phs.data()[20:])
f1 = pylab.figure(1, figsize=(12,10), facecolor='w')
# Plot the IQ symbols
s1 = f1.add_subplot(2,2,1)
s1.plot(data_src.real, data_src.imag, "bo")
s1.plot(data_clk.real, data_clk.imag, "ro")
s1.set_title("IQ")
s1.set_xlabel("Real part")
s1.set_ylabel("Imag part")
s1.set_xlim([-2, 2])
s1.set_ylim([-2, 2])
# Plot the symbols in time
delay = put.delay
m = len(data_clk.real)
s2 = f1.add_subplot(2,2,2)
s2.plot(data_src.real, "bs", markersize=10, label="Input")
s2.plot(data_clk.real[delay:], "ro", label="Recovered")
s2.set_title("Symbols")
s2.set_xlabel("Samples")
s2.set_ylabel("Real Part of Signals")
s2.legend()
# Plot the clock recovery loop's error
s3 = f1.add_subplot(2,2,3)
s3.plot(data_err, label="Error")
s3.plot(data_rat, 'r', label="Update rate")
s3.set_title("Clock Recovery Loop Error")
s3.set_xlabel("Samples")
s3.set_ylabel("Error")
s3.set_ylim([-0.5, 0.5])
s3.legend()
# Plot the clock recovery loop's filter phase
s4 = f1.add_subplot(2,2,4)
s4.plot(data_phs)
s4.set_title("Clock Recovery Loop Filter Phase")
s4.set_xlabel("Samples")
s4.set_ylabel("Filter Phase")
diff_taps = put.dtaps
ntaps = len(diff_taps[0])
nfilts = len(diff_taps)
t = scipy.arange(0, ntaps*nfilts)
f3 = pylab.figure(3, figsize=(12,10), facecolor='w')
s31 = f3.add_subplot(2,1,1)
s32 = f3.add_subplot(2,1,2)
s31.set_title("Differential Filters")
s32.set_title("FFT of Differential Filters")
for i,d in enumerate(diff_taps):
D = 20.0*scipy.log10(1e-20+abs(fftpack.fftshift(fftpack.fft(d, 10000))))
s31.plot(t[i::nfilts].real, d, "-o")
s32.plot(D)
s32.set_ylim([-120, 10])
# If testing the M&M clock recovery loop
else:
data_src = scipy.array(put.vsnk_src.data()[20:])
data_clk = scipy.array(put.vsnk_clk.data()[20:])
data_err = scipy.array(put.vsnk_err.data()[20:])
f1 = pylab.figure(1, figsize=(12,10), facecolor='w')
# Plot the IQ symbols
s1 = f1.add_subplot(2,2,1)
s1.plot(data_src.real, data_src.imag, "o")
s1.plot(data_clk.real, data_clk.imag, "ro")
s1.set_title("IQ")
s1.set_xlabel("Real part")
s1.set_ylabel("Imag part")
s1.set_xlim([-2, 2])
s1.set_ylim([-2, 2])
# Plot the symbols in time
s2 = f1.add_subplot(2,2,2)
s2.plot(data_src.real, "bs", markersize=10, label="Input")
s2.plot(data_clk.real, "ro", label="Recovered")
s2.set_title("Symbols")
s2.set_xlabel("Samples")
s2.set_ylabel("Real Part of Signals")
s2.legend()
# Plot the clock recovery loop's error
s3 = f1.add_subplot(2,2,3)
s3.plot(data_err)
s3.set_title("Clock Recovery Loop Error")
s3.set_xlabel("Samples")
s3.set_ylabel("Error")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
|
gpl-3.0
|
esatel/ADCPy
|
adcpy/transect_average.py
|
1
|
12135
|
# -*- coding: utf-8 -*-
"""Production example script that averages repeated tranects, resolving and visualizing secondary circulation
Driver script that is designed to average repeated ADCP transect observations
in an effort of reduce measurement error and better resolve non-steamwise and
velocities and secondary circulation features. A summary of script functions:
1) Assess an input directory for ADCP observations (raw files) that match
in space and time.
2) Group matchcing ADCP observations into groups of a maxium number for
averaging.
3) Pre-process raw ADCP observations before averaging as appropriate.
4) Bin-average pre-processed ADCP observation velcotities
5) Generate netcdf and/or CSV output files of bin-average velocities
6) Generate various plots of streamwise, depth averaged, 3D velocities
and secondary circulation features.
The script options are listed and described immediatly below this comment
block.
This code is open source, and defined by the included MIT Copyright License
Designed for Python 2.7; NumPy 1.7; SciPy 0.11.0; Matplotlib 1.2.0
2014-09 - First Release; blsaenz, esatel
"""
## START script options #######################################################
# bin-averaging parameters
avg_dxy = 2.0 # Horizontal resolution of averaging bins {m}
avg_dz = 0.25 # Vertical resolution of averaging bins {m}
avg_max_gap_m = 30.0 # Maximum distance allowed between ADCP observations when averaging {m}
avg_max_gap_minutes = 20.0 # Maximum time allowed between ADCP observations when averaging {minutes}
avg_max_group_size = 6 # Maximum number of ADCP observations to average
avg_bin_sd_drop = 3 # Number of standard deviations beyond which bin samples are dropped during averaging
avg_normal_to_flow = False
# post-average options
avg_rotation = 'Rozovski' # One of ['Rozovski','no transverse flow','principal flow','normal',None]
avg_std_drop = 3.0 # Standard deviation of velocity, above which samples are dropped from analysis {0.0=no dropping, 2.0-3.0 typically [number of standard deviations]}
avg_std_interp = True # Perform interpolation of holes in velocity profiles left by high standard deviation removal {typically True with std_drop > 0.0}
avg_smooth_kernel = 3 # Smooth velocity data using a square kernel box-filter, with side dimension = avg_smooth_kernel. 0 = no kernel smoothing
avg_save_netcdf = True # Save bin-averaged velocities as an ADCPData netcdf file
avg_save_csv = True # Save bin-averaged velocities as a CSV text file
avg_plot_xy = True # Generate a composite plot of survey location(s) of original ADCP ensembles
avg_plot_avg_n_sd = True # Generate image plots of bin-averaged U,V,W velocities, and the number and standard deviation of bin averages
avg_plot_mean_vectors = True # Generate an arrow plot of bin-averaged U-V mean velocities in the x-y plane
avg_plot_secondary_circulation = True # Generate an image plot of 2D bin-averaged streamwise (u) velocities, overlain by an arrow plot showing secondary circulation in the V-W plane
avg_plot_uvw_velocity_array = True # Generate a 3-panel image plot showing bin-averaged U,V,W velocities in the V-W plane
avg_plot_flow_summmary = True # Generate a summary plot showing image plots of U,V bin-averaged velocities, an arrow plot of bin-averaged U-V mean velocities, and flow/discharge calculations
avg_save_plots = True # Save the plots to disk
avg_show_plots = False # Print plots to screen (pauses execution until plots are manually closed)
## END script options #########################################################
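# Typical invocation (illustrative comment; assumes a transect_preprocessor
# input file is available, e.g. the 'trn_pre_input_GEO20090106.py' referenced
# in the commented-out example at the bottom of this script):
#   python transect_average.py trn_pre_input_GEO20090106.py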
import os
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import transect_preprocessor
reload(transect_preprocessor)
import adcpy
reload(adcpy)
from adcpy_recipes import *
def transect_average(pre_process_input_file=None):
"""
Receives a list of ADCPTransectData objects from transect_preprocessor.py,
then groups and bin-averages the transects. Group-averaged ADCPData
objects, velocities, and plots are optionally output to the outpath supplied
by transect_preprocessor().
Inputs:
pre_process_input_file = path to a transect_preprocessor input file,
or None to use the default file.
"""
(transects,outpath) = transect_preprocessor.transect_preprocessor(pre_process_input_file)
print 'total transects loaded:',len(transects)
grps_to_average = group_adcp_obs_by_spacetime(transects,
max_gap_m=avg_max_gap_m,
max_gap_minutes=avg_max_gap_minutes,
max_group_size=avg_max_group_size)
print 'total groups to average',len(grps_to_average)
#write_csv_velocity(transects[0],os.path.join(outpath,'temp.csv'),no_header=True)
#grps_to_average = grps_to_average[1:]
grp_num = 0
track_fig=101
for grp in grps_to_average:
if avg_plot_xy:
adcpy.plot.plot_obs_group_xy_lines(grp,fig=track_fig,title='Group%03i Source Observations'%grp_num)
avg = average_transects(grp,
dxy=avg_dxy,
dz=avg_dz,
return_adcpy=True,
plotline_from_flow=avg_normal_to_flow,
sd_drop=avg_bin_sd_drop)
if avg_plot_xy:
adcpy.plot.get_fig(fig=track_fig)
plt.plot(avg.xy[:,0],avg.xy[:,1],label='average projection')
plt.legend(prop={'size':10})
if avg_save_plots:
plt.savefig(os.path.join(outpath,"group%03i_xy_lines.png"%grp_num))
if avg_rotation is not None:
avg = transect_rotate(avg,avg_rotation)
if avg_std_drop > 0:
avg.sd_drop(sd=avg_std_drop,
sd_axis='elevation',
interp_holes=avg_std_interp)
avg.sd_drop(sd=avg_std_drop,
sd_axis='ensemble',
interp_holes=avg_std_interp)
if avg_smooth_kernel > 2:
avg.kernel_smooth(kernel_size = avg_smooth_kernel)
if avg_save_csv:
write_csv_velocity_array(avg,os.path.join(outpath,'group%03i_velocity.csv'%grp_num),no_header=True)
write_csv_velocity_db(avg,os.path.join(outpath,'group%03i_velocity_db.csv'%grp_num),no_header=False)
write_ensemble_mean_velocity_db(avg,os.path.join(outpath,'group%03i_velocity_depth_means.csv'%grp_num),
no_header=False,range_from_velocities=True)
if avg_save_netcdf:
fname = os.path.join(outpath,'group%03i.nc'%grp_num)
avg.write_nc(fname,zlib=True)
if avg_plot_avg_n_sd:
uvw = 'uvw'
for i in range(3):
plot_avg_n_sd(avg,i,0.05)
if avg_save_plots:
plt.savefig(os.path.join(outpath,"group%03i_%s_avg_n_sd.png"%(grp_num,uvw[i])))
if avg_plot_mean_vectors:
fig3 = adcpy.plot.plot_ensemble_mean_vectors(avg,title='Group%03i Mean Velocity [m/s]'%grp_num)
if avg_save_plots:
plt.savefig(os.path.join(outpath,"group%03i_mean_velocity.png"%grp_num))
if avg_plot_secondary_circulation:
fig4 = adcpy.plot.plot_secondary_circulation(avg,u_vecs=30,v_vecs=30,
title='Group%03i Cross-Stream Velocity [m/s] and Secondary Circulation Vectors'%grp_num)
if avg_save_plots:
plt.savefig(os.path.join(outpath,"group%03i_secondary_circulation.png"%grp_num))
if avg_plot_uvw_velocity_array:
fig5 = adcpy.plot.plot_uvw_velocity_array(avg.velocity,
title='Group%03i Velocity [m/s]'%grp_num,
ures=0.1,vres=0.1,wres=0.05)
if avg_save_plots:
plt.savefig(os.path.join(outpath,"group%03i_uvw_velocity.png"%grp_num))
if avg_plot_flow_summmary:
fig6 = adcpy.plot.plot_flow_summmary(avg,title='Group%03i Streamwise Summary'%grp_num,
ures=0.1,vres=0.1,use_grid_flows=True)
if avg_save_plots:
plt.savefig(os.path.join(outpath,"group%03i_flow_summary.png"%grp_num))
if avg_show_plots:
plt.show()
plt.close('all')
grp_num += 1
print 'ADCP processing complete!'
def plot_avg_n_sd(avg,uvw,resolution=0.1):
"""
Generates a vertical three-panel plot, showing images of a bin-averaged
velocity, the number of velocity measurements in each bin, and the bin standard
deviation of velocity. Designed to be used in conjunction with
transect_average() output.
Inputs:
avg = ADCPData object, with extra velocity_n and velocity_sd data
fields produced by transect_average()
uvw = integer index (0, 1, or 2) selecting the U, V, or W velocity
component to plot.
resolution = optional value to round the plot velocity scales up toward
"""
if uvw == 0:
v_str = 'U'
elif uvw == 1:
v_str = 'V'
else:
v_str = 'W'
inv = 1/resolution
xx,yy,dd,pline = adcpy.util.find_projection_distances(avg.xy)
mtest = np.floor(avg.velocity[...,uvw]*inv)
minv = np.nanmin(np.nanmin(mtest))*resolution
mtest = np.ceil(avg.velocity[...,uvw]*inv)
maxv = np.nanmax(np.nanmax(mtest))*resolution
avg_panel = adcpy.plot.IPanel(velocity = avg.velocity[:,:,uvw],
x = dd,
y = avg.bin_center_elevation,
minv = minv,
maxv = maxv,
xlabel = 'm',
ylabel = 'm',
units = 'm/s',
use_pcolormesh = True,
title='%s Averaged Velocity [m/s]'%v_str)
maxv = np.nanmax(np.nanmax(avg.velocity_n[...,uvw]))
n_panel = adcpy.plot.IPanel(velocity = avg.velocity_n[:,:,uvw],
x = dd,
y = avg.bin_center_elevation,
minv = 0,
maxv = maxv,
xlabel = 'm',
ylabel = 'm',
units = 'number',
use_pcolormesh = True,
title='n Samples')
mtest = np.floor(avg.velocity_sd[...,uvw]*inv)
minv = np.nanmin(np.nanmin(mtest))*resolution
mtest = np.ceil(avg.velocity_sd[...,uvw]*inv)
maxv = np.nanmax(np.nanmax(mtest))*resolution
sd_panel = adcpy.plot.IPanel(velocity = avg.velocity_sd[:,:,uvw],
x = dd,
y = avg.bin_center_elevation,
minv = 0,
maxv = maxv,
xlabel = 'm',
ylabel = 'm',
units = 'm/s',
use_pcolormesh = True,
title='Standard Deviation [m/s]')
fig = adcpy.plot.plot_vertical_panels((avg_panel,n_panel,sd_panel))
return fig
def main():
import sys
prepro_input = sys.argv[1]
transect_average(prepro_input)
# run myself
if __name__ == "__main__":
#transect_average('trn_pre_input_GEO20090106.py')
main()
|
mit
|
xiaoxiamii/scikit-learn
|
sklearn/utils/extmath.py
|
70
|
21951
|
"""
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array, NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
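# Illustrative doctest-style sketch (added comment, not library code) of the
# documented equivalence between row_norms and the naive NumPy expression:
# >>> X = np.array([[3., 4.], [0., 1.]])
# >>> row_norms(X)
# array([ 5.,  1.])
# >>> np.sqrt((X * X).sum(axis=1))   # same result, but builds a temporary
# array([ 5.,  1.])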
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
Equivalent to : np.log(nl.det(A)) but more robust.
It returns -Inf if det(A) is non positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Data must be of same type. Supported types '
'are 32 and 64 bit float. '
'Falling back to np.dot.', NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
This function calls BLAS directly while warranting Fortran contiguity.
This helps avoiding extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.utils.validation import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
def randomized_range_finder(A, size, n_iter, random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q: 2D array
A (size x size) projection matrix, the range of which
approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
"""
random_state = check_random_state(random_state)
# generating random gaussian vectors r with shape: (A.shape[1], size)
R = random_state.normal(size=(A.shape[1], size))
# sampling the range of A by linear projection of r
Y = safe_sparse_dot(A, R)
del R
# perform power iterations with Y to further 'imprint' the top
# singular vectors of A in Y
for i in xrange(n_iter):
Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
# extracting an orthonormal basis of the A range samples
Q, R = linalg.qr(Y, mode='economic')
return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
transpose='auto', flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples.
n_iter: int (default is 0)
Number of power iterations (can be used to deal with very noisy
problems).
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
implementation of randomized SVD tends to be a little faster in that
case.
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
A random number generator instance to make behavior deterministic.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if transpose == 'auto' and n_samples > n_features:
transpose = True
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
U, V = svd_flip(U, V)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
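# Illustrative usage sketch (added comment, not library code); the array names
# below are hypothetical. For an exactly rank-3 matrix the truncated SVD gives
# a near-exact reconstruction:
# >>> rng = np.random.RandomState(0)
# >>> M = np.dot(rng.randn(50, 3), rng.randn(3, 20))       # rank-3, 50 x 20
# >>> U, s, V = randomized_svd(M, n_components=3, random_state=0)
# >>> U.shape, s.shape, V.shape
# ((50, 3), (3,), (3, 20))
# >>> np.allclose(M, np.dot(U * s, V))
# True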
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
# the least error
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
Real symmetric or complex hermitian matrix to be pseudo-inverted
cond : float or None, default None
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping. Otherwise,
use the rows of v. The choice of which variable to base the decision on
is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
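# Illustrative doctest-style sketch (added comment): after svd_flip the sign
# convention is fixed but the product still reconstructs the input matrix.
# >>> A = np.array([[1., 2.], [3., 4.], [5., 6.]])
# >>> u, s, v = linalg.svd(A, full_matrices=False)
# >>> u, v = svd_flip(u, v)
# >>> np.allclose(A, np.dot(u * s, v))
# True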
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X: array-like, shape (M, N) or (M, )
Argument to the logistic function
out: array-like, shape: (M, N) or (M, ), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N) or (M, )
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = np.atleast_2d(X)
X = check_array(X, dtype=np.float64)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
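# Quick sanity check (illustrative comment): log(1 / (1 + exp(-0))) == -log(2).
# >>> log_logistic(np.array([0.0]))
# array([-0.69314718])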
def softmax(X, copy=True):
"""
Calculate the softmax function.
The softmax function is calculated by
np.exp(X) / np.sum(np.exp(X), axis=1)
This will cause overflow when large values are exponentiated.
Hence the largest value in each row is subtracted from each data
point to prevent this.
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
copy: bool, optional
Copy X or not.
Returns
-------
out: array, shape (M, N)
Softmax function evaluated at every point in x
"""
if copy:
X = np.copy(X)
max_prob = np.max(X, axis=1).reshape((-1, 1))
X -= max_prob
np.exp(X, X)
sum_prob = np.sum(X, axis=1).reshape((-1, 1))
X /= sum_prob
return X
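# Illustrative comment: every row of the softmax output sums to one.
# >>> p = softmax(np.array([[1., 2., 3.], [1., 1., 1.]]))
# >>> p.sum(axis=1)
# array([ 1.,  1.])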
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
def _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count):
"""Calculate an average mean update and a Youngs and Cramer variance update.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
old_mean : array-like, shape: (n_features,)
old_variance : array-like, shape: (n_features,)
old_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
updated_sample_count : int
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample variance:
analysis and recommendations, The American Statistician, Vol. 37, No. 3, pp. 242-247
"""
new_sum = X.sum(axis=0)
new_variance = X.var(axis=0) * X.shape[0]
old_sum = old_mean * old_sample_count
n_samples = X.shape[0]
updated_sample_count = old_sample_count + n_samples
partial_variance = old_sample_count / (n_samples * updated_sample_count) * (
n_samples / old_sample_count * old_sum - new_sum) ** 2
unnormalized_variance = old_variance * old_sample_count + new_variance + \
partial_variance
return ((old_sum + new_sum) / updated_sample_count,
unnormalized_variance / updated_sample_count,
updated_sample_count)
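# Illustrative consistency check (added comment; array names are hypothetical):
# folding a second batch into running statistics matches np.mean / np.var on
# the stacked data.
# >>> X1, X2 = np.arange(6.).reshape(3, 2), np.arange(6., 14.).reshape(4, 2)
# >>> mean, var, n = _batch_mean_variance_update(
# ...     X2, X1.mean(axis=0), X1.var(axis=0), X1.shape[0])
# >>> np.allclose(mean, np.vstack((X1, X2)).mean(axis=0))
# True
# >>> np.allclose(var, np.vstack((X1, X2)).var(axis=0))
# True
# >>> n
# 7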
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
|
bsd-3-clause
|
fspaolo/scikit-learn
|
examples/plot_multioutput_face_completion.py
|
330
|
3019
|
"""
==============================================
Face completion with multi-output estimators
==============================================
This example shows the use of multi-output estimators to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30] # Test on independent people
# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
n_pixels = data.shape[1]
X_train = train[:, :np.ceil(0.5 * n_pixels)] # Upper half of the faces
y_train = train[:, np.floor(0.5 * n_pixels):] # Lower half of the faces
X_test = test[:, :np.ceil(0.5 * n_pixels)]
y_test = test[:, np.floor(0.5 * n_pixels):]
# Fit estimators
ESTIMATORS = {
"Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
random_state=0),
"K-nn": KNeighborsRegressor(),
"Linear regression": LinearRegression(),
"Ridge": RidgeCV(),
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] = estimator.predict(X_test)
# Plot the completed faces
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
true_face = np.hstack((X_test[i], y_test[i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
title="true faces")
sub.axis("off")
sub.imshow(true_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
for j, est in enumerate(sorted(ESTIMATORS)):
completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
title=est)
sub.axis("off")
sub.imshow(completed_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
plt.show()
|
bsd-3-clause
|
huzq/scikit-learn
|
sklearn/cluster/_bicluster.py
|
3
|
21109
|
"""Spectral biclustering algorithms."""
# Authors : Kemal Eren
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.linalg import norm
from scipy.sparse import dia_matrix, issparse
from scipy.sparse.linalg import eigsh, svds
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..utils import check_random_state
from ..utils.extmath import (make_nonnegative, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, _deprecate_positional_args
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
dist = norm(X_scaled.data - X.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
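# Descriptive comment: the expression above is the double-centered log matrix
# L_ij - rowmean_i(L) - colmean_j(L) + mean(L), which removes purely
# multiplicative row and column effects of ``X`` and leaves the log
# "interactions" that the spectral biclustering step operates on.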
class BaseSpectral(BiclusterMixin, BaseEstimator, metaclass=ABCMeta):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs='deprecated', random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X, y=None):
"""Creates a biclustering for X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
y : Ignored
"""
if self.n_jobs != 'deprecated':
warnings.warn("'n_jobs' was deprecated in version 0.23 and will be"
" removed in 0.25.", FutureWarning)
X = self._validate_data(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
return self
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
A = safe_sparse_dot(array.T, array)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
vt = v.T
if np.any(np.isnan(u)):
A = safe_sparse_dot(array, array.T)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : int, default=3
The number of biclusters to find.
svd_method : {'randomized', 'arpack'}, default='randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`scipy.sparse.linalg.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, default=None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
`svd_method` is 'randomized'.
mini_batch : bool, default=False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random'} or ndarray of shape \
(n_clusters, n_features), default='k-means++'
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, default=10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, default=None
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. deprecated:: 0.23
``n_jobs`` was deprecated in version 0.23 and will be removed in
0.25.
random_state : int, RandomState instance, default=None
Used for randomizing the singular value decomposition and the k-means
initialization. Use an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
Attributes
----------
rows_ : array-like of shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like of shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like of shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like of shape (n_cols,)
The bicluster label of each column.
Examples
--------
>>> from sklearn.cluster import SpectralCoclustering
>>> import numpy as np
>>> X = np.array([[1, 1], [2, 1], [1, 0],
... [4, 7], [3, 5], [3, 6]])
>>> clustering = SpectralCoclustering(n_clusters=2, random_state=0).fit(X)
>>> clustering.row_labels_ #doctest: +SKIP
array([0, 1, 1, 0, 0, 0], dtype=int32)
>>> clustering.column_labels_ #doctest: +SKIP
array([0, 0], dtype=int32)
>>> clustering
SpectralCoclustering(n_clusters=2, random_state=0)
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
@_deprecate_positional_args
def __init__(self, n_clusters=3, *, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs='deprecated', random_state=None):
super().__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack([self.row_labels_ == c
for c in range(self.n_clusters)])
self.columns_ = np.vstack([self.column_labels_ == c
for c in range(self.n_clusters)])
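def _coclustering_indices_demo():
    # Illustrative sketch (not part of the scikit-learn source): after fitting,
    # the BiclusterMixin helpers recover the members and the submatrix of each
    # bicluster found by SpectralCoclustering.
    X_demo = np.array([[1, 1], [2, 1], [1, 0],
                       [4, 7], [3, 5], [3, 6]])
    model = SpectralCoclustering(n_clusters=2, random_state=0).fit(X_demo)
    row_idx, col_idx = model.get_indices(0)        # rows/columns of bicluster 0
    submatrix = model.get_submatrix(0, X_demo)     # the corresponding block
    return row_idx, col_idx, submatrix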
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : int or tuple (n_row_clusters, n_column_clusters), default=3
The number of row and column clusters in the checkerboard
structure.
method : {'bistochastic', 'scale', 'log'}, default='bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'.
.. warning::
if `method='log'`, the data must not be sparse.
n_components : int, default=6
Number of singular vectors to check.
n_best : int, default=3
Number of best singular vectors to which to project the data
for clustering.
svd_method : {'randomized', 'arpack'}, default='randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
:func:`~sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`scipy.sparse.linalg.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, default=None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
`svd_method` is 'randomized'.
mini_batch : bool, default=False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random'} or ndarray of shape (n_clusters, n_features), \
default='k-means++'
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, default=10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, default=None
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. deprecated:: 0.23
``n_jobs`` was deprecated in version 0.23 and will be removed in
0.25.
random_state : int, RandomState instance, default=None
Used for randomizing the singular value decomposition and the k-means
initialization. Use an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
Attributes
----------
rows_ : array-like of shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like of shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like of shape (n_rows,)
Row partition labels.
column_labels_ : array-like of shape (n_cols,)
Column partition labels.
Examples
--------
>>> from sklearn.cluster import SpectralBiclustering
>>> import numpy as np
>>> X = np.array([[1, 1], [2, 1], [1, 0],
... [4, 7], [3, 5], [3, 6]])
>>> clustering = SpectralBiclustering(n_clusters=2, random_state=0).fit(X)
>>> clustering.row_labels_
array([1, 1, 1, 0, 0, 0], dtype=int32)
>>> clustering.column_labels_
array([0, 1], dtype=int32)
>>> clustering
SpectralBiclustering(n_clusters=2, random_state=0)
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
@_deprecate_positional_args
def __init__(self, n_clusters=3, *, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs='deprecated', random_state=None):
super().__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super()._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack([self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters)])
self.columns_ = np.vstack([self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters)])
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
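def _checkerboard_recovery_demo():
    # Illustrative sketch (not part of the scikit-learn source): plant a
    # checkerboard structure, recover it with SpectralBiclustering and measure
    # the agreement with the ground truth via the consensus score.
    from sklearn.datasets import make_checkerboard
    from sklearn.metrics import consensus_score
    data, rows, columns = make_checkerboard(shape=(100, 100), n_clusters=(4, 3),
                                            noise=10, shuffle=True,
                                            random_state=0)
    model = SpectralBiclustering(n_clusters=(4, 3), method='log',
                                 random_state=0)
    model.fit(data)
    # A score of 1.0 means a perfect match between found and planted biclusters.
    return consensus_score(model.biclusters_, (rows, columns))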
|
bsd-3-clause
|
morrisonwudi/zipline
|
zipline/utils/cli.py
|
10
|
8575
|
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import argparse
from copy import copy
from six import print_
from six.moves import configparser
import pandas as pd
try:
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter
PYGMENTS = True
except ImportError:
PYGMENTS = False
import zipline
from zipline.errors import NoSourceError, PipelineDateError
DEFAULTS = {
'data_frequency': 'daily',
'capital_base': '10e6',
'source': 'yahoo',
'symbols': 'AAPL',
'metadata_index': 'symbol',
'source_time_column': 'Date',
}
def parse_args(argv, ipython_mode=False):
"""Parse list of arguments.
If a config file is provided (via -c), it will read in the
supplied options and overwrite any global defaults.
All other directly supplied arguments will overwrite the config
file settings.
Arguments:
* argv : list of strings
List of arguments, e.g. ['-c', 'my.conf']
* ipython_mode : bool <default=False>
Whether to parse IPython specific arguments
like --local_namespace
Notes:
Default settings can be found in zipline.utils.cli.DEFAULTS.
"""
# Parse any conf_file specification
# We make this parser with add_help=False so that
# it doesn't parse -h and print help.
conf_parser = argparse.ArgumentParser(
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
# Turn off help, so we print all options in response to -h
add_help=False
)
conf_parser.add_argument("-c", "--conf_file",
help="Specify config file",
metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args(argv)
defaults = copy(DEFAULTS)
if args.conf_file:
config = configparser.SafeConfigParser()
config.read([args.conf_file])
defaults.update(dict(config.items("Defaults")))
# Parse rest of arguments
# Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
description="Zipline version %s." % zipline.__version__,
parents=[conf_parser]
)
parser.set_defaults(**defaults)
parser.add_argument('--algofile', '-f')
parser.add_argument('--data-frequency',
choices=('minute', 'daily'))
parser.add_argument('--start', '-s')
parser.add_argument('--end', '-e')
parser.add_argument('--capital_base')
parser.add_argument('--source', '-d', choices=('yahoo',))
parser.add_argument('--source_time_column', '-t')
parser.add_argument('--symbols')
parser.add_argument('--output', '-o')
parser.add_argument('--metadata_path', '-m')
parser.add_argument('--metadata_index', '-x')
parser.add_argument('--print-algo', '-p', dest='print_algo',
action='store_true')
parser.add_argument('--no-print-algo', '-q', dest='print_algo',
action='store_false')
if ipython_mode:
parser.add_argument('--local_namespace', action='store_true')
args = parser.parse_args(remaining_argv)
return(vars(args))
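def _parse_args_example():
    # Illustrative sketch (not part of the original zipline module): options come
    # back as a plain dict, with global defaults filled in and command-line
    # values taking precedence. 'my_algo.py' is a hypothetical file name.
    opts = parse_args(['-f', 'my_algo.py', '--symbols', 'AAPL,MSFT'])
    assert opts['capital_base'] == '10e6'    # taken from DEFAULTS
    assert opts['symbols'] == 'AAPL,MSFT'    # command line overrides the default
    return opts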
def parse_cell_magic(line, cell):
"""Parse IPython magic
"""
args_list = line.split(' ')
args = parse_args(args_list, ipython_mode=True)
# Remove print_algo kwarg to overwrite below.
args.pop('print_algo')
local_namespace = args.pop('local_namespace', False)
# By default, execute inside IPython namespace
if not local_namespace:
args['namespace'] = get_ipython().user_ns # flake8: noqa
# If we are running inside NB, do not output to file but create a
# variable instead
output_var_name = args.pop('output', None)
perf = run_pipeline(print_algo=False, algo_text=cell, **args)
if output_var_name is not None:
get_ipython().user_ns[output_var_name] = perf # flake8: noqa
def run_pipeline(print_algo=True, **kwargs):
"""Runs a full zipline pipeline given configuration keyword
arguments.
1. Load data (start and end dates can be provided as strings, as
well as the source and symbols).
2. Instantiate algorithm (supply either algo_text or algofile
kwargs containing initialize() and handle_data() functions). If
algofile is supplied, will try to look for algofile_analyze.py and
append it.
3. Run algorithm (supply capital_base as float).
4. Return performance dataframe.
:Arguments:
* print_algo : bool <default=True>
Whether to print the algorithm to command line. Will use
pygments syntax coloring if pygments is found.
"""
start = kwargs['start']
end = kwargs['end']
# Compare against None because strings/timestamps may have been given
if start is not None:
start = pd.Timestamp(start, tz='UTC')
if end is not None:
end = pd.Timestamp(end, tz='UTC')
# Fail out if only one bound is provided
if ((start is None) or (end is None)) and (start != end):
raise PipelineDateError(start=start, end=end)
# Check if start and end are provided, and if the sim_params need to read
# a start and end from the DataSource
if start is None:
overwrite_sim_params = True
else:
overwrite_sim_params = False
symbols = kwargs['symbols'].split(',')
asset_identifier = kwargs['metadata_index']
# Pull asset metadata
asset_metadata = kwargs.get('asset_metadata', None)
asset_metadata_path = kwargs['metadata_path']
# Read in a CSV file, if applicable
if asset_metadata_path is not None:
if os.path.isfile(asset_metadata_path):
asset_metadata = pd.read_csv(asset_metadata_path,
index_col=asset_identifier)
source_arg = kwargs['source']
source_time_column = kwargs['source_time_column']
if source_arg is None:
raise NoSourceError()
elif source_arg == 'yahoo':
source = zipline.data.load_bars_from_yahoo(
stocks=symbols, start=start, end=end)
elif os.path.isfile(source_arg):
source = zipline.data.load_prices_from_csv(
filepath=source_arg,
identifier_col=source_time_column
)
elif os.path.isdir(source_arg):
source = zipline.data.load_prices_from_csv_folder(
folderpath=source_arg,
identifier_col=source_time_column
)
else:
raise NotImplementedError(
'Source %s not implemented.' % kwargs['source'])
algo_text = kwargs.get('algo_text', None)
if algo_text is None:
# Expect algofile to be set
algo_fname = kwargs['algofile']
with open(algo_fname, 'r') as fd:
algo_text = fd.read()
analyze_fname = os.path.splitext(algo_fname)[0] + '_analyze.py'
if os.path.exists(analyze_fname):
with open(analyze_fname, 'r') as fd:
# Simply append
algo_text += fd.read()
if print_algo:
if PYGMENTS:
highlight(algo_text, PythonLexer(), TerminalFormatter(),
outfile=sys.stdout)
else:
print_(algo_text)
algo = zipline.TradingAlgorithm(script=algo_text,
namespace=kwargs.get('namespace', {}),
capital_base=float(kwargs['capital_base']),
algo_filename=kwargs.get('algofile'),
asset_metadata=asset_metadata,
identifiers=symbols,
start=start,
end=end)
perf = algo.run(source, overwrite_sim_params=overwrite_sim_params)
output_fname = kwargs.get('output', None)
if output_fname is not None:
perf.to_pickle(output_fname)
return perf
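def _run_pipeline_example(algofile='my_algo.py'):
    # Illustrative sketch (not part of the original zipline module): drive the
    # same pipeline from Python instead of the command line. The algofile path
    # is a hypothetical placeholder for a script defining initialize() and
    # handle_data(); prices come from the default yahoo source.
    kwargs = parse_args(['-f', algofile, '-s', '2013-01-01', '-e', '2014-01-01'])
    return run_pipeline(**kwargs)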
|
apache-2.0
|
returnandrisk/meucci-python
|
dynamic_allocation_performance_analysis.py
|
1
|
7540
|
"""
Python code for blog post "mini-Meucci : Applying The Checklist - Steps 10+"
http://www.returnandrisk.com/2016/07/mini-meucci-applying-checklist-steps-10.html
Copyright (c) 2016 Peter Chan (peter-at-return-and-risk-dot-com)
"""
###############################################################################
# Dynamic Allocation
###############################################################################
#%matplotlib inline
import rnr_meucci_functions as rnr
import numpy as np
from zipline.api import (set_slippage, slippage, set_commission, commission,
order_target_percent, record, schedule_function,
date_rules, time_rules, get_datetime, symbol)
# Set tickers for data loading i.e. DJIA constituents and DIA ETF for benchmark
tickers = ['MMM','AXP','AAPL','BA','CAT','CVX','CSCO','KO','DD','XOM','GE','GS',
'HD','INTC','IBM','JNJ','JPM','MCD','MRK','MSFT','NKE','PFE','PG',
'TRV','UNH','UTX','VZ','V','WMT','DIS', 'DIA']
# Set investable asset tickers
asset_tickers = ['MMM','AXP','AAPL','BA','CAT','CVX','CSCO','KO','DD','XOM','GE','GS',
'HD','INTC','IBM','JNJ','JPM','MCD','MRK','MSFT','NKE','PFE','PG',
'TRV','UNH','UTX','VZ','V','WMT','DIS']
def initialize(context):
# Turn off the slippage model
set_slippage(slippage.FixedSlippage(spread=0.0))
# Set the commission model
set_commission(commission.PerShare(cost=0.01, min_trade_cost=1.0))
context.day = -1 # using zero-based counter for days
context.set_benchmark(symbol('DIA'))
context.assets = []
print('Setup investable assets...')
for ticker in asset_tickers:
#print(ticker)
context.assets.append(symbol(ticker))
context.n_asset = len(context.assets)
context.n_portfolio = 40 # num mean-variance efficient portfolios to compute
context.today = None
context.tau = None
context.min_data_window = 756 # min of 3 yrs data for calculations
context.first_rebal_date = None
context.first_rebal_idx = None
context.weights = None
# Schedule dynamic allocation calcs to occur 1 day before month end - note that
# actual trading will occur on the close on the last trading day of the month
schedule_function(rebalance,
date_rule=date_rules.month_end(days_offset=1),
time_rule=time_rules.market_close())
# Record some stuff every day
schedule_function(record_vars,
date_rule=date_rules.every_day(),
time_rule=time_rules.market_close())
def handle_data(context, data):
context.day += 1
#print(context.day)
def rebalance(context, data):
# Wait for 756 trading days (3 yrs) of historical prices before trading
if context.day < context.min_data_window - 1:
return
# Get expanding window of past prices and compute returns
context.today = get_datetime().date()
prices = data.history(context.assets, "price", context.day, "1d")
if context.first_rebal_date is None:
context.first_rebal_date = context.today
context.first_rebal_idx = context.day
print('Starting dynamic allocation simulation...')
# Get investment horizon in days ie number of trading days next month
context.tau = rnr.get_num_days_nxt_month(context.today.month, context.today.year)
# Calculate HFP distribution
asset_rets = np.array(prices.pct_change(context.tau).iloc[context.tau:, :])
num_scenarios = len(asset_rets)
# Set Flexible Probabilities Using Exponential Smoothing (see _flex_probs_sketch below)
half_life_prjn = 252 * 2 # in days
lambda_prjn = np.log(2) / half_life_prjn
probs_prjn = np.exp(-lambda_prjn * (np.arange(0, num_scenarios)[::-1]))
probs_prjn = probs_prjn / sum(probs_prjn)
mu_pc, sigma2_pc = rnr.fp_mean_cov(asset_rets.T, probs_prjn)
# Perform shrinkage to mitigate estimation risk
mu_shrk, sigma2_shrk = rnr.simple_shrinkage(mu_pc, sigma2_pc)
weights, _, _ = rnr.efficient_frontier_qp_rets(context.n_portfolio,
sigma2_shrk, mu_shrk)
print('Optimal weights calculated 1 day before month end on %s (day=%s)' \
% (context.today, context.day))
#print(weights)
min_var_weights = weights[0,:]
# Rebalance portfolio accordingly
for stock, weight in zip(prices.columns, min_var_weights):
order_target_percent(stock, np.asscalar(weight))
context.weights = min_var_weights
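def _flex_probs_sketch(num_scenarios, half_life=252 * 2):
    # Illustrative sketch (not part of the original strategy code): the
    # exponential-decay "flexible probabilities" used inside rebalance() above,
    # isolated for clarity. The most recent scenario gets the largest weight,
    # weights halve every `half_life` scenarios, and they sum to one.
    lam = np.log(2) / half_life
    probs = np.exp(-lam * np.arange(num_scenarios)[::-1])
    return probs / probs.sum()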
def record_vars(context, data):
record(weights=context.weights, tau=context.tau)
def analyze(perf, bm_value, start_idx):
pd.DataFrame({'portfolio': perf.portfolio_value, 'benchmark': bm_value})\
.iloc[start_idx:, :].plot(title='Portfolio Performance vs Benchmark')
if __name__ == '__main__':
from datetime import datetime
import pytz
from zipline.algorithm import TradingAlgorithm
from zipline.utils.factory import load_bars_from_yahoo
import pandas as pd
import matplotlib.pyplot as plt
# Create and run the algorithm.
algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data)
start = datetime(2010, 5, 1, 0, 0, 0, 0, pytz.utc)
end = datetime(2016, 5, 31, 0, 0, 0, 0, pytz.utc)
print('Getting Yahoo data for 30 DJIA stocks and DIA ETF as benchmark...')
data = load_bars_from_yahoo(stocks=tickers, start=start, end=end)
# Check price data
data.loc[:, :, 'price'].plot(figsize=(8,7), title='Input Price Data')
plt.ylabel('price in $');
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))
plt.show()
# Run algorithm
results = algo.run(data)
# Fix possible issue with timezone
results.index = results.index.normalize()
if results.index.tzinfo is None:
results.index = results.index.tz_localize('UTC')
# Adjust benchmark returns for delayed trading due to 3 year min data window
bm_rets = algo.perf_tracker.all_benchmark_returns
bm_rets[0:algo.first_rebal_idx + 2] = 0
bm_rets.name = 'DIA'
bm_rets.index.freq = None
bm_value = algo.capital_base * np.cumprod(1+bm_rets)
# Plot portfolio and benchmark values
analyze(results, bm_value, algo.first_rebal_idx + 1)
print('End value portfolio = {:.0f}'.format(results.portfolio_value.ix[-1]))
print('End value benchmark = {:.0f}'.format(bm_value[-1]))
# Plot end weights
pd.DataFrame(results.weights.ix[-1], index=asset_tickers, columns=['w'])\
.sort_values('w', ascending=False).plot(kind='bar', \
title='End Simulation Weights', legend=None);
###############################################################################
# Sequel Step - Ex-post performance analysis
###############################################################################
import pyfolio as pf
returns, positions, transactions, gross_lev = pf.utils.\
extract_rets_pos_txn_from_zipline(results)
trade_start = results.index[algo.first_rebal_idx + 1]
trade_end = datetime(2016, 5, 31, 0, 0, 0, 0, pytz.utc)
print('Annualised volatility of the portfolio = {:.4}'.\
format(pf.timeseries.annual_volatility(returns[trade_start:trade_end])))
print('Annualised volatility of the benchmark = {:.4}'.\
format(pf.timeseries.annual_volatility(bm_rets[trade_start:trade_end])))
print('')
pf.create_returns_tear_sheet(returns[trade_start:trade_end],
benchmark_rets=bm_rets[trade_start:trade_end],
return_fig=False)
|
mit
|
mne-tools/mne-tools.github.io
|
0.12/_downloads/plot_eog_artifact_histogram.py
|
22
|
1474
|
"""
========================
Show EOG artifact timing
========================
Compute the distribution of timing for EOG artifacts.
"""
# Authors: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname, preload=True)
events = mne.find_events(raw, 'STI 014')
eog_event_id = 512
eog_events = mne.preprocessing.find_eog_events(raw, eog_event_id)
raw.add_events(eog_events, 'STI 014')
# Read epochs
picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=True, eog=False)
tmin, tmax = -0.2, 0.5
event_ids = {'AudL': 1, 'AudR': 2, 'VisL': 3, 'VisR': 4}
epochs = mne.Epochs(raw, events, event_ids, tmin, tmax, picks=picks)
# Get the stim channel data
pick_ch = mne.pick_channels(epochs.ch_names, ['STI 014'])[0]
data = epochs.get_data()[:, pick_ch, :].astype(int)
data = np.sum((data.astype(int) & 512) == 512, axis=0)
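# Illustrative check (not part of the original example): the bitwise mask above
# counts, per time sample, how many epochs carry the injected EOG event code
# (512) on the stim channel. A tiny toy array makes the logic explicit.
_toy = np.array([[0, 512, 513],
                 [512, 0, 1]])
assert np.sum((_toy & 512) == 512, axis=0).tolist() == [1, 1, 1]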
###############################################################################
# Plot EOG artifact distribution
plt.stem(1e3 * epochs.times, data)
plt.xlabel('Times (ms)')
plt.ylabel('Blink counts (from %s trials)' % len(epochs))
plt.show()
|
bsd-3-clause
|
frongk/tilde_stuff
|
verboseUsers.py
|
1
|
1853
|
from bs4 import BeautifulSoup
import urllib
import re
#import pickle
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
#FOR TESTING
#htmlIn = urllib.urlopen("http://tilde.town/~fr4nk")
#soup=BeautifulSoup(htmlIn,'html.parser')
#text=soup.prettify()
#text = soup.get_text()
#print text
def getUsers():
htmlIn = urllib.urlopen("http://tilde.town/")
soup=BeautifulSoup(htmlIn,'html.parser')
userL = '/~'
tildeList = []
for link in soup.find_all('a'):
if re.search(userL,link.get('href')):
tildeList.append( link.get('href'))
return tildeList
userPageSet = []
userPageSplit = []
userList = []
for idx, user in enumerate(getUsers()):
if re.search('http',user) is None:
userList.append(user)
userUrl = "http://tilde.town" + user
htmlIn = urllib.urlopen(userUrl)
soup = BeautifulSoup(htmlIn,'html.parser')
#clean user pages here for special characters such as \n or \t
userPageSet.append( soup.get_text() )
userPageSplit.append(userPageSet[-1].split())
print idx, user
#get basic word counts
wordcounts = []
for page in userPageSplit:
wordcounts.append(len(page))
wcountdf = pd.DataFrame({'users': userList, 'words': wordcounts})
wcountdf = wcountdf.drop_duplicates()
wcountdf = wcountdf.sort(['words'],ascending=[0])
wcountTop = wcountdf[:10]
sns.barplot(x = "words", y = "users", data = wcountTop)
plt.title('Most Verbose Users')
plt.xlabel('# of words scraped from page')
#for page in userPageSet:
#
#
#print userPageSet
# Saving the objects:
#with open('userPageSet.pickle', 'w') as f:
# pickle.dump([userPageSet], f)
# Getting back the objects:
#with open('userPageSet.pickle') as f:
# userPageSet = pickle.load(f)
|
gpl-2.0
|
hainm/scikit-learn
|
examples/plot_digits_pipe.py
|
250
|
1809
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
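###############################################################################
# Illustrative sketch (not part of the original example): the same '__'
# convention sets nested pipeline parameters directly, outside of GridSearchCV.
pipe.set_params(pca__n_components=30, logistic__C=1.0)
print(pipe.get_params()['pca__n_components'])  # -> 30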
|
bsd-3-clause
|
RayMick/scikit-learn
|
sklearn/ensemble/tests/test_gradient_boosting.py
|
56
|
37976
|
"""
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.validation import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset.
for loss in ('deviance', 'exponential'):
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert np.any(deviance_decrease >= 0.0), \
"Train deviance does not monotonically decrease."
leaves = clf.apply(X)
assert_equal(leaves.shape, (6, 10, 1))
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def test_classification_synthetic():
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
for loss in ('deviance', 'exponential'):
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.09, \
"GB(loss={}) failed with error {}".format(loss, error_rate)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
max_depth=1,
learning_rate=1.0, subsample=0.5,
random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.08, ("Stochastic GradientBoostingClassifier(loss={}) "
"failed with error {}".format(loss, error_rate))
def test_boston():
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
for loss in ("ls", "lad", "huber"):
for subsample in (1.0, 0.5):
last_y_pred = None
for i, sample_weight in enumerate(
(None, np.ones(len(boston.target)),
2 * np.ones(len(boston.target)))):
clf = GradientBoostingRegressor(n_estimators=100, loss=loss,
max_depth=4, subsample=subsample,
min_samples_split=1,
random_state=1)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
leaves = clf.apply(boston.data)
assert_equal(leaves.shape, (506, 100))
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert mse < 6.0, "Failed with loss %s and " \
"mse = %.4f" % (loss, mse)
if last_y_pred is not None:
np.testing.assert_array_almost_equal(
last_y_pred, y_pred,
err_msg='pred_%d does not match last pred_%d for loss %r and subsample %r. '
% (i, i - 1, loss, subsample))
last_y_pred = y_pred
def test_iris():
# Check consistency on dataset iris.
for subsample in (1.0, 0.5):
for sample_weight in (None, np.ones(len(iris.target))):
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=subsample)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (subsample, score)
leaves = clf.apply(iris.data)
assert_equal(leaves.shape, (150, 100, 3))
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
# `Bagging Predictors`. Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 1, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor()
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 5.0, "Failed on Friedman1 with mse = %.4f" % mse
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 1700.0, "Failed on Friedman2 with mse = %.4f" % mse
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 0.015, "Failed on Friedman3 with mse = %.4f" % mse
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=1, random_state=1)
clf.fit(X, y)
#feature_importances = clf.feature_importances_
assert_true(hasattr(clf, 'feature_importances_'))
X_new = clf.transform(X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = clf.feature_importances_ > clf.feature_importances_.mean()
assert_array_almost_equal(X_new, X[:, feature_mask])
# true feature importance ranking
# true_ranking = np.array([3, 1, 8, 2, 10, 9, 4, 11, 0, 6, 7, 5, 12])
# assert_array_equal(true_ranking, feature_importances.argsort())
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
from scipy import sparse
X_sparse = sparse.csr_matrix(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(TypeError, clf.fit, X_sparse, y)
clf = GradientBoostingClassifier().fit(X, y)
assert_raises(TypeError, clf.predict, X_sparse)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert clf.oob_improvement_.shape[0] == 100
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
# Test if oob improvement has correct shape.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (0.5, score)
assert clf.oob_improvement_.shape[0] == clf.n_estimators
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one for 1-10 and then 9 for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert est.estimators_[0, 0].max_depth == 1
for i in range(1, 11):
assert est.estimators_[-i, 0].max_depth == 2
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
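def oob_early_stopping_monitor(i, est, locals):
    """Illustrative sketch (not used by the tests below; assumes subsample < 1 so
    that ``oob_improvement_`` exists): stop as soon as the out-of-bag improvement
    of the current stage turns negative."""
    return est.oob_improvement_[i] < 0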
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_min_weight_leaf():
# Regression test for issue #4447
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1],
]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
gb = GradientBoostingRegressor(n_estimators=5, min_weight_fraction_leaf=0.1)
gb.fit(X, y, sample_weight=sample_weight)
assert_true(gb.predict([[1, 0]])[0] > 0.5)
assert_almost_equal(gb.estimators_[0, 0].splitter.min_weight_leaf, 0.2)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
gb = GradientBoostingClassifier(n_estimators=5)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
|
bsd-3-clause
|
andim/scipy
|
scipy/special/basic.py
|
2
|
63696
|
#
# Author: Travis Oliphant, 2002
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy._lib.six import xrange
from numpy import (pi, asarray, floor, isscalar, iscomplex, real, imag, sqrt,
where, mgrid, sin, place, issubdtype, extract,
less, inexact, nan, zeros, atleast_1d, sinc)
from ._ufuncs import (ellipkm1, mathieu_a, mathieu_b, iv, jv, gamma, psi, zeta,
hankel1, hankel2, yv, kv, gammaln, ndtri, errprint, poch,
binom)
from . import specfun
from . import orthogonal
__all__ = ['agm', 'ai_zeros', 'assoc_laguerre', 'bei_zeros', 'beip_zeros',
'ber_zeros', 'bernoulli', 'berp_zeros', 'bessel_diff_formula',
'bi_zeros', 'clpmn', 'comb', 'digamma', 'diric', 'ellipk',
'erf_zeros', 'erfcinv', 'erfinv', 'errprint', 'euler', 'factorial',
'factorialk', 'factorial2', 'fresnel_zeros',
'fresnelc_zeros', 'fresnels_zeros', 'gamma', 'gammaln', 'h1vp',
'h2vp', 'hankel1', 'hankel2', 'hyp0f1', 'iv', 'ivp', 'jn_zeros',
'jnjnp_zeros', 'jnp_zeros', 'jnyn_zeros', 'jv', 'jvp', 'kei_zeros',
'keip_zeros', 'kelvin_zeros', 'ker_zeros', 'kerp_zeros', 'kv',
'kvp', 'lmbda', 'lpmn', 'lpn', 'lqmn', 'lqn', 'mathieu_a',
'mathieu_b', 'mathieu_even_coef', 'mathieu_odd_coef', 'ndtri',
'obl_cv_seq', 'pbdn_seq', 'pbdv_seq', 'pbvv_seq', 'perm',
'polygamma', 'pro_cv_seq', 'psi', 'riccati_jn', 'riccati_yn',
'sinc', 'sph_in', 'sph_inkn',
'sph_jn', 'sph_jnyn', 'sph_kn', 'sph_yn', 'y0_zeros', 'y1_zeros',
'y1p_zeros', 'yn_zeros', 'ynp_zeros', 'yv', 'yvp', 'zeta',
'SpecialFunctionWarning']
class SpecialFunctionWarning(Warning):
"""Warning that can be issued with ``errprint(True)``"""
pass
warnings.simplefilter("always", category=SpecialFunctionWarning)
def diric(x, n):
"""Periodic sinc function, also called the Dirichlet function.
The Dirichlet function is defined as::
diric(x) = sin(x * n/2) / (n * sin(x / 2)),
where n is a positive integer.
Parameters
----------
x : array_like
Input data
n : int
Integer defining the periodicity.
Returns
-------
diric : ndarray
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-8*np.pi, 8*np.pi, num=201)
>>> plt.figure(figsize=(8,8))
>>> for idx, n in enumerate([2,3,4,9]):
... plt.subplot(2, 2, idx+1)
... plt.plot(x, special.diric(x, n))
... plt.title('diric, n={}'.format(n))
>>> plt.show()
The following example demonstrates that `diric` gives the magnitudes
(modulo the sign and scaling) of the Fourier coefficients of a
rectangular pulse.
Suppress output of values that are effectively 0:
>>> np.set_printoptions(suppress=True)
Create a signal `x` of length `m` with `k` ones:
>>> m = 8
>>> k = 3
>>> x = np.zeros(m)
>>> x[:k] = 1
Use the FFT to compute the Fourier transform of `x`, and
inspect the magnitudes of the coefficients:
>>> np.abs(np.fft.fft(x))
array([ 3. , 2.41421356, 1. , 0.41421356, 1. ,
0.41421356, 1. , 2.41421356])
Now find the same values (up to sign) using `diric`. We multiply
by `k` to account for the different scaling conventions of
`numpy.fft.fft` and `diric`:
>>> theta = np.linspace(0, 2*np.pi, m, endpoint=False)
>>> k * special.diric(theta, k)
array([ 3. , 2.41421356, 1. , -0.41421356, -1. ,
-0.41421356, 1. , 2.41421356])
"""
x, n = asarray(x), asarray(n)
n = asarray(n + (x-x))
x = asarray(x + (n-n))
if issubdtype(x.dtype, inexact):
ytype = x.dtype
else:
ytype = float
y = zeros(x.shape, ytype)
# empirical minval for 32, 64 or 128 bit float computations
# where sin(x/2) < minval, result is fixed at +1 or -1
if np.finfo(ytype).eps < 1e-18:
minval = 1e-11
elif np.finfo(ytype).eps < 1e-15:
minval = 1e-7
else:
minval = 1e-3
mask1 = (n <= 0) | (n != floor(n))
place(y, mask1, nan)
x = x / 2
denom = sin(x)
mask2 = (1-mask1) & (abs(denom) < minval)
xsub = extract(mask2, x)
nsub = extract(mask2, n)
zsub = xsub / pi
place(y, mask2, pow(-1, np.round(zsub)*(nsub-1)))
mask = (1-mask1) & (1-mask2)
xsub = extract(mask, x)
nsub = extract(mask, n)
dsub = extract(mask, denom)
place(y, mask, sin(nsub*xsub)/(nsub*dsub))
return y
def jnjnp_zeros(nt):
"""Compute nt zeros of Bessel functions Jn and Jn'.
Results are arranged in order of the magnitudes of the zeros.
Parameters
----------
nt : int
Number (<=1200) of zeros to compute
Returns
-------
zo[l-1] : ndarray
Value of the lth zero of Jn(x) and Jn'(x). Of length `nt`.
n[l-1] : ndarray
Order of the Jn(x) or Jn'(x) associated with lth zero. Of length `nt`.
m[l-1] : ndarray
Serial number of the zeros of Jn(x) or Jn'(x) associated
with lth zero. Of length `nt`.
t[l-1] : ndarray
0 if lth zero in zo is zero of Jn(x), 1 if it is a zero of Jn'(x). Of
length `nt`.
See Also
--------
jn_zeros, jnp_zeros : to get separated arrays of zeros.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt > 1200):
raise ValueError("Number must be integer <= 1200.")
nt = int(nt)
n, m, t, zo = specfun.jdzo(nt)
return zo[1:nt+1], n[:nt], m[:nt], t[:nt]
def jnyn_zeros(n, nt):
"""Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x).
Returns 4 arrays of length nt, corresponding to the first nt zeros of
Jn(x), Jn'(x), Yn(x), and Yn'(x), respectively.
Parameters
----------
n : int
Order of the Bessel functions
nt : int
Number (<=1200) of zeros to compute
See jn_zeros, jnp_zeros, yn_zeros, ynp_zeros to get separate arrays.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(nt) and isscalar(n)):
raise ValueError("Arguments must be scalars.")
if (floor(n) != n) or (floor(nt) != nt):
raise ValueError("Arguments must be integers.")
if (nt <= 0):
raise ValueError("nt > 0")
return specfun.jyzo(abs(n), nt)
def jn_zeros(n, nt):
"""Compute nt zeros of Bessel function Jn(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[0]
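# A hedged usage sketch (assumes jn_zeros is re-exported as
# scipy.special.jn_zeros, consistent with __all__ above; values approximate):
#
#     >>> from scipy import special
#     >>> special.jn_zeros(0, 3)   # first three positive zeros of J0(x)
#     array([ 2.40482556,  5.52007811,  8.65372791])   # approximately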
def jnp_zeros(n, nt):
"""Compute nt zeros of Bessel function derivative Jn'(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[1]
def yn_zeros(n, nt):
"""Compute nt zeros of Bessel function Yn(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[2]
def ynp_zeros(n, nt):
"""Compute nt zeros of Bessel function derivative Yn'(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[3]
def y0_zeros(nt, complex=False):
"""Compute nt zeros of Bessel function Y0(z), and derivative at each zero.
The derivatives are given by Y0'(z0) = -Y1(z0) at each zero z0.
Parameters
----------
nt : int
Number of zeros to return
complex : bool, default False
Set to False to return only the real zeros; set to True to return only
the complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z0n : ndarray
Location of nth zero of Y0(z)
y0pz0n : ndarray
Value of derivative Y0'(z0) for nth zero
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 0
kc = not complex
return specfun.cyzo(nt, kf, kc)
def y1_zeros(nt, complex=False):
"""Compute nt zeros of Bessel function Y1(z), and derivative at each zero.
The derivatives are given by Y1'(z1) = Y0(z1) at each zero z1.
Parameters
----------
nt : int
Number of zeros to return
complex : bool, default False
Set to False to return only the real zeros; set to True to return only
the complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z1n : ndarray
Location of nth zero of Y1(z)
y1pz1n : ndarray
Value of derivative Y1'(z1) for nth zero
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 1
kc = not complex
return specfun.cyzo(nt, kf, kc)
def y1p_zeros(nt, complex=False):
"""Compute nt zeros of Bessel derivative Y1'(z), and value at each zero.
The values are given by Y1(z1) at each z1 where Y1'(z1)=0.
Parameters
----------
nt : int
Number of zeros to return
complex : bool, default False
Set to False to return only the real zeros; set to True to return only
the complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z1pn : ndarray
Location of nth zero of Y1'(z)
y1z1pn : ndarray
Value of derivative Y1(z1) for nth zero
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 2
kc = not complex
return specfun.cyzo(nt, kf, kc)
def _bessel_diff_formula(v, z, n, L, phase):
# from AMS55.
# L(v,z) = J(v,z), Y(v,z), H1(v,z), H2(v,z), phase = -1
# L(v,z) = I(v,z) or exp(v*pi*i)K(v,z), phase = 1
# For K, you can pull out the exp((v-k)*pi*i) into the caller
p = 1.0
s = L(v-n, z)
for i in xrange(1, n+1):
p = phase * (p * (n-i+1)) / i # = choose(k, i)
s += p*L(v-n + i*2, z)
return s / (2.**n)
bessel_diff_formula = np.deprecate(_bessel_diff_formula,
message="bessel_diff_formula is a private function, do not use it!")
def jvp(v, z, n=1):
"""Compute nth derivative of Bessel function Jv(z) with respect to z.
Parameters
----------
v : float
Order of Bessel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return jv(v, z)
else:
return _bessel_diff_formula(v, z, n, jv, -1)
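# A small sanity-check sketch (not claimed by the original source): for n = 1
# the difference formula above reduces to the classical identity J0'(x) = -J1(x).
#
#     >>> from scipy import special
#     >>> x = 1.5
#     >>> abs(special.jvp(0, x, 1) + special.jv(1, x)) < 1e-12
#     True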
def yvp(v, z, n=1):
"""Compute nth derivative of Bessel function Yv(z) with respect to z.
Parameters
----------
v : float
Order of Bessel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return yv(v, z)
else:
return _bessel_diff_formula(v, z, n, yv, -1)
def kvp(v, z, n=1):
"""Compute nth derivative of modified Bessel function Kv(z) with respect to z.
Parameters
----------
v : float
Order of Bessel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 6.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return kv(v, z)
else:
return (-1)**n * _bessel_diff_formula(v, z, n, kv, 1)
def ivp(v, z, n=1):
"""Compute nth derivative of modified Bessel function Iv(z) with respect to z.
Parameters
----------
v : float
Order of Bessel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 6.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return iv(v, z)
else:
return _bessel_diff_formula(v, z, n, iv, 1)
def h1vp(v, z, n=1):
"""Compute nth derivative of Hankel function H1v(z) with respect to z.
Parameters
----------
v : float
Order of Hankel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return hankel1(v, z)
else:
return _bessel_diff_formula(v, z, n, hankel1, -1)
def h2vp(v, z, n=1):
"""Compute nth derivative of Hankel function H2v(z) with respect to z.
Parameters
----------
v : float
Order of Hankel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return hankel2(v, z)
else:
return _bessel_diff_formula(v, z, n, hankel2, -1)
def sph_jn(n, z):
"""Compute spherical Bessel function jn(z) and derivative.
This function computes the value and first derivative of jn(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of jn to compute
z : complex
Argument at which to evaluate
Returns
-------
jn : ndarray
Value of j0(z), ..., jn(z)
jnp : ndarray
First derivative j0'(z), ..., jn'(z)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)
else:
nm, jn, jnp = specfun.sphj(n1, z)
return jn[:(n+1)], jnp[:(n+1)]
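# A hedged usage sketch (assumes sph_jn is available as scipy.special.sph_jn
# in the scipy version this module belongs to). The order-0 spherical Bessel
# function has the closed form j0(z) = sin(z)/z, which the first entry of the
# returned array should reproduce:
#
#     >>> import numpy as np
#     >>> from scipy import special
#     >>> jn, jnp = special.sph_jn(2, 1.0)   # j0..j2 and derivatives at z = 1
#     >>> np.allclose(jn[0], np.sin(1.0) / 1.0)
#     True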
def sph_yn(n, z):
"""Compute spherical Bessel function yn(z) and derivative.
This function computes the value and first derivative of yn(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of yn to compute
z : complex
Argument at which to evaluate
Returns
-------
yn : ndarray
Value of y0(z), ..., yn(z)
ynp : ndarray
First derivative y0'(z), ..., yn'(z)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)
else:
nm, yn, ynp = specfun.sphy(n1, z)
return yn[:(n+1)], ynp[:(n+1)]
def sph_jnyn(n, z):
"""Compute spherical Bessel functions jn(z) and yn(z) and derivatives.
This function computes the value and first derivative of jn(z) and yn(z)
for all orders up to and including n.
Parameters
----------
n : int
Maximum order of jn and yn to compute
z : complex
Argument at which to evaluate
Returns
-------
jn : ndarray
Value of j0(z), ..., jn(z)
jnp : ndarray
First derivative j0'(z), ..., jn'(z)
yn : ndarray
Value of y0(z), ..., yn(z)
ynp : ndarray
First derivative y0'(z), ..., yn'(z)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)
else:
nm, yn, ynp = specfun.sphy(n1, z)
nm, jn, jnp = specfun.sphj(n1, z)
return jn[:(n+1)], jnp[:(n+1)], yn[:(n+1)], ynp[:(n+1)]
def sph_in(n, z):
"""Compute spherical Bessel function in(z) and derivative.
This function computes the value and first derivative of in(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of in to compute
z : complex
Argument at which to evaluate
Returns
-------
in : ndarray
Value of i0(z), ..., in(z)
inp : ndarray
First derivative i0'(z), ..., in'(z)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
nm, In, Inp, kn, knp = specfun.csphik(n1, z)
else:
nm, In, Inp = specfun.sphi(n1, z)
return In[:(n+1)], Inp[:(n+1)]
def sph_kn(n, z):
"""Compute spherical Bessel function kn(z) and derivative.
This function computes the value and first derivative of kn(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of kn to compute
z : complex
Argument at which to evaluate
Returns
-------
kn : ndarray
Value of k0(z), ..., kn(z)
knp : ndarray
First derivative k0'(z), ..., kn'(z)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, In, Inp, kn, knp = specfun.csphik(n1, z)
else:
nm, kn, knp = specfun.sphk(n1, z)
return kn[:(n+1)], knp[:(n+1)]
def sph_inkn(n, z):
"""Compute spherical Bessel functions in(z), kn(z), and derivatives.
This function computes the value and first derivative of in(z) and kn(z)
for all orders up to and including n.
Parameters
----------
n : int
Maximum order of in and kn to compute
z : complex
Argument at which to evaluate
Returns
-------
in : ndarray
Value of i0(z), ..., in(z)
inp : ndarray
First derivative i0'(z), ..., in'(z)
kn : ndarray
Value of k0(z), ..., kn(z)
knp : ndarray
First derivative k0'(z), ..., kn'(z)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, In, Inp, kn, knp = specfun.csphik(n1, z)
else:
nm, In, Inp = specfun.sphi(n1, z)
nm, kn, knp = specfun.sphk(n1, z)
return In[:(n+1)], Inp[:(n+1)], kn[:(n+1)], knp[:(n+1)]
def riccati_jn(n, x):
"""Compute Ricatti-Bessel function of the first kind and derivative.
This function computes the value and first derivative of the function for
all orders up to and including n.
Parameters
----------
n : int
Maximum order of function to compute
x : float
Argument at which to evaluate
Returns
-------
jn : ndarray
Value of j0(x), ..., jn(x)
jnp : ndarray
First derivative j0'(x), ..., jn'(x)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n == 0):
n1 = 1
else:
n1 = n
nm, jn, jnp = specfun.rctj(n1, x)
return jn[:(n+1)], jnp[:(n+1)]
def riccati_yn(n, x):
"""Compute Ricatti-Bessel function of the second kind and derivative.
This function computes the value and first derivative of the function for
all orders up to and including n.
Parameters
----------
n : int
Maximum order of function to compute
x : float
Argument at which to evaluate
Returns
-------
yn : ndarray
Value of y0(x), ..., yn(x)
ynp : ndarray
First derivative y0'(x), ..., yn'(x)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n == 0):
n1 = 1
else:
n1 = n
nm, jn, jnp = specfun.rcty(n1, x)
return jn[:(n+1)], jnp[:(n+1)]
def erfinv(y):
"""Inverse function for erf.
"""
return ndtri((y+1)/2.0)/sqrt(2)
def erfcinv(y):
"""Inverse function for erfc.
"""
return -ndtri(0.5*y)/sqrt(2)
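# Round-trip sketch (illustrative only; assumes the erf and erfc ufuncs from
# scipy.special): erfinv and erfcinv invert erf and erfc on their natural
# domains.
#
#     >>> from scipy import special
#     >>> abs(special.erf(special.erfinv(0.5)) - 0.5) < 1e-12
#     True
#     >>> abs(special.erfc(special.erfcinv(0.25)) - 0.25) < 1e-12
#     True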
def erf_zeros(nt):
"""Compute nt complex zeros of error function erf(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.cerzo(nt)
def fresnelc_zeros(nt):
"""Compute nt complex zeros of cosine Fresnel integral C(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(1, nt)
def fresnels_zeros(nt):
"""Compute nt complex zeros of sine Fresnel integral S(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(2, nt)
def fresnel_zeros(nt):
"""Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(2, nt), specfun.fcszo(1, nt)
def hyp0f1(v, z):
r"""Confluent hypergeometric limit function 0F1.
Parameters
----------
v, z : array_like
Input values.
Returns
-------
hyp0f1 : ndarray
The confluent hypergeometric limit function.
Notes
-----
This function is defined as:
.. math:: _0F_1(v, z) = \sum_{k=0}^{\infty}\frac{z^k}{(v)_k k!}.
It's also the limit as q -> infinity of ``1F1(q;v;z/q)``, and satisfies
the differential equation :math:`z f''(z) + v f'(z) = f(z)`.
"""
v = atleast_1d(v)
z = atleast_1d(z)
v, z = np.broadcast_arrays(v, z)
arg = 2 * sqrt(abs(z))
old_err = np.seterr(all='ignore') # for z=0, a<1 and num=inf, next lines
num = where(z.real >= 0, iv(v - 1, arg), jv(v - 1, arg))
den = abs(z)**((v - 1.0) / 2)
num *= gamma(v)
np.seterr(**old_err)
num[z == 0] = 1
den[z == 0] = 1
return num / den
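# Small check (illustrative sketch, not from the original docstring): at z = 0
# only the k = 0 term of the series survives, so 0F1(v, 0) = 1 for any
# admissible v.
#
#     >>> from scipy import special
#     >>> float(special.hyp0f1(2.5, 0.0))
#     1.0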
def assoc_laguerre(x, n, k=0.0):
"""Compute nth-order generalized (associated) Laguerre polynomial.
The polynomial :math:`L_n^{(\alpha)}(x)` is orthogonal over ``[0, inf)``,
with weighting function ``exp(-x) * x**alpha`` where ``alpha > -1``.
Notes
-----
`assoc_laguerre` is a simple wrapper around `eval_genlaguerre`, with
reversed argument order ``(x, n, k=0.0) --> (n, k, x)``.
"""
return orthogonal.eval_genlaguerre(n, k, x)
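# Usage sketch (hedged example, not part of the original file): for k = 0 and
# n = 1 the generalized Laguerre polynomial reduces to L_1(x) = 1 - x.
#
#     >>> from scipy import special
#     >>> special.assoc_laguerre(2.0, 1)
#     -1.0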
digamma = psi
def polygamma(n, x):
"""Polygamma function n.
This is the nth derivative of the digamma (psi) function.
Parameters
----------
n : array_like of int
The order of the derivative of `psi`.
x : array_like
Where to evaluate the polygamma function.
Returns
-------
polygamma : ndarray
The result.
Examples
--------
>>> from scipy import special
>>> x = [2, 3, 25.5]
>>> special.polygamma(1, x)
array([ 0.64493407, 0.39493407, 0.03999467])
>>> special.polygamma(0, x) == special.psi(x)
array([ True, True, True], dtype=bool)
"""
n, x = asarray(n), asarray(x)
fac2 = (-1.0)**(n+1) * gamma(n+1.0) * zeta(n+1, x)
return where(n == 0, psi(x), fac2)
def mathieu_even_coef(m, q):
r"""Fourier coefficients for even Mathieu and modified Mathieu functions.
The Fourier series of the even solutions of the Mathieu differential
equation are of the form
.. math:: \mathrm{ce}_{2n}(z, q) = \sum_{k=0}^{\infty} A_{(2n)}^{(2k)} \cos 2kz
.. math:: \mathrm{ce}_{2n+1}(z, q) = \sum_{k=0}^{\infty} A_{(2n+1)}^{(2k+1)} \cos (2k+1)z
This function returns the coefficients :math:`A_{(2n)}^{(2k)}` for even
input m=2n, and the coefficients :math:`A_{(2n+1)}^{(2k+1)}` for odd input
m=2n+1.
Parameters
----------
m : int
Order of Mathieu functions. Must be non-negative.
q : float (>=0)
Parameter of Mathieu functions. Must be non-negative.
Returns
-------
Ak : ndarray
Even or odd Fourier coefficients, corresponding to even or odd m.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/28.4#i
"""
if not (isscalar(m) and isscalar(q)):
raise ValueError("m and q must be scalars.")
if (q < 0):
raise ValueError("q >=0")
if (m != floor(m)) or (m < 0):
raise ValueError("m must be an integer >=0.")
if (q <= 1):
qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q
else:
qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q
km = int(qm + 0.5*m)
if km > 251:
print("Warning, too many predicted coefficients.")
kd = 1
m = int(floor(m))
if m % 2:
kd = 2
a = mathieu_a(m, q)
fc = specfun.fcoef(kd, m, q, a)
return fc[:km]
def mathieu_odd_coef(m, q):
r"""Fourier coefficients for even Mathieu and modified Mathieu functions.
The Fourier series of the odd solutions of the Mathieu differential
equation are of the form
.. math:: \mathrm{se}_{2n+1}(z, q) = \sum_{k=0}^{\infty} B_{(2n+1)}^{(2k+1)} \sin (2k+1)z
.. math:: \mathrm{se}_{2n+2}(z, q) = \sum_{k=0}^{\infty} B_{(2n+2)}^{(2k+2)} \sin (2k+2)z
This function returns the coefficients :math:`B_{(2n+2)}^{(2k+2)}` for even
input m=2n+2, and the coefficients :math:`B_{(2n+1)}^{(2k+1)}` for odd
input m=2n+1.
Parameters
----------
m : int
Order of Mathieu functions. Must be non-negative.
q : float (>=0)
Parameter of Mathieu functions. Must be non-negative.
Returns
-------
Bk : ndarray
Even or odd Fourier coefficients, corresponding to even or odd m.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(m) and isscalar(q)):
raise ValueError("m and q must be scalars.")
if (q < 0):
raise ValueError("q >=0")
if (m != floor(m)) or (m <= 0):
raise ValueError("m must be an integer > 0")
if (q <= 1):
qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q
else:
qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q
km = int(qm + 0.5*m)
if km > 251:
print("Warning, too many predicted coefficients.")
kd = 4
m = int(floor(m))
if m % 2:
kd = 3
b = mathieu_b(m, q)
fc = specfun.fcoef(kd, m, q, b)
return fc[:km]
def lpmn(m, n, z):
"""Associated Legendre function of the first kind, Pmn(z).
Computes the associated Legendre function of the first kind of order m and
degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
This function takes a real argument ``z``. For complex arguments ``z``
use clpmn instead.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : float
Input value.
Returns
-------
Pmn_z : (m+1, n+1) array
Values for all orders 0..m and degrees 0..n
Pmn_d_z : (m+1, n+1) array
Derivatives for all orders 0..m and degrees 0..n
See Also
--------
clpmn: associated Legendre functions of the first kind for complex z
Notes
-----
In the interval (-1, 1), Ferrer's function of the first kind is
returned. The phase convention used for the intervals (1, inf)
and (-inf, -1) is such that the result is always real.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/14.3
"""
if not isscalar(m) or (abs(m) > n):
raise ValueError("m must be <= n.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
if iscomplex(z):
raise ValueError("Argument must be real. Use clpmn instead.")
if (m < 0):
mp = -m
mf, nf = mgrid[0:mp+1, 0:n+1]
sv = errprint(0)
if abs(z) < 1:
# Ferrer function; DLMF 14.9.3
fixarr = where(mf > nf, 0.0,
(-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
else:
# Match to clpmn; DLMF 14.9.13
fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1))
sv = errprint(sv)
else:
mp = m
p, pd = specfun.lpmn(mp, n, z)
if (m < 0):
p = p * fixarr
pd = pd * fixarr
return p, pd
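# Usage sketch (hedged example; assumes lpmn is exposed as scipy.special.lpmn).
# For m = 0 the first row contains the ordinary Legendre polynomials, e.g.
# P2(x) = (3*x**2 - 1)/2:
#
#     >>> import numpy as np
#     >>> from scipy import special
#     >>> p, pd = special.lpmn(0, 2, 0.5)
#     >>> np.allclose(p[0], [1.0, 0.5, -0.125])   # P0, P1, P2 at x = 0.5
#     True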
def clpmn(m, n, z, type=3):
"""Associated Legendre function of the first kind, Pmn(z).
Computes the associated Legendre function of the first kind of order m and
degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : float or complex
Input value.
type : int, optional
takes values 2 or 3
2: cut on the real axis ``|x| > 1``
3: cut on the real axis ``-1 < x < 1`` (default)
Returns
-------
Pmn_z : (m+1, n+1) array
Values for all orders ``0..m`` and degrees ``0..n``
Pmn_d_z : (m+1, n+1) array
Derivatives for all orders ``0..m`` and degrees ``0..n``
See Also
--------
lpmn: associated Legendre functions of the first kind for real z
Notes
-----
By default, i.e. for ``type=3``, phase conventions are chosen according
to [1]_ such that the function is analytic. The cut lies on the interval
(-1, 1). Approaching the cut from above or below in general yields a phase
factor with respect to Ferrer's function of the first kind
(cf. `lpmn`).
For ``type=2`` a cut at ``|x| > 1`` is chosen. Approaching the real values
on the interval (-1, 1) in the complex plane yields Ferrer's function
of the first kind.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/14.21
"""
if not isscalar(m) or (abs(m) > n):
raise ValueError("m must be <= n.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
if not(type == 2 or type == 3):
raise ValueError("type must be either 2 or 3.")
if (m < 0):
mp = -m
mf, nf = mgrid[0:mp+1, 0:n+1]
sv = errprint(0)
if type == 2:
fixarr = where(mf > nf, 0.0,
(-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
else:
fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1))
sv = errprint(sv)
else:
mp = m
p, pd = specfun.clpmn(mp, n, real(z), imag(z), type)
if (m < 0):
p = p * fixarr
pd = pd * fixarr
return p, pd
def lqmn(m, n, z):
"""Associated Legendre function of the second kind, Qmn(z).
Computes the associated Legendre function of the second kind of order m and
degree n, ``Qmn(z)`` = :math:`Q_n^m(z)`, and its derivative, ``Qmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Qmn(z)`` and
``Qmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : complex
Input value.
Returns
-------
Qmn_z : (m+1, n+1) array
Values for all orders 0..m and degrees 0..n
Qmn_d_z : (m+1, n+1) array
Derivatives for all orders 0..m and degrees 0..n
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(m) or (m < 0):
raise ValueError("m must be a non-negative integer.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
m = int(m)
n = int(n)
# Ensure neither m nor n == 0
mm = max(1, m)
nn = max(1, n)
if iscomplex(z):
q, qd = specfun.clqmn(mm, nn, z)
else:
q, qd = specfun.lqmn(mm, nn, z)
return q[:(m+1), :(n+1)], qd[:(m+1), :(n+1)]
def bernoulli(n):
"""Bernoulli numbers B0..Bn (inclusive).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
n = int(n)
if (n < 2):
n1 = 2
else:
n1 = n
return specfun.bernob(int(n1))[:(n+1)]
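# Illustrative check (hedged sketch, values per the usual B1 = -1/2 convention):
# the first few Bernoulli numbers are B0 = 1, B1 = -1/2, B2 = 1/6, B3 = 0,
# B4 = -1/30.
#
#     >>> import numpy as np
#     >>> from scipy import special
#     >>> np.allclose(special.bernoulli(4), [1.0, -0.5, 1/6., 0.0, -1/30.])
#     True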
def euler(n):
"""Euler numbers E0..En (inclusive).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
n = int(n)
if (n < 2):
n1 = 2
else:
n1 = n
return specfun.eulerb(n1)[:(n+1)]
def lpn(n, z):
"""Legendre functions of the first kind, Pn(z).
Compute sequence of Legendre functions of the first kind (polynomials),
Pn(z) and derivatives for all degrees from 0 to n (inclusive).
See also special.legendre for polynomial class.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
pn, pd = specfun.clpn(n1, z)
else:
pn, pd = specfun.lpn(n1, z)
return pn[:(n+1)], pd[:(n+1)]
def lqn(n, z):
"""Legendre functions of the second kind, Qn(z).
Compute sequence of Legendre functions of the second kind, Qn(z) and
derivatives for all degrees from 0 to n (inclusive).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
qn, qd = specfun.clqn(n1, z)
else:
qn, qd = specfun.lqnb(n1, z)
return qn[:(n+1)], qd[:(n+1)]
def ai_zeros(nt):
"""
Compute `nt` zeros and values of the Airy function Ai and its derivative.
Computes the first nt zeros, a, of the Airy function Ai(x); first nt zeros,
a', of the derivative of the Airy function Ai'(x); the corresponding values
Ai(a'); and the corresponding values Ai'(a).
Parameters
----------
nt : int
Number of zeros to compute
Returns
-------
a : ndarray
First nt zeros of Ai(x)
ap : ndarray
First nt zeros of Ai'(x)
ai : ndarray
Values of Ai(x) evaluated at first nt zeros of Ai'(x)
aip : ndarray
Values of Ai'(x) evaluated at first nt zeros of Ai(x)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
kf = 1
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be a positive integer scalar.")
return specfun.airyzo(nt, kf)
def bi_zeros(nt):
"""
Compute `nt` zeros and values of the Airy function Bi and its derivative.
Computes the first nt zeros, b, of the Airy function Bi(x); first nt zeros,
b', of the derivative of the Airy function Bi'(x); the corresponding values
Bi(b'); and the corresponding values Bi'(b).
Parameters
----------
nt : int
Number of zeros to compute
Returns
-------
b : ndarray
First nt zeros of Bi(x)
bp : ndarray
First nt zeros of Bi'(x)
bi : ndarray
Values of Bi(x) evaluated at first nt zeros of Bi'(x)
bip : ndarray
Values of Bi'(x) evaluated at first nt zeros of Bi(x)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
kf = 2
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be a positive integer scalar.")
return specfun.airyzo(nt, kf)
def lmbda(v, x):
"""Jahnke-Emden Lambda function, Lambdav(x).
Parameters
----------
v : float
Order of the Lambda function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
vl : ndarray
Values of Lambda_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dl : ndarray
Derivatives Lambda_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (v < 0):
raise ValueError("argument must be > 0.")
n = int(v)
v0 = v - n
if (n < 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
if (v != floor(v)):
vm, vl, dl = specfun.lamv(v1, x)
else:
vm, vl, dl = specfun.lamn(v1, x)
return vl[:(n+1)], dl[:(n+1)]
def pbdv_seq(v, x):
"""Parabolic cylinder functions Dv(x) and derivatives.
Parameters
----------
v : float
Order of the parabolic cylinder function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of D_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dp : ndarray
Derivatives D_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
n = int(v)
v0 = v-n
if (n < 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
dv, dp, pdf, pdd = specfun.pbdv(v1, x)
return dv[:n1+1], dp[:n1+1]
def pbvv_seq(v, x):
"""Parabolic cylinder functions Vv(x) and derivatives.
Parameters
----------
v : float
Order of the parabolic cylinder function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of V_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dp : ndarray
Derivatives V_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
n = int(v)
v0 = v-n
if (n <= 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
dv, dp, pdf, pdd = specfun.pbvv(v1, x)
return dv[:n1+1], dp[:n1+1]
def pbdn_seq(n, z):
"""Parabolic cylinder functions Dn(z) and derivatives.
Parameters
----------
n : int
Order of the parabolic cylinder function
z : complex
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of D_i(z), for i=0, ..., i=n.
dp : ndarray
Derivatives D_i'(z), for i=0, ..., i=n.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (floor(n) != n):
raise ValueError("n must be an integer.")
if (abs(n) <= 1):
n1 = 1
else:
n1 = n
cpb, cpd = specfun.cpbdn(n1, z)
return cpb[:n1+1], cpd[:n1+1]
def ber_zeros(nt):
"""Compute nt zeros of the Kelvin function ber(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 1)
def bei_zeros(nt):
"""Compute nt zeros of the Kelvin function bei(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 2)
def ker_zeros(nt):
"""Compute nt zeros of the Kelvin function ker(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 3)
def kei_zeros(nt):
"""Compute nt zeros of the Kelvin function kei(x).
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 4)
def berp_zeros(nt):
"""Compute nt zeros of the Kelvin function ber'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 5)
def beip_zeros(nt):
"""Compute nt zeros of the Kelvin function bei'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 6)
def kerp_zeros(nt):
"""Compute nt zeros of the Kelvin function ker'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 7)
def keip_zeros(nt):
"""Compute nt zeros of the Kelvin function kei'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 8)
def kelvin_zeros(nt):
"""Compute nt zeros of all Kelvin functions.
Returned in a length-8 tuple of arrays of length nt. The tuple contains
the arrays of zeros of (ber, bei, ker, kei, ber', bei', ker', kei').
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return (specfun.klvnzo(nt, 1),
specfun.klvnzo(nt, 2),
specfun.klvnzo(nt, 3),
specfun.klvnzo(nt, 4),
specfun.klvnzo(nt, 5),
specfun.klvnzo(nt, 6),
specfun.klvnzo(nt, 7),
specfun.klvnzo(nt, 8))
def pro_cv_seq(m, n, c):
"""Characteristic values for prolate spheroidal wave functions.
Compute a sequence of characteristic values for the prolate
spheroidal wave functions for mode m and n'=m..n and spheroidal
parameter c.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(m) and isscalar(n) and isscalar(c)):
raise ValueError("Arguments must be scalars.")
if (n != floor(n)) or (m != floor(m)):
raise ValueError("Modes must be integers.")
if (n-m > 199):
raise ValueError("Difference between n and m is too large.")
maxL = n-m+1
return specfun.segv(m, n, c, 1)[1][:maxL]
def obl_cv_seq(m, n, c):
"""Characteristic values for oblate spheroidal wave functions.
Compute a sequence of characteristic values for the oblate
spheroidal wave functions for mode m and n'=m..n and spheroidal
parameter c.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(m) and isscalar(n) and isscalar(c)):
raise ValueError("Arguments must be scalars.")
if (n != floor(n)) or (m != floor(m)):
raise ValueError("Modes must be integers.")
if (n-m > 199):
raise ValueError("Difference between n and m is too large.")
maxL = n-m+1
return specfun.segv(m, n, c, -1)[1][:maxL]
def ellipk(m):
"""Complete elliptic integral of the first kind.
This function is defined as
.. math:: K(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
Parameters
----------
m : array_like
The parameter of the elliptic integral.
Returns
-------
K : array_like
Value of the elliptic integral.
Notes
-----
For more precision around point m = 1, use `ellipkm1`.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind around m = 1
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
"""
return ellipkm1(1 - asarray(m))
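# Quick check (illustrative sketch): at m = 0 the integrand is identically 1,
# so K(0) = pi/2.
#
#     >>> import numpy as np
#     >>> from scipy import special
#     >>> abs(special.ellipk(0.0) - np.pi / 2) < 1e-12
#     True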
def agm(a, b):
"""Arithmetic, Geometric Mean.
Start with a_0=a and b_0=b and iteratively compute
a_{n+1} = (a_n+b_n)/2
b_{n+1} = sqrt(a_n*b_n)
until a_n=b_n. The result is agm(a,b)
agm(a,b)=agm(b,a)
agm(a,a) = a
min(a,b) < agm(a,b) < max(a,b)
"""
s = a + b + 0.0
return (pi / 4) * s / ellipkm1(4 * a * b / s ** 2)
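# Worked example (hedged sketch; the numeric value is approximate): the
# iteration described above converges very quickly, e.g. agm(1, 2) is about
# 1.45679, which indeed lies between min(a, b) and max(a, b).
#
#     >>> from scipy import special
#     >>> special.agm(1.0, 2.0)    # approximately 1.45679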
def comb(N, k, exact=False, repetition=False):
"""The number of combinations of N things taken k at a time.
This is often expressed as "N choose k".
Parameters
----------
N : int, ndarray
Number of things.
k : int, ndarray
Number of elements taken.
exact : bool, optional
If `exact` is False, then floating point precision is used, otherwise
exact long integer is computed.
repetition : bool, optional
If `repetition` is True, then the number of combinations with
repetition is computed.
Returns
-------
val : int, ndarray
The total number of combinations.
Notes
-----
- Array arguments accepted only for exact=False case.
- If k > N, N < 0, or k < 0, then a 0 is returned.
Examples
--------
>>> from scipy.special import comb
>>> k = np.array([3, 4])
>>> n = np.array([10, 10])
>>> comb(n, k, exact=False)
array([ 120., 210.])
>>> comb(10, 3, exact=True)
120L
>>> comb(10, 3, exact=True, repetition=True)
220L
"""
if repetition:
return comb(N + k - 1, k, exact)
if exact:
N = int(N)
k = int(k)
if (k > N) or (N < 0) or (k < 0):
return 0
val = 1
for j in xrange(min(k, N-k)):
val = (val*(N-j))//(j+1)
return val
else:
k, N = asarray(k), asarray(N)
cond = (k <= N) & (N >= 0) & (k >= 0)
vals = binom(N, k)
if isinstance(vals, np.ndarray):
vals[~cond] = 0
elif not cond:
vals = np.float64(0)
return vals
def perm(N, k, exact=False):
"""Permutations of N things taken k at a time, i.e., k-permutations of N.
It's also known as "partial permutations".
Parameters
----------
N : int, ndarray
Number of things.
k : int, ndarray
Number of elements taken.
exact : bool, optional
If `exact` is False, then floating point precision is used, otherwise
exact long integer is computed.
Returns
-------
val : int, ndarray
The number of k-permutations of N.
Notes
-----
- Array arguments accepted only for exact=False case.
- If k > N, N < 0, or k < 0, then a 0 is returned.
Examples
--------
>>> from scipy.special import perm
>>> k = np.array([3, 4])
>>> n = np.array([10, 10])
>>> perm(n, k)
array([ 720., 5040.])
>>> perm(10, 3, exact=True)
720
"""
if exact:
if (k > N) or (N < 0) or (k < 0):
return 0
val = 1
for i in xrange(N - k + 1, N + 1):
val *= i
return val
else:
k, N = asarray(k), asarray(N)
cond = (k <= N) & (N >= 0) & (k >= 0)
vals = poch(N - k + 1, k)
if isinstance(vals, np.ndarray):
vals[~cond] = 0
elif not cond:
vals = np.float64(0)
return vals
def factorial(n, exact=False):
"""The factorial function, n! = special.gamma(n+1).
If exact is 0, then floating point precision is used, otherwise
exact long integer is computed.
- Array argument accepted only for exact=False case.
- If n<0, the return value is 0.
Parameters
----------
n : int or array_like of ints
Calculate ``n!``. Arrays are only supported with `exact` set
to False. If ``n < 0``, the return value is 0.
exact : bool, optional
The result can be approximated rapidly using the gamma-formula
above. If `exact` is set to True, calculate the
answer exactly using integer arithmetic. Default is False.
Returns
-------
nf : float or int
Factorial of `n`, as an integer or a float depending on `exact`.
Examples
--------
>>> from scipy.special import factorial
>>> arr = np.array([3,4,5])
>>> factorial(arr, exact=False)
array([ 6., 24., 120.])
>>> factorial(5, exact=True)
120L
"""
if exact:
if n < 0:
return 0
val = 1
for k in xrange(1, n+1):
val *= k
return val
else:
n = asarray(n)
vals = gamma(n+1)
return where(n >= 0, vals, 0)
def factorial2(n, exact=False):
"""Double factorial.
This is the factorial with every second value skipped. E.g., ``7!! = 7 * 5
* 3 * 1``. It can be approximated numerically as::
    n!! = special.gamma(n/2+1)*2**((n+1)/2)/sqrt(pi)  n odd
        = 2**(n/2) * (n/2)!                           n even
Parameters
----------
n : int or array_like
Calculate ``n!!``. Arrays are only supported with `exact` set
to False. If ``n < 0``, the return value is 0.
exact : bool, optional
The result can be approximated rapidly using the gamma-formula
above (default). If `exact` is set to True, calculate the
answer exactly using integer arithmetic.
Returns
-------
nff : float or int
Double factorial of `n`, as an int or a float depending on
`exact`.
Examples
--------
>>> from scipy.special import factorial2
>>> factorial2(7, exact=False)
array(105.00000000000001)
>>> factorial2(7, exact=True)
105L
"""
if exact:
if n < -1:
return 0
if n <= 0:
return 1
val = 1
for k in xrange(n, 0, -2):
val *= k
return val
else:
n = asarray(n)
vals = zeros(n.shape, 'd')
cond1 = (n % 2) & (n >= -1)
cond2 = (1-(n % 2)) & (n >= -1)
oddn = extract(cond1, n)
evenn = extract(cond2, n)
nd2o = oddn / 2.0
nd2e = evenn / 2.0
place(vals, cond1, gamma(nd2o + 1) / sqrt(pi) * pow(2.0, nd2o + 0.5))
place(vals, cond2, gamma(nd2e + 1) * pow(2.0, nd2e))
return vals
def factorialk(n, k, exact=True):
"""Multifactorial of n of order k, n(!!...!).
This is the multifactorial of n skipping k values. For example,
factorialk(17, 4) = 17!!!! = 17 * 13 * 9 * 5 * 1
In particular, for any integer ``n``, we have
factorialk(n, 1) = factorial(n)
factorialk(n, 2) = factorial2(n)
Parameters
----------
n : int
Calculate multifactorial. If `n` < 0, the return value is 0.
k : int
Order of multifactorial.
exact : bool, optional
If exact is set to True, calculate the answer exactly using
integer arithmetic.
Returns
-------
val : int
Multifactorial of `n`.
Raises
------
NotImplementedError
Raises when exact is False
Examples
--------
>>> from scipy.special import factorialk
>>> factorialk(5, 1, exact=True)
120L
>>> factorialk(5, 3, exact=True)
10L
"""
if exact:
if n < 1-k:
return 0
if n <= 0:
return 1
val = 1
for j in xrange(n, 0, -k):
val = val*j
return val
else:
raise NotImplementedError
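# Worked example (added note, not part of the original source): the docstring's
# factorialk(17, 4) = 17 * 13 * 9 * 5 * 1 evaluates to 9945 with exact=True.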
|
bsd-3-clause
|
jerli/sympy
|
sympy/external/importtools.py
|
85
|
7294
|
"""Tools to assist importing optional external modules."""
from __future__ import print_function, division
import sys
# Override these in the module to change the default warning behavior.
# For example, you might set both to False before running the tests so that
# warnings are not printed to the console, or set both to True for debugging.
WARN_NOT_INSTALLED = None # Default is False
WARN_OLD_VERSION = None # Default is True
def __sympy_debug():
# helper function from sympy/__init__.py
# We don't just import SYMPY_DEBUG from that file because we don't want to
# import all of sympy just to use this module.
import os
debug_str = os.getenv('SYMPY_DEBUG', 'False')
if debug_str in ('True', 'False'):
return eval(debug_str)
else:
raise RuntimeError("unrecognized value for SYMPY_DEBUG: %s" %
debug_str)
if __sympy_debug():
WARN_OLD_VERSION = True
WARN_NOT_INSTALLED = True
def import_module(module, min_module_version=None, min_python_version=None,
warn_not_installed=None, warn_old_version=None,
module_version_attr='__version__', module_version_attr_call_args=None,
__import__kwargs={}, catch=()):
"""
Import and return a module if it is installed.
If the module is not installed, it returns None.
A minimum version for the module can be given as the keyword argument
min_module_version. This should be comparable against the module version.
By default, module.__version__ is used to get the module version. To
override this, set the module_version_attr keyword argument. If the
attribute of the module to get the version should be called (e.g.,
module.version()), then set module_version_attr_call_args to the args such
that module.module_version_attr(*module_version_attr_call_args) returns the
module's version.
If the module version is less than min_module_version using the Python <
comparison, None will be returned, even if the module is installed. You can
use this to keep from importing an incompatible older version of a module.
You can also specify a minimum Python version by using the
min_python_version keyword argument. This should be comparable against
sys.version_info.
If the keyword argument warn_not_installed is set to True, the function will
emit a UserWarning when the module is not installed.
If the keyword argument warn_old_version is set to True, the function will
emit a UserWarning when the library is installed, but cannot be imported
because of the min_module_version or min_python_version options.
Note that because of the way warnings are handled, a warning will be
emitted for each module only once. You can change the default warning
behavior by overriding the values of WARN_NOT_INSTALLED and WARN_OLD_VERSION
in sympy.external.importtools. By default, WARN_NOT_INSTALLED is False and
WARN_OLD_VERSION is True.
This function uses __import__() to import the module. To pass additional
options to __import__(), use the __import__kwargs keyword argument. For
example, to import a submodule A.B, you must pass a nonempty fromlist option
to __import__. See the docstring of __import__().
This catches ImportError to determine if the module is not installed. To
catch additional errors, pass them as a tuple to the catch keyword
argument.
Examples
========
>>> from sympy.external import import_module
>>> numpy = import_module('numpy')
>>> numpy = import_module('numpy', min_python_version=(2, 7),
... warn_old_version=False)
>>> numpy = import_module('numpy', min_module_version='1.5',
... warn_old_version=False) # numpy.__version__ is a string
>>> # gmpy does not have __version__, but it does have gmpy.version()
>>> gmpy = import_module('gmpy', min_module_version='1.14',
... module_version_attr='version', module_version_attr_call_args=(),
... warn_old_version=False)
>>> # To import a submodule, you must pass a nonempty fromlist to
>>> # __import__(). The values do not matter.
>>> p3 = import_module('mpl_toolkits.mplot3d',
... __import__kwargs={'fromlist':['something']})
>>> # matplotlib.pyplot can raise RuntimeError when the display cannot be opened
>>> matplotlib = import_module('matplotlib',
... __import__kwargs={'fromlist':['pyplot']}, catch=(RuntimeError,))
"""
# keyword argument overrides default, and global variable overrides
# keyword argument.
warn_old_version = (WARN_OLD_VERSION if WARN_OLD_VERSION is not None
else warn_old_version or True)
warn_not_installed = (WARN_NOT_INSTALLED if WARN_NOT_INSTALLED is not None
else warn_not_installed or False)
import warnings
# Check Python first so we don't waste time importing a module we can't use
if min_python_version:
if sys.version_info < min_python_version:
if warn_old_version:
warnings.warn("Python version is too old to use %s "
"(%s or newer required)" % (
module, '.'.join(map(str, min_python_version))),
UserWarning)
return
# PyPy 1.6 has rudimentary NumPy support and importing it produces errors, so skip it
if module == 'numpy' and '__pypy__' in sys.builtin_module_names:
return
try:
mod = __import__(module, **__import__kwargs)
## there's something funny about imports with matplotlib and py3k. doing
## from matplotlib import collections
## gives python's stdlib collections module. explicitly re-importing
## the module fixes this.
from_list = __import__kwargs.get('fromlist', tuple())
for submod in from_list:
if submod == 'collections' and mod.__name__ == 'matplotlib':
__import__(module + '.' + submod)
except ImportError:
if warn_not_installed:
warnings.warn("%s module is not installed" % module, UserWarning)
return
except catch as e:
if warn_not_installed:
warnings.warn(
"%s module could not be used (%s)" % (module, repr(e)))
return
if min_module_version:
modversion = getattr(mod, module_version_attr)
if module_version_attr_call_args is not None:
modversion = modversion(*module_version_attr_call_args)
if modversion < min_module_version:
if warn_old_version:
# Attempt to create a pretty string version of the version
if isinstance(min_module_version, basestring):
verstr = min_module_version
elif isinstance(min_module_version, (tuple, list)):
verstr = '.'.join(map(str, min_module_version))
else:
                        # We don't know what this is; hopefully it's
                        # something that has a nice str version, like an int.
verstr = str(min_module_version)
warnings.warn("%s version is too old to use "
"(%s or newer required)" % (module, verstr),
UserWarning)
return
return mod
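# Illustrative usage sketch (added for exposition; not part of the original
# module). It demonstrates overriding the module-level warning flags described
# above; the module name 'scipy' and version '0.10' are just example values.
if __name__ == "__main__":
    WARN_NOT_INSTALLED = True   # warn if the optional module is missing
    WARN_OLD_VERSION = True     # warn if it is installed but too old
    scipy = import_module('scipy', min_module_version='0.10')
    print("scipy importable with version >= 0.10:", scipy is not None)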
|
bsd-3-clause
|
jungla/ICOM-fluidity-toolbox
|
2D/U/plot_Wn_v_snap.py
|
1
|
3823
|
import os, sys
import fio, myfun
import vtktools
import numpy as np
import matplotlib as mpl
mpl.use('ps')
import matplotlib.pyplot as plt
import matplotlib.tri as tri
from scipy import interpolate
import gc
gc.enable()
## READ archive (too many points... somehow)
# args: name, dayi, dayf, days
label = 'r_3k_B_1F0'
basename = 'ring'
dayi = 50
dayf = 51
days = 1
label = sys.argv[1]
basename = sys.argv[2]
dayi = int(sys.argv[3])
dayf = int(sys.argv[4])
days = int(sys.argv[5])
path = '/tamay2/mensa/fluidity/'+label+'/'
try: os.stat('./plot/'+label)
except OSError: os.mkdir('./plot/'+label)
# dimensions archives
xn = 300
zn = 100
Xlist = np.linspace(-150000,150000,xn)# x co-ordinates of the desired array shape
Zlist = np.linspace(0,-900,zn)# z co-ordinates of the desired array shape
[X,Z] = np.meshgrid(Xlist,Zlist)
X = np.reshape(X,(np.size(X),))
Z = np.reshape(Z,(np.size(Z),))
latitude = 0
for time in range(dayi,dayf,days):
tlabel = str(time)
while len(tlabel) < 3: tlabel = '0'+tlabel
#
file0 = basename + '_' + str(time) + '.pvtu'
filepath = path+file0
file1 = label+'_' + tlabel
fileout = path + file1
#
print 'opening: ', filepath
#
#
data = vtktools.vtu(filepath)
print 'extract V, R'
data.Crop(np.min(Xlist),np.max(Xlist),latitude-3000,latitude+3000,np.min(Zlist),np.max(Zlist))
coords = data.GetLocations()
V = data.GetVectorField('Velocity_CG')
Rho = data.GetScalarField('Density_CG')
del data
#
W = V[np.around(coords[:,1])==latitude,2]
R = Rho[np.around(coords[:,1])==latitude]
del V
del Rho
Cw = coords[np.around(coords[:,1])==latitude,:]
Wr = interpolate.griddata((Cw[:,0],Cw[:,2]),W,(X,Z),method='linear')
Wr = np.reshape(Wr,[len(Zlist),len(Xlist)])
Rr = interpolate.griddata((Cw[:,0],Cw[:,2]),R,(X,Z),method='linear')
Rr = np.reshape(Rr,[len(Zlist),len(Xlist)])
gc.collect()
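 # Note (added comment): the quantity computed below is plotted as N^2, i.e. the
 # buoyancy frequency -g/rho * d(rho)/dz, yet it differentiates Wr (vertical
 # velocity) rather than Rr (density); this may be intentional but looks suspect.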
N = np.zeros(Wr.shape)
for i in range(len(Xlist)):
N[:,i] = -9.81/Rr[:,i]*np.gradient(Wr[:,i])/np.gradient(Zlist)
Nm = np.mean(N,0)
Nm = np.mean(Nm,0)
Wn = np.zeros(Wr.shape)
for i in range(len(Xlist)):
Wn[:,i] = Wr[:,i]*N[:,i]/Nm
#
# mld = np.zeros([len(Xlist),len(Ylist)])
#
# for x in range(len(Xlist)):
# for y in range(len(Ylist)):
# ml = rho[:,x,y]
# mls = np.cumsum(ml)/range(1,len(ml)+1)
# mlst, = np.where(mls>=ml)
# mld[x,y] = ((Zlist[mlst[len(mlst)-1]]))
#plt.plot(np.mean(mld,1))
#plt.savefig('./plot/'+label+'/'+file1+'_MLD.eps',bbox_inches='tight')
#plt.close()
# Density
fig = plt.figure(figsize=(8, 10))
# for d in depths:
# plt.axhline(y=d, xmin=-180000, xmax=180000,color='k',linestyle='--')
v = np.linspace(-1e-6, 1e-6, 50, endpoint=True)
vl = np.linspace(-1e-6, 1e-6, 5, endpoint=True)
plt.contourf(Xlist/1000,Zlist,N,50,extend='both',cmap=plt.cm.PiYG)
plt.colorbar()
plt.contour(Xlist/1000,Zlist,Rr,10,colors='k')
# plt.colorbar()
# plt.plot(Zlist)
# plt.plot(Xlist,np.mean(mld,1),'r-')
 plt.xlabel('X (km)')
plt.ylabel('Z (m)')
# plt.xticks(range(lati,lonf,1000),(range(0,15,1)))
# plt.yticks(range(depthi,depthf,10),(range(0,15,1)))
plt.title('N^2')
plt.savefig('./plot/'+label+'/N2_'+file1+'_v_snap.eps',bbox_inches='tight')
plt.close()
print 'saved '+'./plot/'+label+'/N2_'+file1+'_v_snap.eps\n'
# W normalized
fig = plt.figure(figsize=(8, 10))
# v = np.linspace(np.percentile(Wn,1), np.percentile(Wn,99), 50, endpoint=True)
v = np.linspace(-1e-5, 1e-5, 50, endpoint=True)
vl = np.linspace(-1e-6, 1e-6, 5, endpoint=True)
plt.contourf(Xlist/1000,Zlist,Wn,v,extend='both',cmap=plt.cm.PiYG)
plt.colorbar()
plt.contour(Xlist/1000,Zlist,Rr,10,colors='k')
 plt.xlabel('X (km)')
plt.ylabel('Z (m)')
plt.title('W normalized')
plt.savefig('./plot/'+label+'/Wn_'+file1+'_v_snap.eps',bbox_inches='tight')
plt.close()
print 'saved '+'./plot/'+label+'/Wn_'+file1+'_v_snap.eps\n'
|
gpl-2.0
|
dhruv13J/scikit-learn
|
sklearn/cluster/tests/test_spectral.py
|
262
|
7954
|
"""Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
for eigen_solver in ('arpack', 'lobpcg'):
for assign_labels in ('kmeans', 'discretize'):
for mat in (S, sparse.csr_matrix(S)):
model = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed',
eigen_solver=eigen_solver,
assign_labels=assign_labels
).fit(mat)
labels = model.labels_
if labels[0] == 0:
labels = 1 - labels
assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
model_copy = loads(dumps(model))
assert_equal(model_copy.n_clusters, model.n_clusters)
assert_equal(model_copy.eigen_solver, model.eigen_solver)
assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
# Test the amg mode of SpectralClustering
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
try:
from pyamg import smoothed_aggregation_solver
amg_loaded = True
except ImportError:
amg_loaded = False
if amg_loaded:
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="amg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
else:
assert_raises(ValueError, spectral_embedding, S,
n_components=len(centers),
random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
# Test that SpectralClustering fails with an unknown mode set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
# Test that SpectralClustering fails with an unknown assign_labels set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01)
S = rbf_kernel(X, gamma=1)
S = np.maximum(S - 1e-4, 0)
S = sparse.coo_matrix(S)
labels = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed').fit(S).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
# Note: in the following, random_state has been selected to have
# a dataset that yields a stable eigen decomposition both when built
# on OSX and Linux
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01
)
# nearest neighbors affinity
sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
random_state=0)
assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
assert_equal(adjusted_rand_score(y, sp.labels_), 1)
sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
X = check_random_state(10).rand(10, 5) * 10
kernels_available = kernel_metrics()
for kern in kernels_available:
# Additive chi^2 gives a negative similarity matrix which
# doesn't make sense for spectral clustering
if kern != 'additive_chi2':
sp = SpectralClustering(n_clusters=2, affinity=kern,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
# raise error on unknown affinity
sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
# Test the discretize using a noise assignment matrix
random_state = np.random.RandomState(seed)
for n_samples in [50, 100, 150, 500]:
for n_class in range(2, 10):
# random class labels
y_true = random_state.random_integers(0, n_class, n_samples)
y_true = np.array(y_true, np.float)
# noise class assignment matrix
y_indicator = sparse.coo_matrix((np.ones(n_samples),
(np.arange(n_samples),
y_true)),
shape=(n_samples,
n_class + 1))
y_true_noisy = (y_indicator.toarray()
+ 0.1 * random_state.randn(n_samples,
n_class + 1))
y_pred = discretize(y_true_noisy, random_state)
assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
|
bsd-3-clause
|
jaakkojulin/potku
|
Widgets/MatplotlibTofeHistogramWidget.py
|
1
|
31269
|
# coding=utf-8
'''
Created on 18.4.2013
Updated on 30.8.2013
Potku is a graphical user interface for analyzation and
visualization of measurement data collected from a ToF-ERD
telescope. For physics calculations Potku uses external
analyzation components.
Copyright (C) Jarkko Aalto, Timo Konu, Samuli Kärkkäinen, Samuli Rahkonen and
Miika Raunio
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program (file named 'LICENCE').
'''
__author__ = "Jarkko Aalto \n Timo Konu \n Samuli Kärkkäinen \n Samuli Rahkonen \n Miika Raunio"
__versio__ = "1.0"
from matplotlib import cm
from matplotlib.colors import LogNorm
from PyQt4 import QtCore, QtGui
from Dialogs.SelectionDialog import SelectionSettingsDialog
from Dialogs.GraphSettingsDialog import TofeGraphSettingsWidget
from Modules.Functions import open_file_dialog
from Widgets.MatplotlibWidget import MatplotlibWidget
class MatplotlibHistogramWidget(MatplotlibWidget):
'''Matplotlib histogram widget, used to graph "bananas" (ToF-E).
'''
color_scheme = {"Default color":"jet",
"Greyscale":"Greys",
"Greyscale (inverted)":"gray"}
tool_modes = { 0 : "",
1 : "pan/zoom", # Matplotlib's drag
2 : "zoom rect", # Matplotlib's zoom
3 : "selection tool",
4 : "selection select tool"
}
def __init__(self, parent, measurement_data, masses, icon_manager):
'''Inits histogram widget
Args:
parent: A TofeHistogramWidget class object.
measurement_data: A list of data points.
            masses: A masses class object.
            icon_manager: An IconManager class object.
'''
super().__init__(parent)
self.canvas.manager.set_title("ToF-E Histogram")
self.axes.fmt_xdata = lambda x: "{0:1.0f}".format(x)
self.axes.fmt_ydata = lambda y: "{0:1.0f}".format(y)
self.__masses = masses
self.__icon_manager = icon_manager
# Connections and setup
self.canvas.mpl_connect('button_press_event', self.on_click)
self.canvas.mpl_connect('motion_notify_event', self.__on_motion)
self.__fork_toolbar_buttons()
self.measurement = measurement_data
self.__x_data = [x[0] for x in self.measurement.data]
self.__y_data = [x[1] for x in self.measurement.data]
# Variables
self.__inverted_Y = False
self.__inverted_X = False
self.__transposed = False
self.__inited__ = False
self.__range_mode_automated = False
# Get settings from global settings
self.__global_settings = self.main_frame.measurement.project.global_settings
self.invert_Y = self.__global_settings.get_tofe_invert_y()
self.invert_X = self.__global_settings.get_tofe_invert_x()
self.transpose_axes = self.__global_settings.get_tofe_transposed()
self.measurement.color_scheme = self.__global_settings.get_tofe_color()
self.compression_x = self.__global_settings.get_tofe_compression_x()
self.compression_y = self.__global_settings.get_tofe_compression_y()
self.axes_range_mode = self.__global_settings.get_tofe_bin_range_mode()
x_range = self.__global_settings.get_tofe_bin_range_x()
y_range = self.__global_settings.get_tofe_bin_range_y()
self.axes_range = [x_range, y_range]
self.__x_data_min, self.__x_data_max = self.__fix_axes_range(
(min(self.__x_data), max(self.__x_data)),
self.compression_x)
self.__y_data_min, self.__y_data_max = self.__fix_axes_range(
(min(self.__y_data), max(self.__y_data)),
self.compression_y)
self.name_y_axis = "Energy (Ch)"
self.name_x_axis = "time of flight (Ch)"
self.on_draw()
def on_draw(self):
'''Draw method for matplotlib.
'''
# Values for zoom
x_min, x_max = self.axes.get_xlim()
y_min, y_max = self.axes.get_ylim()
x_data = self.__x_data
y_data = self.__y_data
# Transpose
if self.transpose_axes:
x_data, y_data = y_data, x_data # Always transpose data if checked.
if not self.__transposed:
self.__transposed = True
self.measurement.selector.transpose(True)
# Switch axes names
self.name_x_axis, self.name_y_axis = (self.name_y_axis,
self.name_x_axis)
# Switch min & max values
x_min, x_max, y_min, y_max = y_min, y_max, x_min, x_max
# Switch inverts
self.invert_X, self.invert_Y = self.invert_Y, self.invert_X
if not self.transpose_axes and self.__transposed:
self.__transposed = False
self.measurement.selector.transpose(False)
# Switch axes names
self.name_x_axis, self.name_y_axis = self.name_y_axis, self.name_x_axis
# Switch min & max values
x_min, x_max, y_min, y_max = y_min, y_max, x_min, x_max
# Switch inverts
self.invert_X, self.invert_Y = self.invert_Y, self.invert_X
self.axes.clear() # Clear old stuff
# Check values for graph
axes_range = None
bin_counts = ((self.__x_data_max - self.__x_data_min) / self.compression_x,
(self.__y_data_max - self.__y_data_min) / self.compression_y)
if self.axes_range_mode == 1:
axes_range = list(self.axes_range)
axes_range[0] = self.__fix_axes_range(axes_range[0], self.compression_x)
axes_range[1] = self.__fix_axes_range(axes_range[1], self.compression_y)
x_length = axes_range[0][1] - axes_range[0][0]
y_length = axes_range[1][1] - axes_range[1][0]
bin_counts = (x_length / self.compression_x,
y_length / self.compression_y)
# If bin count too high -> it will crash the program
if bin_counts[0] > 3500:
old_count = bin_counts[0]
bin_counts = (3500, bin_counts[1])
# TODO: Better location for message?
print("[WARNING] {0}: X axis bin count ({2}) above 3500. {1}".format(
self.measurement.measurement_name,
"Limiting to prevent crash.",
old_count))
if bin_counts[1] > 3500:
old_count = bin_counts[1]
bin_counts = (bin_counts[0], 3500)
print("[WARNING] {0}: Y axis bin count ({2}) above 3500. {1}".format(
self.measurement.measurement_name,
"Limiting to prevent crash.",
old_count))
use_color_scheme = self.measurement.color_scheme
color_scheme = MatplotlibHistogramWidget.color_scheme[use_color_scheme]
colormap = cm.get_cmap(color_scheme)
self.axes.hist2d(x_data,
y_data,
bins=bin_counts,
norm=LogNorm(),
range=axes_range,
cmap=colormap)
self.__on_draw_legend()
if (x_max > 0.09 and x_max < 1.01): # This works..
x_min, x_max = self.axes.get_xlim()
if (y_max > 0.09 and y_max < 1.01): # or self.axes_range_mode
y_min, y_max = self.axes.get_ylim()
# Change zoom limits if compression factor was changed (or new graph).
if (not self.__range_mode_automated and self.axes_range_mode == 0) \
or self.axes_range_mode == 1:
# self.__range_mode_automated and self.axes_range_mode == 1
tx_min, tx_max = self.axes.get_xlim()
ty_min, ty_max = self.axes.get_ylim()
# If user has zoomed the graph, change the home position to new max.
# Else reset the graph to new ranges and clear zoom levels.
if self.mpl_toolbar._views:
self.mpl_toolbar._views[0][0] = (tx_min, tx_max, ty_min, ty_max)
else:
x_min, x_max = tx_min, tx_max
y_min, y_max = ty_min, ty_max
self.mpl_toolbar.update()
self.__range_mode_automated = self.axes_range_mode == 0
# print(self.axes.get_xlim())
# Set limits accordingly
self.axes.set_ylim([y_min, y_max])
self.axes.set_xlim([x_min, x_max])
self.measurement.draw_selection()
# Invert axis
if self.invert_Y and not self.__inverted_Y:
self.axes.set_ylim(self.axes.get_ylim()[::-1])
self.__inverted_Y = True
elif not self.invert_Y and self.__inverted_Y:
self.axes.set_ylim(self.axes.get_ylim()[::-1])
self.__inverted_Y = False
if self.invert_X and not self.__inverted_X:
self.axes.set_xlim(self.axes.get_xlim()[::-1])
self.__inverted_X = True
elif not self.invert_X and self.__inverted_X:
self.axes.set_xlim(self.axes.get_xlim()[::-1])
self.__inverted_X = False
# [::-1] is elegant reverse. Slice sequence with step of -1.
# http://stackoverflow.com/questions/3705670/
# best-way-to-create-a-reversed-list-in-python
# self.axes.set_title('ToF Histogram\n\n')
self.axes.set_ylabel(self.name_y_axis.title())
self.axes.set_xlabel(self.name_x_axis.title())
# Remove axis ticks and draw
self.remove_axes_ticks()
self.canvas.draw()
def __fix_axes_range(self, axes_range, compression):
"""Fixes axes' range to be divisible by compression.
"""
rmin, rmax = axes_range
mod = (rmax - rmin) % compression
if mod == 0: # Everything is fine, return.
return axes_range
# More data > less data
rmax += compression - mod
return rmin, rmax
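        # Worked example (added comment): with axes_range=(0, 103) and
        # compression=10, mod is 3, so rmax is padded to 110 and the returned
        # range (0, 110) splits evenly into bins of width 10.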
def __set_y_axis_on_right(self, yes):
if yes:
# self.axes.spines['left'].set_color('none')
self.axes.spines['right'].set_color('black')
self.axes.yaxis.tick_right()
self.axes.yaxis.set_label_position("right")
else:
self.axes.spines['left'].set_color('black')
# self.axes.spines['right'].set_color('none')
self.axes.yaxis.tick_left()
self.axes.yaxis.set_label_position("left")
def __set_x_axis_on_top(self, yes):
if yes:
# self.axes.spines['bottom'].set_color('none')
self.axes.spines['top'].set_color('black')
self.axes.xaxis.tick_top()
self.axes.xaxis.set_label_position("top")
else:
self.axes.spines['bottom'].set_color('black')
# self.axes.spines['top'].set_color('none')
self.axes.xaxis.tick_bottom()
self.axes.xaxis.set_label_position("bottom")
def __on_draw_legend(self):
self.axes.legend_ = None
if not self.measurement.selector.selections:
return
if not self.__inited__: # Do this only once.
self.fig.tight_layout(pad=0.5)
box = self.axes.get_position()
self.axes.set_position([box.x0,
box.y0,
box.width * 0.9,
box.height])
self.__inited__ = True
selection_legend = {}
# Get selections for legend
for sel in self.measurement.selector.selections:
rbs_string = ""
if sel.type == "ERD":
element_object = sel.element
elif sel.type == "RBS":
element_object = sel.element_scatter
rbs_string = "*"
sel.points.set_marker(None) # Remove markers for legend.
dirtyinteger = 0
key_string = "{0}{1}".format(element_object, dirtyinteger)
while key_string in selection_legend.keys():
dirtyinteger += 1
key_string = "{0}{1}".format(element_object,
dirtyinteger)
element, isotope = element_object.get_element_and_isotope()
label = r"$^{" + str(isotope) + "}$" + element + rbs_string
mass = str(isotope)
if not mass:
mass = self.__masses.get_standard_isotope(element)
else:
mass = float(mass)
selection_legend[key_string] = (label, mass, sel.points)
# Sort legend text
sel_text = []
sel_points = []
# keys = sorted(selection_legend.keys())
items = sorted(selection_legend.items(), key=lambda x: x[1][1])
for item in items:
# [0] is the key of the item.
sel_text.append(item[1][0])
sel_points.append(item[1][2])
leg = self.axes.legend(sel_points,
sel_text,
loc=3,
bbox_to_anchor=(1, 0),
borderaxespad=0,
prop={'size':12})
for handle in leg.legendHandles:
handle.set_linewidth(3.0)
# Set the markers back to original.
for sel in self.measurement.selector.selections:
sel.points.set_marker(sel.LINE_MARKER)
def __toggle_tool_drag(self):
if self.__button_drag.isChecked():
self.mpl_toolbar.mode_tool = 1
else:
self.mpl_toolbar.mode_tool = 0
# self.elementSelectionButton.setChecked(False)
# self.elementSelectUndoButton.setEnabled(False)
self.elementSelectionSelectButton.setChecked(False)
# self.measurement.purge_selection()
# self.measurement.reset_select()
self.canvas.draw_idle()
def __toggle_tool_zoom(self):
if self.__button_zoom.isChecked():
self.mpl_toolbar.mode_tool = 2
else:
self.mpl_toolbar.mode_tool = 0
# self.elementSelectionButton.setChecked(False)
# self.elementSelectUndoButton.setEnabled(False)
self.elementSelectionSelectButton.setChecked(False)
# self.measurement.purge_selection()
# self.measurement.reset_select()
self.canvas.draw_idle()
def __toggle_drag_zoom(self):
self.__tool_label.setText("")
if self.__button_drag.isChecked():
self.mpl_toolbar.pan()
if self.__button_zoom.isChecked():
self.mpl_toolbar.zoom()
self.__button_drag.setChecked(False)
self.__button_zoom.setChecked(False)
def __fork_toolbar_buttons(self):
super().fork_toolbar_buttons()
self.mpl_toolbar.mode_tool = 0
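        # Note (added comment): the toolbar widgets below are fetched by child
        # index, which depends on the matplotlib/Qt toolbar layout and is
        # therefore fragile across library versions.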
self.__tool_label = self.mpl_toolbar.children()[24]
self.__button_drag = self.mpl_toolbar.children()[12]
self.__button_zoom = self.mpl_toolbar.children()[14]
self.__button_drag.clicked.connect(self.__toggle_tool_drag)
self.__button_zoom.clicked.connect(self.__toggle_tool_zoom)
# Make own buttons
self.mpl_toolbar.addSeparator()
self.elementSelectionButton = QtGui.QToolButton(self)
self.elementSelectionButton.clicked.connect(self.enable_element_selection)
self.elementSelectionButton.setCheckable(True)
self.__icon_manager.set_icon(self.elementSelectionButton, "select.png")
self.elementSelectionButton.setToolTip("Select element area")
self.mpl_toolbar.addWidget(self.elementSelectionButton)
# Selection undo button
self.elementSelectUndoButton = QtGui.QToolButton(self)
self.elementSelectUndoButton.clicked.connect(self.undo_point)
self.__icon_manager.set_icon(self.elementSelectUndoButton, "undo.png")
self.elementSelectUndoButton.setToolTip("Undo last point in open selection")
self.elementSelectUndoButton.setEnabled(False)
self.mpl_toolbar.addWidget(self.elementSelectUndoButton)
self.mpl_toolbar.addSeparator()
# Element Selection selecting tool
self.elementSelectionSelectButton = QtGui.QToolButton(self)
self.elementSelectionSelectButton.clicked.connect(
self.enable_selection_select)
self.elementSelectionSelectButton.setCheckable(True)
self.elementSelectionSelectButton.setEnabled(False)
self.__icon_manager.set_icon(self.elementSelectionSelectButton,
"selectcursor.png")
self.elementSelectionSelectButton.setToolTip("Select element selection")
self.mpl_toolbar.addWidget(self.elementSelectionSelectButton)
# Selection delete button
self.elementSelectDeleteButton = QtGui.QToolButton(self)
self.elementSelectDeleteButton.setEnabled(False)
self.elementSelectDeleteButton.clicked.connect(self.remove_selected)
self.__icon_manager.set_icon(self.elementSelectDeleteButton, "del.png")
self.elementSelectDeleteButton.setToolTip("Delete selected selection")
self.mpl_toolbar.addWidget(self.elementSelectDeleteButton)
self.mpl_toolbar.addSeparator()
# Selection delete all -button
self.elementSelectionDeleteButton = QtGui.QToolButton(self)
self.elementSelectionDeleteButton.clicked.connect(
self.remove_all_selections)
self.__icon_manager.set_icon(self.elementSelectionDeleteButton,
"delall.png")
self.elementSelectionDeleteButton.setToolTip("Delete all selections")
self.mpl_toolbar.addWidget(self.elementSelectionDeleteButton)
def on_click(self, event):
'''On click event above graph.
Args:
event: A MPL MouseEvent
'''
# Only inside the actual graph axes, else do nothing.
if event.inaxes != self.axes:
return
# Allow dragging and zooming while selection is on but ignore clicks.
if self.__button_drag.isChecked() or self.__button_zoom.isChecked():
return
cursorlocation = [int(event.xdata), int(event.ydata)]
# TODO: Possible switch to QtCore's mouseclicks
# buttond = {QtCore.Qt.LeftButton : 1,
# QtCore.Qt.MidButton : 2,
# QtCore.Qt.RightButton : 3,
# # QtCore.Qt.XButton1 : None,
# # QtCore.Qt.XButton2 : None,
# }
# However, QtCore.Qt.RightButton is actually middle button (wheel) on
# windows. So we'll use the numbers instead since they actually work
# cross-platform just fine.
# [DEBUG] Middle mouse button to debug current zoom levels or position.
# if event.button == 2:
# print()
# print("VIEWS:")
# for item in self.mpl_toolbar._views:
# print("\t{0}".format(item))
# print("POSITIONS:")
# for item in self.mpl_toolbar._positions:
# print("\t{0}".format(item))
if event.button == 1: # Left click
if self.elementSelectionSelectButton.isChecked():
if self.measurement.selection_select(cursorlocation) == 1:
# self.elementSelectDeleteButton.setChecked(True)
self.elementSelectDeleteButton.setEnabled(True)
self.canvas.draw_idle()
self.__on_draw_legend()
if self.elementSelectionButton.isChecked(): # If selection is enabled
if self.measurement.add_point(cursorlocation, self.canvas) == 1:
self.__on_draw_legend()
self.__emit_selections_changed()
self.canvas.draw_idle() # Draw selection points
if event.button == 3: # Right click
# Return if matplotlib tools are in use.
if self.__button_drag.isChecked():
return
if self.__button_zoom.isChecked():
return
# If selection is enabled
if self.elementSelectionButton.isChecked():
if self.measurement.end_open_selection(self.canvas):
self.elementSelectionSelectButton.setEnabled(True)
self.canvas.draw_idle()
self.__on_draw_legend()
self.__emit_selections_changed()
return # We don't want menu to be shown also
self.__context_menu(event, cursorlocation)
self.canvas.draw_idle()
self.__on_draw_legend()
def __emit_selections_changed(self):
"""Emits a 'selectionsChanged' signal with the selections list as a parameter.
"""
self.emit(QtCore.SIGNAL("selectionsChanged(PyQt_PyObject)"),
self.measurement.selector.selections)
def __emit_save_cuts(self):
"""Emits a 'selectionsChanged' signal with the selections list as a parameter.
"""
self.emit(QtCore.SIGNAL("saveCuts(PyQt_PyObject)"), self.measurement)
def __context_menu(self, event, cursorlocation):
menu = QtGui.QMenu(self)
Action = QtGui.QAction(self.tr("Graph Settings..."), self)
Action.triggered.connect(self.graph_settings_dialog)
menu.addAction(Action)
if self.measurement.selection_select(cursorlocation,
highlight=False) == 1:
Action = QtGui.QAction(self.tr("Selection settings..."), self)
Action.triggered.connect(self.selection_settings_dialog)
menu.addAction(Action)
menu.addSeparator()
Action = QtGui.QAction(self.tr("Load selections..."), self)
Action.triggered.connect(self.load_selections)
menu.addAction(Action)
Action = QtGui.QAction(self.tr("Save cuts"), self)
Action.triggered.connect(self.save_cuts)
menu.addAction(Action)
if len(self.measurement.selector.selections) == 0:
Action.setEnabled(False)
coords = self.canvas.geometry().getCoords()
point = QtCore.QPoint(event.x, coords[3] - event.y - coords[1])
# coords[1] from spacing
menu.exec_(self.canvas.mapToGlobal(point))
def graph_settings_dialog(self):
'''Show graph settings dialog.
'''
TofeGraphSettingsWidget(self)
def selection_settings_dialog(self):
'''Show selection settings dialog.
'''
selection = self.measurement.selector.get_selected()
SelectionSettingsDialog(selection)
self.measurement.selector.auto_save()
self.on_draw()
self.__emit_selections_changed()
def load_selections(self):
'''Show dialog to load selections.
'''
filename = open_file_dialog(self, self.measurement.directory,
"Load Element Selection",
"Selection file (*.sel)")
if filename:
self.measurement.load_selection(filename)
self.on_draw()
self.elementSelectionSelectButton.setEnabled(True)
self.__emit_selections_changed()
def save_cuts(self):
'''Save measurement cuts.
'''
self.measurement.save_cuts()
self.__emit_save_cuts()
def enable_element_selection(self):
'''Enable element selection.
'''
self.elementSelectUndoButton.setEnabled(
self.elementSelectionButton.isChecked())
if self.elementSelectionButton.isChecked(): # if button is enabled
# One cannot choose selection while selecting
self.elementSelectionSelectButton.setChecked(False)
self.__toggle_drag_zoom()
self.mpl_toolbar.mode_tool = 3
str_tool = self.tool_modes[self.mpl_toolbar.mode_tool]
self.__tool_label.setText(str_tool)
self.mpl_toolbar.mode = str_tool
else:
self.__tool_label.setText("")
self.mpl_toolbar.mode_tool = 0
self.mpl_toolbar.mode = ""
self.measurement.purge_selection() # Remove hanging selection points
self.measurement.reset_select()
self.canvas.draw_idle()
self.__on_draw_legend()
def enable_selection_select(self):
'''Enable selection selecting tool.
'''
if self.elementSelectionSelectButton.isChecked():
self.measurement.purge_selection()
self.canvas.draw_idle()
# One cannot make new selection while choosing selection
self.elementSelectionButton.setChecked(False)
self.elementSelectUndoButton.setEnabled(False)
self.__toggle_drag_zoom()
self.mpl_toolbar.mode_tool = 4
str_tool = self.tool_modes[self.mpl_toolbar.mode_tool]
self.__tool_label.setText(str_tool)
self.mpl_toolbar.mode = str_tool
else:
self.elementSelectDeleteButton.setEnabled(False)
self.__tool_label.setText("")
self.mpl_toolbar.mode_tool = 0
self.mpl_toolbar.mode = ""
self.measurement.reset_select()
self.__on_draw_legend()
self.canvas.draw_idle()
def remove_selected(self):
'''Remove selected selection.
'''
self.measurement.remove_selected()
self.measurement.reset_select() # Nothing is now selected, reset colors
self.measurement.selector.auto_save()
self.elementSelectDeleteButton.setEnabled(False)
self.__on_draw_legend()
self.canvas.draw_idle()
self.__emit_selections_changed()
def remove_all_selections(self):
'''Remove all selections.
'''
reply = QtGui.QMessageBox.question(self,
"Delete all selections",
"Do you want to delete all selections?\nThis cannot be reversed.",
QtGui.QMessageBox.Yes,
QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
self.measurement.remove_all()
self.__on_draw_legend()
self.canvas.draw_idle()
self.__emit_selections_changed()
def undo_point(self):
'''Undo last point in open selection.
'''
self.measurement.undo_point()
self.canvas.draw_idle()
def show_yourself(self, ui):
'''Show ToF-E histogram settings in ui.
Args:
ui: A TofeGraphSettingsWidget's .ui file variable.
'''
# Populate colorbox
dirtyinteger = 0
colors = sorted(MatplotlibHistogramWidget.color_scheme.items())
for k, unused_v in colors: # Get keys from color scheme
ui.colorbox.addItem(k)
if k == self.measurement.color_scheme:
ui.colorbox.setCurrentIndex(dirtyinteger)
dirtyinteger += 1
# Get values
ui.bin_x.setValue(self.compression_x)
ui.bin_y.setValue(self.compression_y)
ui.invert_x.setChecked(self.invert_X)
ui.invert_y.setChecked(self.invert_Y)
ui.axes_ticks.setChecked(self.show_axis_ticks)
ui.transposeAxesCheckBox.setChecked(self.transpose_axes)
ui.radio_range_auto.setChecked(self.axes_range_mode == 0)
ui.radio_range_manual.setChecked(self.axes_range_mode == 1)
ui.spin_range_x_min.setValue(self.axes_range[0][0])
ui.spin_range_x_max.setValue(self.axes_range[0][1])
ui.spin_range_y_min.setValue(self.axes_range[1][0])
ui.spin_range_y_max.setValue(self.axes_range[1][1])
def __on_motion(self, event):
'''Function to handle hovering over matplotlib's graph.
Args:
event: A MPL MouseEvent
'''
event.button = -1 # Fix for printing.
if event.inaxes != self.axes:
return
if event.xdata == None and event.ydata == None:
return
in_selection = False
points = 0
point = [int(event.xdata), int(event.ydata)]
if self.measurement.selector.axes_limits.is_inside(point):
for selection in self.measurement.selector.selections:
if selection.point_inside(point):
points = selection.get_event_count()
in_selection = True
break
if in_selection:
if self.mpl_toolbar.mode_tool:
str_tool = self.tool_modes[self.mpl_toolbar.mode_tool]
str_text = str_tool + "; points in selection: {0}".format(points)
else:
str_text = "points in selection: {0}".format(points)
self.mpl_toolbar.mode = str_text
else:
if self.mpl_toolbar.mode_tool:
self.mpl_toolbar.mode = self.tool_modes[self.mpl_toolbar.mode_tool]
else:
self.mpl_toolbar.mode = ""
def sc_comp_inc(self, mode):
"""Shortcut to increase compression factor.
Args:
mode: An integer representing axis or axes to change.
"""
if (mode == 0 or mode == 2) and self.compression_x < 3000:
self.compression_x += 1
if (mode == 1 or mode == 2) and self.compression_y < 3000:
self.compression_y += 1
self.on_draw()
def sc_comp_dec(self, mode):
"""Shortcut to decrease compression factor.
Args:
mode: An integer representing axis or axes to change.
"""
if (mode == 0 or mode == 2) and self.compression_x > 1:
self.compression_x -= 1
if (mode == 1 or mode == 2) and self.compression_y > 1:
self.compression_y -= 1
self.on_draw()
|
gpl-2.0
|
Srisai85/scikit-learn
|
examples/datasets/plot_random_multilabel_dataset.py
|
278
|
3402
|
"""
==============================================
Plot randomly generated multilabel dataset
==============================================
This illustrates the `datasets.make_multilabel_classification` dataset
generator. Each sample consists of counts of two features (up to 50 in
total), which are differently distributed in each of two classes.
Points are labeled as follows, where Y means the class is present:
===== ===== ===== ======
1 2 3 Color
===== ===== ===== ======
Y N N Red
N Y N Blue
N N Y Yellow
Y Y N Purple
Y N Y Orange
N Y Y Green
Y Y Y Brown
===== ===== ===== ======
A star marks the expected sample for each class; its size reflects the
probability of selecting that class label.
The left and right examples highlight the ``n_labels`` parameter:
more of the samples in the right plot have 2 or 3 labels.
Note that this two-dimensional example is very degenerate:
generally the number of features would be much greater than the
"document length", while here we have much larger documents than vocabulary.
Similarly, with ``n_classes > n_features``, it is much less likely that a
feature distinguishes a particular class.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification as make_ml_clf
print(__doc__)
COLORS = np.array(['!',
'#FF3333', # red
'#0198E1', # blue
'#BF5FFF', # purple
'#FCD116', # yellow
'#FF7216', # orange
'#4DBD33', # green
'#87421F' # brown
])
# Use same random seed for multiple calls to make_multilabel_classification to
# ensure same distributions
RANDOM_SEED = np.random.randint(2 ** 10)
def plot_2d(ax, n_labels=1, n_classes=3, length=50):
X, Y, p_c, p_w_c = make_ml_clf(n_samples=150, n_features=2,
n_classes=n_classes, n_labels=n_labels,
length=length, allow_unlabeled=False,
return_distributions=True,
random_state=RANDOM_SEED)
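    # Added note: label combinations are encoded with binary weights 1, 2, 4,
    # so e.g. classes {1, 2} -> COLORS[3] (purple) and {2, 3} -> COLORS[6]
    # (green), matching the table in the module docstring.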
ax.scatter(X[:, 0], X[:, 1], color=COLORS.take((Y * [1, 2, 4]
).sum(axis=1)),
marker='.')
ax.scatter(p_w_c[0] * length, p_w_c[1] * length,
marker='*', linewidth=.5, edgecolor='black',
s=20 + 1500 * p_c ** 2,
color=COLORS.take([1, 2, 4]))
ax.set_xlabel('Feature 0 count')
return p_c, p_w_c
_, (ax1, ax2) = plt.subplots(1, 2, sharex='row', sharey='row', figsize=(8, 4))
plt.subplots_adjust(bottom=.15)
p_c, p_w_c = plot_2d(ax1, n_labels=1)
ax1.set_title('n_labels=1, length=50')
ax1.set_ylabel('Feature 1 count')
plot_2d(ax2, n_labels=3)
ax2.set_title('n_labels=3, length=50')
ax2.set_xlim(left=0, auto=True)
ax2.set_ylim(bottom=0, auto=True)
plt.show()
print('The data was generated from (random_state=%d):' % RANDOM_SEED)
print('Class', 'P(C)', 'P(w0|C)', 'P(w1|C)', sep='\t')
for k, p, p_w in zip(['red', 'blue', 'yellow'], p_c, p_w_c.T):
print('%s\t%0.2f\t%0.2f\t%0.2f' % (k, p, p_w[0], p_w[1]))
|
bsd-3-clause
|